
📄 ibmvscsi.c

📁 linux-2.6.15.6
💻 C
📖 Page 1 of 4
	struct indirect_descriptor *indirect =
	    (struct indirect_descriptor *)data;

	sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
	if (sg_mapped == 0)
		return 0;

	set_srp_direction(cmd, srp_cmd, sg_mapped);

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		data->virtual_address = sg_dma_address(&sg[0]);
		data->length = sg_dma_len(&sg[0]);
		data->memory_handle = 0;
		return 1;
	}

	if (sg_mapped > SG_ALL) {
		printk(KERN_ERR
		       "ibmvscsi: More than %d mapped sg entries, got %d\n",
		       SG_ALL, sg_mapped);
		return 0;
	}

	indirect->head.virtual_address = 0;
	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
	indirect->head.memory_handle = 0;

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
		indirect->total_length = total_length;
		return 1;
	}

	/* get indirect table */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list = (struct memory_descriptor *)
			dma_alloc_coherent(dev,
				SG_ALL * sizeof(struct memory_descriptor),
				&evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			printk(KERN_ERR
			       "ibmvscsi: Can't allocate memory for indirect table\n");
			return 0;
		}
	}

	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);

	indirect->total_length = total_length;
	indirect->head.virtual_address = evt_struct->ext_list_token;
	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
	memcpy(indirect->list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));

	return 1;
}
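/*
 * Editor's note on map_sg_data() above: the scatterlist is encoded one of
 * three ways, chosen by how many entries dma_map_sg() produced.  A single
 * entry becomes one direct memory_descriptor inside the SRP IU; up to
 * MAX_INDIRECT_BUFS entries are written inline into indirect->list[];
 * anything larger goes into a DMA-coherent table (evt_struct->ext_list)
 * that the IU merely points at through indirect->head.  The final memcpy
 * of the first MAX_INDIRECT_BUFS entries back into the IU presumably lets
 * the server begin on the leading buffers without first fetching the
 * external table.
 */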
/**
 * map_single_data: - Maps memory and initializes memory descriptor fields
 * @cmd:	struct scsi_cmnd with the memory to be mapped
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_single_data(struct scsi_cmnd *cmd,
			   struct srp_cmd *srp_cmd, struct device *dev)
{
	struct memory_descriptor *data =
	    (struct memory_descriptor *)srp_cmd->additional_data;

	data->virtual_address =
		dma_map_single(dev, cmd->request_buffer,
			       cmd->request_bufflen,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(data->virtual_address)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer for command!\n");
		return 0;
	}
	data->length = cmd->request_bufflen;
	data->memory_handle = 0;

	set_srp_direction(cmd, srp_cmd, 1);

	return 1;
}

/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd:	struct scsi_cmnd with the memory to be mapped
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	dma device for which to map dma memory
 *
 * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds.
 * Returns 1 on success.
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
				struct srp_event_struct *evt_struct,
				struct srp_cmd *srp_cmd, struct device *dev)
{
	switch (cmd->sc_data_direction) {
	case DMA_FROM_DEVICE:
	case DMA_TO_DEVICE:
		break;
	case DMA_NONE:
		return 1;
	case DMA_BIDIRECTIONAL:
		printk(KERN_ERR
		       "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
		return 0;
	default:
		printk(KERN_ERR
		       "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
		       cmd->sc_data_direction);
		return 0;
	}

	if (!cmd->request_buffer)
		return 1;
	if (cmd->use_sg)
		return map_sg_data(cmd, evt_struct, srp_cmd, dev);
	return map_single_data(cmd, srp_cmd, dev);
}
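/*
 * Editor's note on map_data_for_srp_cmd() above: an SRP command carries
 * separate data_out_format and data_in_format fields, so a request is
 * mapped as either a read or a write; DMA_BIDIRECTIONAL has no such
 * encoding here and is rejected outright.  DMA_NONE succeeds trivially,
 * and a command without a request_buffer needs no descriptor at all.
 */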
/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct:	evt_struct to be sent
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
				   struct ibmvscsi_host_data *hostdata)
{
	struct scsi_cmnd *cmnd;
	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
	int rc;

	/* If we have exhausted our request limit, just fail this request.
	 * Note that there are rare cases involving driver generated requests
	 * (such as task management requests) in which the mid layer may think
	 * we can handle more requests (can_queue) than we actually can
	 */
	if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
	    (atomic_dec_if_positive(&hostdata->request_limit) < 0)) {
		/* See if the adapter is disabled */
		if (atomic_read(&hostdata->request_limit) < 0)
			goto send_error;

		printk(KERN_WARNING
		       "ibmvscsi: Warning, request_limit exceeded\n");
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       hostdata->dev);
		free_event_struct(&hostdata->pool, evt_struct);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Copy the IU into the transfer area */
	*evt_struct->xfer_iu = evt_struct->iu;
	evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct;

	/* Add this to the sent list.  We need to do this
	 * before we actually send
	 * in case it comes back REALLY fast
	 */
	list_add_tail(&evt_struct->list, &hostdata->sent);

	if ((rc =
	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
		list_del(&evt_struct->list);

		printk(KERN_ERR "ibmvscsi: failed to send event struct rc %d\n",
		       rc);
		goto send_error;
	}

	return 0;

 send_error:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	if ((cmnd = evt_struct->cmnd) != NULL) {
		cmnd->result = DID_ERROR << 16;
		evt_struct->cmnd_done(cmnd);
	} else if (evt_struct->done)
		evt_struct->done(evt_struct);

	free_event_struct(&hostdata->pool, evt_struct);

	return 0;
}

/**
 * handle_cmd_rsp: - Handle responses from commands
 * @evt_struct:	srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
	struct scsi_cmnd *cmnd = evt_struct->cmnd;

	if (unlikely(rsp->type != SRP_RSP_TYPE)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: bad SRP RSP type %d\n",
			       rsp->type);
	}

	if (cmnd) {
		cmnd->result = rsp->status;
		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
			memcpy(cmnd->sense_buffer,
			       rsp->sense_and_response_data,
			       rsp->sense_data_list_length);
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       evt_struct->hostdata->dev);

		if (rsp->doover)
			cmnd->resid = rsp->data_out_residual_count;
		else if (rsp->diover)
			cmnd->resid = rsp->data_in_residual_count;
	}

	if (evt_struct->cmnd_done)
		evt_struct->cmnd_done(cmnd);
}

/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev:	struct scsi_device
 */
static inline u16 lun_from_dev(struct scsi_device *dev)
{
	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}
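/*
 * Editor's note on lun_from_dev() above: the 16-bit result packs the
 * target id into bits 13:8, the channel into bits 7:5, and the lun into
 * bits 4:0, with 10b in the top two bits, matching SAM-style logical
 * unit addressing.  Worked example: id=1, channel=0, lun=2 gives
 * (0x2 << 14) | (1 << 8) | (0 << 5) | 2 = 0x8102, which
 * ibmvscsi_queuecommand() below then shifts into the high-order bytes
 * of the 64-bit srp_cmd->lun field.
 */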
/**
 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
 * @cmnd:	struct scsi_cmnd to be executed
 * @done:	Callback function to be called when cmnd is completed
 */
static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
				 void (*done) (struct scsi_cmnd *))
{
	struct srp_cmd *srp_cmd;
	struct srp_event_struct *evt_struct;
	struct indirect_descriptor *indirect;
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
	u16 lun = lun_from_dev(cmnd->device);

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Set up the actual SRP IU */
	srp_cmd = &evt_struct->iu.srp.cmd;
	memset(srp_cmd, 0x00, sizeof(*srp_cmd));
	srp_cmd->type = SRP_CMD_TYPE;
	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
	srp_cmd->lun = ((u64) lun) << 48;

	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
		printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	init_event_struct(evt_struct,
			  handle_cmd_rsp,
			  VIOSRP_SRP_FORMAT,
			  cmnd->timeout_per_command/HZ);

	evt_struct->cmnd = cmnd;
	evt_struct->cmnd_done = done;

	/* Fix up dma address of the buffer itself */
	indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
	if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
	     (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
	    (indirect->head.virtual_address == 0)) {
		indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
		    offsetof(struct srp_cmd, additional_data) +
		    offsetof(struct indirect_descriptor, list);
	}

	return ibmvscsi_send_srp_event(evt_struct, hostdata);
}

/* ------------------------------------------------------------
 * Routines for driver initialization
 */
/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info. Gets called
 * by ibmvscsi_handle_crq()
 */
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	dma_unmap_single(hostdata->dev,
			 evt_struct->iu.mad.adapter_info.buffer,
			 evt_struct->iu.mad.adapter_info.common.length,
			 DMA_BIDIRECTIONAL);

	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
		printk("ibmvscsi: error %d getting adapter info\n",
		       evt_struct->xfer_iu->mad.adapter_info.common.status);
	} else {
		printk("ibmvscsi: host srp version: %s, "
		       "host partition %s (%d), OS %d, max io %u\n",
		       hostdata->madapter_info.srp_version,
		       hostdata->madapter_info.partition_name,
		       hostdata->madapter_info.partition_number,
		       hostdata->madapter_info.os_type,
		       hostdata->madapter_info.port_max_txu[0]);

		if (hostdata->madapter_info.port_max_txu[0])
			hostdata->host->max_sectors =
				hostdata->madapter_info.port_max_txu[0] >> 9;

		if (hostdata->madapter_info.os_type == 3 &&
		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
			printk("ibmvscsi: host (Ver. %s) doesn't support large "
			       "transfers\n",
			       hostdata->madapter_info.srp_version);
			printk("ibmvscsi: limiting scatterlists to %d\n",
			       MAX_INDIRECT_BUFS);
			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
		}
	}
}

/**
 * send_mad_adapter_info: - Sends the mad adapter info request
 *      and stores the result so it can be retrieved with
 *      sysfs.  We COULD consider causing a failure if the
 *      returned SRP version doesn't match ours.
 * @hostdata:	ibmvscsi_host_data of host
 */
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_adapter_info *req;
	struct srp_event_struct *evt_struct;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
		       "for ADAPTER_INFO_REQ!\n");
		return;
	}

	init_event_struct(evt_struct,
			  adapter_info_rsp,
			  VIOSRP_MAD_FORMAT,
			  init_timeout * HZ);

	req = &evt_struct->iu.mad.adapter_info;
	memset(req, 0x00, sizeof(*req));

	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
	req->common.length = sizeof(hostdata->madapter_info);
	req->buffer = dma_map_single(hostdata->dev,
				     &hostdata->madapter_info,
				     sizeof(hostdata->madapter_info),
				     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(req->buffer)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer "
		       "for adapter_info!\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return;
	}
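/*
 * Editor's note on send_mad_adapter_info() above: the visible half of the
 * function (the listing continues on the next page) already shows the MAD
 * request pattern used throughout this driver: take an event from the
 * pool, point the request at a DMA-mapped buffer (bidirectional, since
 * the server writes madapter_info back into it), and let the "done"
 * callback, here adapter_info_rsp(), unmap the buffer and act on the
 * result.
 */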
