/*
 * ibmvscsi.c — excerpt from the Linux 2.6.17.4 kernel source
 * (original file is 1,586 lines; this is part 1 of 4 from a code-viewer
 * page; the non-code viewer chrome has been converted to comments)
 */
	/* (continuation of map_sg_data(): maps a scatter-gather list into
	 * SRP direct/indirect descriptors; the function header and local
	 * declarations are on an earlier page of this file) */
	if (sg_mapped == 0)
		return 0;

	set_srp_direction(cmd, srp_cmd, sg_mapped);

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		data->va = sg_dma_address(&sg[0]);
		data->len = sg_dma_len(&sg[0]);
		data->key = 0;
		return 1;
	}

	if (sg_mapped > SG_ALL) {
		printk(KERN_ERR
		       "ibmvscsi: More than %d mapped sg entries, got %d\n",
		       SG_ALL, sg_mapped);
		return 0;
	}

	indirect->table_desc.va = 0;
	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
	indirect->table_desc.key = 0;

	/* Small lists fit in the descriptor list embedded in the IU itself;
	 * table_desc.va stays 0 and is fixed up later by the caller */
	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(sg_mapped, sg,
					   &indirect->desc_list[0]);
		indirect->len = total_length;
		return 1;
	}

	/* get indirect table — allocated once per event struct and then
	 * reused for the lifetime of the event.
	 * NOTE(review): gfp flag 0 is passed to dma_alloc_coherent here —
	 * confirm this is intentional for this kernel version */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list = (struct srp_direct_buf *)
			dma_alloc_coherent(dev,
					   SG_ALL * sizeof(struct srp_direct_buf),
					   &evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			printk(KERN_ERR
			       "ibmvscsi: Can't allocate memory for indirect table\n");
			return 0;
		}
	}

	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);

	indirect->len = total_length;
	indirect->table_desc.va = evt_struct->ext_list_token;
	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
	/* Keep a copy of the first MAX_INDIRECT_BUFS descriptors inline in
	 * the IU as well */
	memcpy(indirect->desc_list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));

	return 1;
}

/**
 * map_single_data: - Maps memory and initializes memory descriptor fields
 * @cmd:	struct scsi_cmnd with the memory to be mapped
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_single_data(struct scsi_cmnd *cmd,
			   struct srp_cmd *srp_cmd, struct device *dev)
{
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;

	/* Map the single contiguous request buffer for DMA.
	 * NOTE(review): mapped DMA_BIDIRECTIONAL regardless of the command's
	 * actual transfer direction */
	data->va =
		dma_map_single(dev, cmd->request_buffer,
			       cmd->request_bufflen,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(data->va)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer for command!\n");
		return 0;
	}
	data->len = cmd->request_bufflen;
	data->key = 0;	/* no memory key for a direct descriptor */
	set_srp_direction(cmd, srp_cmd, 1);
	return 1;
}

/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd:	struct scsi_cmnd with the memory to be mapped
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	dma device for which to map dma memory
 *
 * Called by scsi_cmd_to_srp_cmd() when converting scsi cmds to srp cmds
 * Returns 1 on success.
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
				struct srp_event_struct *evt_struct,
				struct srp_cmd *srp_cmd, struct device *dev)
{
	switch (cmd->sc_data_direction) {
	case DMA_FROM_DEVICE:
	case DMA_TO_DEVICE:
		break;
	case DMA_NONE:
		/* No data transfer: nothing to map */
		return 1;
	case DMA_BIDIRECTIONAL:
		printk(KERN_ERR
		       "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
		return 0;
	default:
		printk(KERN_ERR
		       "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
		       cmd->sc_data_direction);
		return 0;
	}

	if (!cmd->request_buffer)
		return 1;
	/* use_sg != 0 means a scatter-gather list; otherwise the request
	 * buffer is a single contiguous region */
	if (cmd->use_sg)
		return map_sg_data(cmd, evt_struct, srp_cmd, dev);
	return map_single_data(cmd, srp_cmd, dev);
}

/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */

/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct:	evt_struct to be sent
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Returns the value returned from ibmvscsi_send_crq().
(Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
				   struct ibmvscsi_host_data *hostdata)
{
	/* The CRQ entry is handed to firmware as two raw 64-bit words */
	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
	int rc;

	/* If we have exhausted our request limit, just fail this request.
	 * Note that there are rare cases involving driver generated requests
	 * (such as task management requests) that the mid layer may think we
	 * can handle more requests (can_queue) when we actually can't
	 */
	if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
	    (atomic_dec_if_positive(&hostdata->request_limit) < 0))
		goto send_error;

	/* Copy the IU into the transfer area */
	*evt_struct->xfer_iu = evt_struct->iu;
	/* The event pointer itself serves as the SRP tag, so the response
	 * can be matched back to this request */
	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

	/* Add this to the sent list.  We need to do this
	 * before we actually send
	 * in case it comes back REALLY fast
	 */
	list_add_tail(&evt_struct->list, &hostdata->sent);

	if ((rc =
	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
		/* Send failed: take it back off the sent list */
		list_del(&evt_struct->list);

		printk(KERN_ERR "ibmvscsi: send error %d\n",
		       rc);
		goto send_error;
	}

	return 0;

 send_error:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	free_event_struct(&hostdata->pool, evt_struct);
	return SCSI_MLQUEUE_HOST_BUSY;
}

/**
 * handle_cmd_rsp: -  Handle responses from commands
 * @evt_struct:	srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
	struct scsi_cmnd *cmnd = evt_struct->cmnd;

	/* Warn (rate-limited) on an unexpected response opcode, but still
	 * process it below */
	if (unlikely(rsp->opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: bad SRP RSP type %d\n",
			       rsp->opcode);
	}

	if (cmnd) {
		cmnd->result = rsp->status;
		/* On CHECK_CONDITION, copy the sense data returned in the
		 * response into the midlayer's sense buffer */
		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
			memcpy(cmnd->sense_buffer,
			       rsp->data,
			       rsp->sense_data_len);
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       evt_struct->hostdata->dev);

		/* Propagate over/underflow residual counts to the midlayer */
		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
			cmnd->resid = rsp->data_out_res_cnt;
		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
			cmnd->resid = rsp->data_in_res_cnt;
	}

	if (evt_struct->cmnd_done)
		evt_struct->cmnd_done(cmnd);
}

/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev:	struct scsi_device
 *
 **/
static inline u16 lun_from_dev(struct scsi_device *dev)
{
	/* Pack a 16-bit SRP logical-unit address: 0x2 flag in the top bits,
	 * then target id, channel and lun */
	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}

/**
 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
 * @cmnd:	struct scsi_cmnd to be executed
 * @done:	Callback function to be called when cmnd is completed
 */
static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
				 void (*done) (struct scsi_cmnd *))
{
	struct srp_cmd *srp_cmd;
	struct srp_event_struct *evt_struct;
	struct srp_indirect_buf *indirect;
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
	u16 lun = lun_from_dev(cmnd->device);
	u8 out_fmt, in_fmt;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Set up the actual SRP IU */
	srp_cmd = &evt_struct->iu.srp.cmd;
	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
	srp_cmd->opcode = SRP_CMD;
	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
	srp_cmd->lun = ((u64) lun) << 48;

	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
		printk(KERN_ERR
"ibmvscsi: couldn't convert cmd to srp_cmd\n");		free_event_struct(&hostdata->pool, evt_struct);		return SCSI_MLQUEUE_HOST_BUSY;	}	init_event_struct(evt_struct,			  handle_cmd_rsp,			  VIOSRP_SRP_FORMAT,			  cmnd->timeout_per_command/HZ);	evt_struct->cmnd = cmnd;	evt_struct->cmnd_done = done;	/* Fix up dma address of the buffer itself */	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;	out_fmt = srp_cmd->buf_fmt >> 4;	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||	     out_fmt == SRP_DATA_DESC_INDIRECT) &&	    indirect->table_desc.va == 0) {		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +			offsetof(struct srp_cmd, add_data) +			offsetof(struct srp_indirect_buf, desc_list);	}	return ibmvscsi_send_srp_event(evt_struct, hostdata);}/* ------------------------------------------------------------ * Routines for driver initialization *//** * adapter_info_rsp: - Handle response to MAD adapter info request * @evt_struct:	srp_event_struct with the response * * Used as a "done" callback by when sending adapter_info. 
Gets called
 * by ibmvscsi_handle_crq()
 */
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	/* Release the DMA mapping set up by send_mad_adapter_info() */
	dma_unmap_single(hostdata->dev,
			 evt_struct->iu.mad.adapter_info.buffer,
			 evt_struct->iu.mad.adapter_info.common.length,
			 DMA_BIDIRECTIONAL);

	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
		printk("ibmvscsi: error %d getting adapter info\n",
		       evt_struct->xfer_iu->mad.adapter_info.common.status);
	} else {
		printk("ibmvscsi: host srp version: %s, "
		       "host partition %s (%d), OS %d, max io %u\n",
		       hostdata->madapter_info.srp_version,
		       hostdata->madapter_info.partition_name,
		       hostdata->madapter_info.partition_number,
		       hostdata->madapter_info.os_type,
		       hostdata->madapter_info.port_max_txu[0]);

		/* port_max_txu is presumably in bytes: the >> 9 converts to
		 * 512-byte sectors for max_sectors — TODO confirm units */
		if (hostdata->madapter_info.port_max_txu[0])
			hostdata->host->max_sectors =
				hostdata->madapter_info.port_max_txu[0] >> 9;

		/* NOTE(review): os_type == 3 appears to identify a specific
		 * host OS whose SRP versions up to "1.6a" (lexicographic
		 * strcmp) can't do large transfers — confirm against the
		 * viosrp interface definitions */
		if (hostdata->madapter_info.os_type == 3 &&
		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
			printk("ibmvscsi: host (Ver. %s) doesn't support large"
			       "transfers\n",
			       hostdata->madapter_info.srp_version);
			printk("ibmvscsi: limiting scatterlists to %d\n",
			       MAX_INDIRECT_BUFS);
			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
		}
	}
}

/**
 * send_mad_adapter_info: - Sends the mad adapter info request
 *      and stores the result so it can be retrieved with
 *      sysfs.  We COULD consider causing a failure if the
 *      returned SRP version doesn't match ours.
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Returns nothing; failures are only logged.
 */
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_adapter_info *req;
	struct srp_event_struct *evt_struct;
	dma_addr_t addr;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
		       "for ADAPTER_INFO_REQ!\n");
		return;
	}

	init_event_struct(evt_struct,
			  adapter_info_rsp,
			  VIOSRP_MAD_FORMAT,
			  init_timeout * HZ);

	req = &evt_struct->iu.mad.adapter_info;
	memset(req, 0x00, sizeof(*req));

	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
	req->common.length = sizeof(hostdata->madapter_info);
	/* Keep the DMA handle in a local as well, so it can still be
	 * unmapped below after the event struct has been consumed */
	req->buffer = addr = dma_map_single(hostdata->dev,
					    &hostdata->madapter_info,
					    sizeof(hostdata->madapter_info),
					    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(req->buffer)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer "
		       "for adapter_info!\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return;
	}

	if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
		printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
		dma_unmap_single(hostdata->dev,
				 addr,
				 sizeof(hostdata->madapter_info),
				 DMA_BIDIRECTIONAL);
	}
};

/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login. Gets called
 * by ibmvscsi_handle_crq()
 */
static void login_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
	case SRP_LOGIN_RSP:	/* it worked! */

/* (code-viewer UI chrome removed here: a keyboard-shortcut help panel —
 * copy Ctrl+C, search Ctrl+F, full screen F11, font size Ctrl+= / Ctrl+-
 * — which is not part of the kernel source) */