
📄 sbp2.c

📁 ieee1394 driver, not much more to say: it can be used directly in the Linux 2.6 kernel.
💻 C
📖 Page 1 of 5
			/* FIXME: Check for SBP2_DEVICE_TYPE_AND_LUN_KEY.
			 * Its "ordered" bit has consequences for command ORB
			 * list handling. See SBP-2 clauses 4.6, 7.4.11, 10.2 */
			break;
		}
	}

	workarounds = sbp2_default_workarounds;

	if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
		for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
			if (sbp2_workarounds_table[i].firmware_revision !=
			    SBP2_ROM_VALUE_WILDCARD &&
			    sbp2_workarounds_table[i].firmware_revision !=
			    (firmware_revision & 0xffff00))
				continue;
			if (sbp2_workarounds_table[i].model_id !=
			    SBP2_ROM_VALUE_WILDCARD &&
			    sbp2_workarounds_table[i].model_id != ud->model_id)
				continue;
			workarounds |= sbp2_workarounds_table[i].workarounds;
			break;
		}

	if (workarounds)
		SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
			  "(firmware_revision 0x%06x, vendor_id 0x%06x,"
			  " model_id 0x%06x)",
			  NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
			  workarounds, firmware_revision,
			  ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
			  ud->model_id);

	/* We would need one SCSI host template for each target to adjust
	 * max_sectors on the fly, therefore warn only. */
	if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
	    (sbp2_max_sectors * 512) > (128 * 1024))
		SBP2_INFO("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
			  "max transfer size. WARNING: Current max_sectors "
			  "setting is larger than 128KB (%d sectors)",
			  NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
			  sbp2_max_sectors);

	/* If this is a logical unit directory entry, process the parent
	 * to get the values. */
	if (ud->flags & UNIT_DIRECTORY_LUN_DIRECTORY) {
		struct unit_directory *parent_ud = container_of(
			ud->device.parent, struct unit_directory, device);
		sbp2_parse_unit_directory(lu, parent_ud);
	} else {
		lu->management_agent_addr = management_agent_addr;
		lu->workarounds = workarounds;
		if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
			lu->lun = ORB_SET_LUN(ud->lun);
	}
}

#define SBP2_PAYLOAD_TO_BYTES(p) (1 << ((p) + 2))

/*
 * This function is called in order to determine the max speed and packet
 * size we can use in our ORBs. Note, that we (the driver and host) only
 * initiate the transaction. The SBP-2 device actually transfers the data
 * (by reading from the DMA area we tell it). This means that the SBP-2
 * device decides the actual maximum data it can transfer. We just tell it
 * the speed that it needs to use, and the max_rec the host supports, and
 * it takes care of the rest.
 */
static int sbp2_max_speed_and_size(struct sbp2_lu *lu)
{
	struct sbp2_fwhost_info *hi = lu->hi;
	u8 payload;

	lu->speed_code = hi->host->speed[NODEID_TO_NODE(lu->ne->nodeid)];

	if (lu->speed_code > sbp2_max_speed) {
		lu->speed_code = sbp2_max_speed;
		SBP2_INFO("Reducing speed to %s",
			  hpsb_speedto_str[sbp2_max_speed]);
	}

	/* Payload size is the lesser of what our speed supports and what
	 * our host supports.  */
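	/* Editor's worked example (the sbp2_speedto_max_payload table values
	 * are assumed, not quoted): SBP2_PAYLOAD_TO_BYTES(p) is 2^(p+2), and
	 * a host's max_rec field advertises packets of 2^(max_rec+1) bytes,
	 * hence the "max_rec - 1" below. At S400 with max_rec = 10:
	 * payload = min(9, 9) = 9, i.e. 2048-byte data packets. */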
	payload = min(sbp2_speedto_max_payload[lu->speed_code],
		      (u8) (hi->host->csr.max_rec - 1));

	/* If physical DMA is off, work around limitation in ohci1394:
	 * packet size must not exceed PAGE_SIZE */
	if (lu->ne->host->low_addr_space < (1ULL << 32))
		while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > PAGE_SIZE &&
		       payload)
			payload--;

	SBP2_INFO("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
		  NODE_BUS_ARGS(hi->host, lu->ne->nodeid),
		  hpsb_speedto_str[lu->speed_code],
		  SBP2_PAYLOAD_TO_BYTES(payload));

	lu->max_payload_size = payload;
	return 0;
}

static int sbp2_agent_reset(struct sbp2_lu *lu, int wait)
{
	quadlet_t data;
	u64 addr;
	int retval;
	unsigned long flags;

	/* flush lu->protocol_work */
	if (wait)
		flush_scheduled_work();

	data = ntohl(SBP2_AGENT_RESET_DATA);
	addr = lu->command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;

	if (wait)
		retval = hpsb_node_write(lu->ne, addr, &data, 4);
	else
		retval = sbp2util_node_write_no_wait(lu->ne, addr, &data, 4);

	if (retval < 0) {
		SBP2_ERR("hpsb_node_write failed.\n");
		return -EIO;
	}

	/* make sure that the ORB_POINTER is written on next command */
	spin_lock_irqsave(&lu->cmd_orb_lock, flags);
	lu->last_orb = NULL;
	spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);

	return 0;
}

static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
				     struct sbp2_fwhost_info *hi,
				     struct sbp2_command_info *cmd,
				     unsigned int scsi_use_sg,
				     struct scatterlist *sgpnt,
				     u32 orb_direction,
				     enum dma_data_direction dma_dir)
{
	cmd->dma_dir = dma_dir;
	orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
	orb->misc |= ORB_SET_DIRECTION(orb_direction);

	/* special case if only one element (and less than 64KB in size) */
	if ((scsi_use_sg == 1) &&
	    (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {

		cmd->dma_size = sgpnt[0].length;
		cmd->dma_type = CMD_DMA_PAGE;
		cmd->cmd_dma = dma_map_page(hi->host->device.parent,
					    sgpnt[0].page, sgpnt[0].offset,
					    cmd->dma_size, cmd->dma_dir);

		orb->data_descriptor_lo = cmd->cmd_dma;
		orb->misc |= ORB_SET_DATA_SIZE(cmd->dma_size);

	} else {
		struct sbp2_unrestricted_page_table *sg_element =
						&cmd->scatter_gather_element[0];
		u32 sg_count, sg_len;
		dma_addr_t sg_addr;
		int i, count = dma_map_sg(hi->host->device.parent, sgpnt,
					  scsi_use_sg, dma_dir);

		cmd->dma_size = scsi_use_sg;
		cmd->sge_buffer = sgpnt;

		/* use page tables (s/g) */
		orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
		orb->data_descriptor_lo = cmd->sge_dma;

		/* loop through and fill out our SBP-2 page tables
		 * (and split up anything too large) */
		for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
			sg_len = sg_dma_len(sgpnt);
			sg_addr = sg_dma_address(sgpnt);
			while (sg_len) {
				sg_element[sg_count].segment_base_lo = sg_addr;
				if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
					sg_element[sg_count].length_segment_base_hi =
						PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
					sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
					sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
				} else {
					sg_element[sg_count].length_segment_base_hi =
						PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
					sg_len = 0;
				}
				sg_count++;
			}
		}

		orb->misc |= ORB_SET_DATA_SIZE(sg_count);

		sbp2util_cpu_to_be32_buffer(sg_element,
				(sizeof(struct sbp2_unrestricted_page_table)) *
				sg_count);
	}
}
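
/*
 * Editor's note: the scatter/gather path above and the single-buffer path
 * below share one splitting rule: any contiguous region larger than
 * SBP2_MAX_SG_ELEMENT_LENGTH is broken into multiple page-table segments
 * of at most that size. A stand-alone sketch of the arithmetic appears
 * after the listing.
 */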
static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
					struct sbp2_fwhost_info *hi,
					struct sbp2_command_info *cmd,
					struct scatterlist *sgpnt,
					u32 orb_direction,
					unsigned int scsi_request_bufflen,
					void *scsi_request_buffer,
					enum dma_data_direction dma_dir)
{
	cmd->dma_dir = dma_dir;
	cmd->dma_size = scsi_request_bufflen;
	cmd->dma_type = CMD_DMA_SINGLE;
	cmd->cmd_dma = dma_map_single(hi->host->device.parent,
				      scsi_request_buffer,
				      cmd->dma_size, cmd->dma_dir);
	orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
	orb->misc |= ORB_SET_DIRECTION(orb_direction);

	/* handle case where we get a command w/o s/g enabled
	 * (but check for transfers larger than 64K) */
	if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {

		orb->data_descriptor_lo = cmd->cmd_dma;
		orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);

	} else {
		/* The buffer is too large. Turn this into page tables. */
		struct sbp2_unrestricted_page_table *sg_element =
						&cmd->scatter_gather_element[0];
		u32 sg_count, sg_len;
		dma_addr_t sg_addr;

		orb->data_descriptor_lo = cmd->sge_dma;
		orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);

		/* fill out our SBP-2 page tables; split up the large buffer */
		sg_count = 0;
		sg_len = scsi_request_bufflen;
		sg_addr = cmd->cmd_dma;
		while (sg_len) {
			sg_element[sg_count].segment_base_lo = sg_addr;
			if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
				sg_element[sg_count].length_segment_base_hi =
					PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
				sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
				sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
			} else {
				sg_element[sg_count].length_segment_base_hi =
					PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
				sg_len = 0;
			}
			sg_count++;
		}

		orb->misc |= ORB_SET_DATA_SIZE(sg_count);

		sbp2util_cpu_to_be32_buffer(sg_element,
				(sizeof(struct sbp2_unrestricted_page_table)) *
				sg_count);
	}
}

static void sbp2_create_command_orb(struct sbp2_lu *lu,
				    struct sbp2_command_info *cmd,
				    unchar *scsi_cmd,
				    unsigned int scsi_use_sg,
				    unsigned int scsi_request_bufflen,
				    void *scsi_request_buffer,
				    enum dma_data_direction dma_dir)
{
	struct sbp2_fwhost_info *hi = lu->hi;
	struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer;
	struct sbp2_command_orb *orb = &cmd->command_orb;
	u32 orb_direction;

	/*
	 * Set-up our command ORB.
	 *
	 * NOTE: We're doing unrestricted page tables (s/g), as this is
	 * best performance (at least with the devices I have). This means
	 * that data_size becomes the number of s/g elements, and
	 * page_size should be zero (for unrestricted).
	 */
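	/* Editor's note: each ORB is created with its next_ORB pointer marked
	 * null; sbp2_link_orb_command() later either hands it to the fetch
	 * agent directly or chains it to the previously sent ORB. */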
	orb->next_ORB_hi = ORB_SET_NULL_PTR(1);
	orb->next_ORB_lo = 0x0;
	orb->misc = ORB_SET_MAX_PAYLOAD(lu->max_payload_size);
	orb->misc |= ORB_SET_SPEED(lu->speed_code);
	orb->misc |= ORB_SET_NOTIFY(1);

	if (dma_dir == DMA_NONE)
		orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
	else if (dma_dir == DMA_TO_DEVICE && scsi_request_bufflen)
		orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
	else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen)
		orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
	else {
		SBP2_INFO("Falling back to DMA_NONE");
		orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
	}

	/* set up our page table stuff */
	if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
		orb->data_descriptor_hi = 0x0;
		orb->data_descriptor_lo = 0x0;
		orb->misc |= ORB_SET_DIRECTION(1);
	} else if (scsi_use_sg)
		sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sgpnt,
					 orb_direction, dma_dir);
	else
		sbp2_prep_command_orb_no_sg(orb, hi, cmd, sgpnt, orb_direction,
					    scsi_request_bufflen,
					    scsi_request_buffer, dma_dir);

	sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));

	memset(orb->cdb, 0, 12);
	memcpy(orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
}

static void sbp2_link_orb_command(struct sbp2_lu *lu,
				  struct sbp2_command_info *cmd)
{
	struct sbp2_fwhost_info *hi = lu->hi;
	struct sbp2_command_orb *last_orb;
	dma_addr_t last_orb_dma;
	u64 addr = lu->command_block_agent_addr;
	quadlet_t data[2];
	size_t length;
	unsigned long flags;

	dma_sync_single_for_device(hi->host->device.parent,
				   cmd->command_orb_dma,
				   sizeof(struct sbp2_command_orb),
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma,
				   sizeof(cmd->scatter_gather_element),
				   DMA_BIDIRECTIONAL);

	/* check to see if there are any previous orbs to use */
	spin_lock_irqsave(&lu->cmd_orb_lock, flags);
	last_orb = lu->last_orb;
	last_orb_dma = lu->last_orb_dma;
	if (!last_orb) {
		/*
		 * last_orb == NULL means: We know that the target's fetch agent
		 * is not active right now.
		 */
		addr += SBP2_ORB_POINTER_OFFSET;
		data[0] = ORB_SET_NODE_ID(hi->host->node_id);
		data[1] = cmd->command_orb_dma;
		sbp2util_cpu_to_be32_buffer(data, 8);
		length = 8;
	} else {
		/*
		 * last_orb != NULL means: We know that the target's fetch agent
		 * is (very probably) not dead or in reset state right now.
		 * We have an ORB already sent that we can append a new one to.
		 * The target's fetch agent may or may not have read this
		 * previous ORB yet.
		 */
		dma_sync_single_for_cpu(hi->host->device.parent, last_orb_dma,
					sizeof(struct sbp2_command_orb),
					DMA_TO_DEVICE);
		last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
		wmb();
		/* Tells hardware that this pointer is valid */
		last_orb->next_ORB_hi = 0;
		dma_sync_single_for_device(hi->host->device.parent,
					   last_orb_dma,
					   sizeof(struct sbp2_command_orb),
					   DMA_TO_DEVICE);
		addr += SBP2_DOORBELL_OFFSET;
		data[0] = 0;
		length = 4;
	}
	lu->last_orb = &cmd->command_orb;
	lu->last_orb_dma = cmd->command_orb_dma;
	spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);

	if (sbp2util_node_write_no_wait(lu->ne, addr, data, length)) {
		/*
		 * sbp2util_node_write_no_wait failed. We certainly ran out
		 * of transaction labels, perhaps just because there were no
		 * context switches which gave khpsbpkt a chance to collect
		 * free tlabels. Try again in non-atomic context. If necessary,
		 * the workqueue job will sleep to guaranteedly get a tlabel.
		 * We do not accept new commands until the job is over.
		 */
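		/* Editor's note: PREPARE_WORK() re-points lu->protocol_work at
		 * the appropriate retry handler (doorbell write vs. ORB-pointer
		 * write) before the work is scheduled. */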
		scsi_block_requests(lu->shost);
		PREPARE_WORK(&lu->protocol_work,
			     last_orb ? sbp2util_write_doorbell:
					sbp2util_write_orb_pointer);
		schedule_work(&lu->protocol_work);
	}
}

static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
			     void (*done)(struct scsi_cmnd *))
{
	unchar *scsi_cmd = (unchar *)SCpnt->cmnd;
	unsigned int request_bufflen = SCpnt->request_bufflen;
	struct sbp2_command_info *cmd;

	cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
	if (!cmd)
		return -EIO;

	sbp2_create_command_orb(lu, cmd, scsi_cmd, SCpnt->use_sg,
				request_bufflen, SCpnt->request_buffer,
				SCpnt->sc_data_direction);
	sbp2_link_orb_command(lu, cmd);

	return 0;
}

/*
 * Translates SBP-2 status into SCSI sense data for check conditions
 */
static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status,
					      unchar *sense_data)
{
	/* OK, it's pretty ugly... ;-) */
	sense_data[0] = 0x70;
	sense_data[1] = 0x0;
	sense_data[2] = sbp2_status[9];
	sense_data[3] = sbp2_status[12];
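
The listing continues on the next page. As an aside, the segment-splitting
arithmetic shared by sbp2_prep_command_orb_sg() and
sbp2_prep_command_orb_no_sg() can be exercised in isolation. The sketch below
is an editor's illustration, not driver code: MAX_SEGMENT_LEN,
split_into_segments(), and the 64 KB limit are assumptions standing in for
SBP2_MAX_SG_ELEMENT_LENGTH and the loops above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for SBP2_MAX_SG_ELEMENT_LENGTH (64 KB assumed). */
#define MAX_SEGMENT_LEN (64 * 1024)

/* Split one contiguous DMA region into SBP-2-style page-table segments,
 * mirroring the "while (sg_len)" loops in the driver. Returns the number
 * of segments produced. */
static unsigned int split_into_segments(uint64_t addr, uint32_t len)
{
	unsigned int count = 0;

	while (len) {
		uint32_t seg = len > MAX_SEGMENT_LEN ? MAX_SEGMENT_LEN : len;

		printf("segment %u: base 0x%llx, length %u\n",
		       count, (unsigned long long)addr, seg);
		addr += seg;
		len -= seg;
		count++;
	}
	return count;
}

int main(void)
{
	/* A 150 KB region splits into 64 KB + 64 KB + 22 KB = 3 segments. */
	printf("total: %u\n", split_into_segments(0x10000000ULL, 150 * 1024));
	return 0;
}

The driver additionally writes each (base, length) pair into
cmd->scatter_gather_element[] and byte-swaps the finished table with
sbp2util_cpu_to_be32_buffer() before the target reads it over the bus.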
