
📄 fw-sbp2.c

📁 Linux kernel source code
💻 C
				     SBP2_RECONNECT_REQUEST,
				     lu->login_id, NULL) < 0) {
		if (lu->retries++ >= 5) {
			fw_error("failed to reconnect to %s\n",
				 unit->device.bus_id);
			/* Fall back and try to log in again. */
			lu->retries = 0;
			PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
		}
		if (queue_delayed_work(sbp2_wq, &lu->work, DIV_ROUND_UP(HZ, 5)))
			kref_get(&lu->tgt->kref);
		kref_put(&lu->tgt->kref, sbp2_release_target);
		return;
	}

	lu->generation        = generation;
	lu->tgt->node_id      = node_id;
	lu->tgt->address_high = local_node_id << 16;

	fw_notify("reconnected to %s LUN %04x (%d retries)\n",
		  unit->device.bus_id, lu->lun, lu->retries);

	sbp2_agent_reset(lu);
	sbp2_cancel_orbs(lu);

	kref_put(&lu->tgt->kref, sbp2_release_target);
}

static void sbp2_update(struct fw_unit *unit)
{
	struct sbp2_target *tgt = unit->device.driver_data;
	struct sbp2_logical_unit *lu;

	fw_device_enable_phys_dma(fw_device(unit->device.parent));

	/*
	 * Fw-core serializes sbp2_update() against sbp2_remove().
	 * Iteration over tgt->lu_list is therefore safe here.
	 */
	list_for_each_entry(lu, &tgt->lu_list, link) {
		lu->retries = 0;
		if (queue_delayed_work(sbp2_wq, &lu->work, 0))
			kref_get(&tgt->kref);
	}
}

#define SBP2_UNIT_SPEC_ID_ENTRY	0x0000609e
#define SBP2_SW_VERSION_ENTRY	0x00010483

static const struct fw_device_id sbp2_id_table[] = {
	{
		.match_flags  = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
		.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
		.version      = SBP2_SW_VERSION_ENTRY,
	},
	{ }
};

static struct fw_driver sbp2_driver = {
	.driver   = {
		.owner  = THIS_MODULE,
		.name   = sbp2_driver_name,
		.bus    = &fw_bus_type,
		.probe  = sbp2_probe,
		.remove = sbp2_remove,
	},
	.update   = sbp2_update,
	.id_table = sbp2_id_table,
};

static unsigned int
sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
{
	int sam_status;

	sense_data[0] = 0x70;
	sense_data[1] = 0x0;
	sense_data[2] = sbp2_status[1];
	sense_data[3] = sbp2_status[4];
	sense_data[4] = sbp2_status[5];
	sense_data[5] = sbp2_status[6];
	sense_data[6] = sbp2_status[7];
	sense_data[7] = 10;
	sense_data[8] = sbp2_status[8];
	sense_data[9] = sbp2_status[9];
	sense_data[10] = sbp2_status[10];
	sense_data[11] = sbp2_status[11];
	sense_data[12] = sbp2_status[2];
	sense_data[13] = sbp2_status[3];
	sense_data[14] = sbp2_status[12];
	sense_data[15] = sbp2_status[13];

	sam_status = sbp2_status[0] & 0x3f;

	switch (sam_status) {
	case SAM_STAT_GOOD:
	case SAM_STAT_CHECK_CONDITION:
	case SAM_STAT_CONDITION_MET:
	case SAM_STAT_BUSY:
	case SAM_STAT_RESERVATION_CONFLICT:
	case SAM_STAT_COMMAND_TERMINATED:
		return DID_OK << 16 | sam_status;

	default:
		return DID_ERROR << 16;
	}
}

static void
complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
{
	struct sbp2_command_orb *orb =
		container_of(base_orb, struct sbp2_command_orb, base);
	struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
	int result;

	if (status != NULL) {
		if (STATUS_GET_DEAD(*status))
			sbp2_agent_reset(orb->lu);

		switch (STATUS_GET_RESPONSE(*status)) {
		case SBP2_STATUS_REQUEST_COMPLETE:
			result = DID_OK << 16;
			break;
		case SBP2_STATUS_TRANSPORT_FAILURE:
			result = DID_BUS_BUSY << 16;
			break;
		case SBP2_STATUS_ILLEGAL_REQUEST:
		case SBP2_STATUS_VENDOR_DEPENDENT:
		default:
			result = DID_ERROR << 16;
			break;
		}

		if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
			result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
							   orb->cmd->sense_buffer);
	} else {
		/*
		 * If the orb completes with status == NULL, something
		 * went wrong, typically a bus reset happened mid-orb
		 * or when sending the write (less likely).
		 */
		result = DID_BUS_BUSY << 16;
	}

	dma_unmap_single(device->card->device, orb->base.request_bus,
			 sizeof(orb->request), DMA_TO_DEVICE);

	if (scsi_sg_count(orb->cmd) > 0)
		dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
			     scsi_sg_count(orb->cmd),
			     orb->cmd->sc_data_direction);

	if (orb->page_table_bus != 0)
		dma_unmap_single(device->card->device, orb->page_table_bus,
				 sizeof(orb->page_table), DMA_TO_DEVICE);

	orb->cmd->result = result;
	orb->done(orb->cmd);
}

static int
sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
		     struct sbp2_logical_unit *lu)
{
	struct scatterlist *sg;
	int sg_len, l, i, j, count;
	dma_addr_t sg_addr;

	sg = scsi_sglist(orb->cmd);
	count = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
			   orb->cmd->sc_data_direction);
	if (count == 0)
		goto fail;

	/*
	 * Handle the special case where there is only one element in
	 * the scatter list by converting it to an immediate block
	 * request. This is also a workaround for broken devices such
	 * as the second generation iPod which doesn't support page
	 * tables.
	 */
	if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
		orb->request.data_descriptor.high = lu->tgt->address_high;
		orb->request.data_descriptor.low  = sg_dma_address(sg);
		orb->request.misc |= COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
		return 0;
	}

	/*
	 * Convert the scatterlist to an sbp2 page table.  If any
	 * scatterlist entries are too big for sbp2, we split them as we
	 * go.  Even if we ask the block I/O layer to not give us sg
	 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
	 * during DMA mapping, and Linux currently doesn't prevent this.
	 */
	for (i = 0, j = 0; i < count; i++) {
		sg_len = sg_dma_len(sg + i);
		sg_addr = sg_dma_address(sg + i);
		while (sg_len) {
			/* FIXME: This won't get us out of the pinch. */
			if (unlikely(j >= ARRAY_SIZE(orb->page_table))) {
				fw_error("page table overflow\n");
				goto fail_page_table;
			}
			l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
			orb->page_table[j].low = sg_addr;
			orb->page_table[j].high = (l << 16);
			sg_addr += l;
			sg_len -= l;
			j++;
		}
	}

	fw_memcpy_to_be32(orb->page_table, orb->page_table,
			  sizeof(orb->page_table[0]) * j);
	orb->page_table_bus =
		dma_map_single(device->card->device, orb->page_table,
			       sizeof(orb->page_table), DMA_TO_DEVICE);
	if (dma_mapping_error(orb->page_table_bus))
		goto fail_page_table;

	/*
	 * The data_descriptor pointer is the one case where we need
	 * to fill in the node ID part of the address.  All other
	 * pointers assume that the data referenced reside on the
	 * initiator (i.e. us), but data_descriptor can refer to data
	 * on other nodes so we need to put our ID in descriptor.high.
	 */
	orb->request.data_descriptor.high = lu->tgt->address_high;
	orb->request.data_descriptor.low  = orb->page_table_bus;
	orb->request.misc |=
		COMMAND_ORB_PAGE_TABLE_PRESENT |
		COMMAND_ORB_DATA_SIZE(j);

	return 0;

 fail_page_table:
	dma_unmap_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
		     orb->cmd->sc_data_direction);
 fail:
	return -ENOMEM;
}

/* SCSI stack integration */

static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
{
	struct sbp2_logical_unit *lu = cmd->device->hostdata;
	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
	struct sbp2_command_orb *orb;
	unsigned max_payload;
	int retval = SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Bidirectional commands are not yet implemented, and unknown
	 * transfer direction not handled.
	 */
	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
		fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
		cmd->result = DID_ERROR << 16;
		done(cmd);
		return 0;
	}

	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
	if (orb == NULL) {
		fw_notify("failed to alloc orb\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Initialize rcode to something not RCODE_COMPLETE. */
	orb->base.rcode = -1;
	kref_init(&orb->base.kref);

	orb->lu   = lu;
	orb->done = done;
	orb->cmd  = cmd;

	orb->request.next.high   = SBP2_ORB_NULL;
	orb->request.next.low    = 0x0;
	/*
	 * At speed 100 we can do 512 bytes per packet, at speed 200,
	 * 1024 bytes per packet etc.  The SBP-2 max_payload field
	 * specifies the max payload size as 2 ^ (max_payload + 2), so
	 * if we set this to max_speed + 7, we get the right value.
	 */
	max_payload = min(device->max_speed + 7,
			  device->card->max_receive - 1);
	orb->request.misc =
		COMMAND_ORB_MAX_PAYLOAD(max_payload) |
		COMMAND_ORB_SPEED(device->max_speed) |
		COMMAND_ORB_NOTIFY;

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		orb->request.misc |=
			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		orb->request.misc |=
			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);

	if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
		goto out;

	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));

	memset(orb->request.command_block,
	       0, sizeof(orb->request.command_block));
	memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));

	orb->base.callback = complete_command_orb;
	orb->base.request_bus =
		dma_map_single(device->card->device, &orb->request,
			       sizeof(orb->request), DMA_TO_DEVICE);
	if (dma_mapping_error(orb->base.request_bus))
		goto out;

	sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
		      lu->command_block_agent_address + SBP2_ORB_POINTER);
	retval = 0;
 out:
	kref_put(&orb->base.kref, free_orb);
	return retval;
}

static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
{
	struct sbp2_logical_unit *lu = sdev->hostdata;

	sdev->allow_restart = 1;

	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
		sdev->inquiry_len = 36;

	return 0;
}

static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
{
	struct sbp2_logical_unit *lu = sdev->hostdata;

	sdev->use_10_for_rw = 1;

	if (sdev->type == TYPE_ROM)
		sdev->use_10_for_ms = 1;

	if (sdev->type == TYPE_DISK &&
	    lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
		sdev->skip_ms_page_8 = 1;

	if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
		sdev->fix_capacity = 1;

	if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
		blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);

	return 0;
}

/*
 * Called by scsi stack when something has really gone wrong.  Usually
 * called when a command has timed-out for some reason.
 */
static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
{
	struct sbp2_logical_unit *lu = cmd->device->hostdata;

	fw_notify("sbp2_scsi_abort\n");
	sbp2_agent_reset(lu);
	sbp2_cancel_orbs(lu);

	return SUCCESS;
}

/*
 * Format of /sys/bus/scsi/devices/.../ieee1394_id:
 * u64 EUI-64 : u24 directory_ID : u16 LUN  (all printed in hexadecimal)
 *
 * This is the concatenation of target port identifier and logical unit
 * identifier as per SAM-2...SAM-4 annex A.
 */
static ssize_t
sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct sbp2_logical_unit *lu;
	struct fw_device *device;

	if (!sdev)
		return 0;

	lu = sdev->hostdata;
	device = fw_device(lu->tgt->unit->device.parent);

	return sprintf(buf, "%08x%08x:%06x:%04x\n",
			device->config_rom[3], device->config_rom[4],
			lu->tgt->directory_id, lu->lun);
}

static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);

static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
	&dev_attr_ieee1394_id,
	NULL
};

static struct scsi_host_template scsi_driver_template = {
	.module			= THIS_MODULE,
	.name			= "SBP-2 IEEE-1394",
	.proc_name		= sbp2_driver_name,
	.queuecommand		= sbp2_scsi_queuecommand,
	.slave_alloc		= sbp2_scsi_slave_alloc,
	.slave_configure	= sbp2_scsi_slave_configure,
	.eh_abort_handler	= sbp2_scsi_abort,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.cmd_per_lun		= 1,
	.can_queue		= 1,
	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("SCSI over IEEE1394");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_SBP2_MODULE
MODULE_ALIAS("sbp2");
#endif

static int __init sbp2_init(void)
{
	sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
	if (!sbp2_wq)
		return -ENOMEM;

	return driver_register(&sbp2_driver.driver);
}

static void __exit sbp2_cleanup(void)
{
	driver_unregister(&sbp2_driver.driver);
	destroy_workqueue(sbp2_wq);
}

module_init(sbp2_init);
module_exit(sbp2_cleanup);
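
Note on the max_payload encoding: the comment in sbp2_scsi_queuecommand() above states that the SBP-2 max_payload field specifies a payload of 2 ^ (max_payload + 2) bytes, and that an IEEE 1394 async packet carries 512 bytes at speed 100, doubling with each speed step, so setting max_payload to max_speed + 7 matches the bus limit. The following stand-alone, user-space sketch (illustration only, not part of fw-sbp2.c) works through that arithmetic:

/*
 * Hypothetical illustration only -- reproduces the 2 ^ (max_payload + 2)
 * encoding described in the driver comment; none of this code is in the
 * kernel driver itself.
 */
#include <stdio.h>

int main(void)
{
	unsigned int speed;	/* IEEE 1394 speed code: 0 = S100, 1 = S200, 2 = S400 */

	for (speed = 0; speed <= 2; speed++) {
		unsigned int max_payload = speed + 7;			/* value the driver would pick */
		unsigned int orb_bytes = 1u << (max_payload + 2);	/* SBP-2 encoding of the payload */
		unsigned int bus_bytes = 512u << speed;			/* async payload limit on the bus */

		printf("S%u: max_payload=%u -> %u bytes (bus limit %u bytes)\n",
		       100u << speed, max_payload, orb_bytes, bus_bytes);
	}
	return 0;
}

For S100, S200 and S400 this yields max_payload values of 7, 8 and 9, i.e. 512, 1024 and 2048 bytes, which agree with the per-packet bus limits; the driver additionally clamps the value against the controller's receive limit, device->card->max_receive - 1.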
