sbp2.c - IEEE 1394 driver implementation (C)
/*
 * Waits for completion of an SBP-2 access request.
 * Returns nonzero if timed out or prematurely interrupted.
 */
static int sbp2util_access_timeout(struct sbp2_lu *lu, int timeout)
{
	long leftover;

	leftover = wait_event_interruptible_timeout(
			sbp2_access_wq, lu->access_complete, timeout);
	lu->access_complete = 0;
	return leftover <= 0;
}

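/*
 * Packet completion callback: frees the packet's transaction label and
 * then the packet itself.
 */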
static void sbp2_free_packet(void *packet)
{
	hpsb_free_tlabel(packet);
	hpsb_free_packet(packet);
}

/*
 * This is much like hpsb_node_write(), except it ignores the response
 * subaction and returns immediately. Can be used from atomic context.
 */
static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
				       quadlet_t *buf, size_t len)
{
	struct hpsb_packet *packet;

	packet = hpsb_make_writepacket(ne->host, ne->nodeid, addr, buf, len);
	if (!packet)
		return -ENOMEM;

	hpsb_set_packet_complete_task(packet, sbp2_free_packet, packet);
	hpsb_node_fill_packet(ne, packet);
	if (hpsb_send_packet(packet) < 0) {
		sbp2_free_packet(packet);
		return -EIO;
	}
	return 0;
}

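/*
 * Writes to a register of the target's fetch agent (at the given offset
 * from the command block agent base) and, unless a bus reset intervened,
 * lets the SCSI layer submit new requests again.
 */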
static void sbp2util_notify_fetch_agent(struct sbp2_lu *lu, u64 offset,
					quadlet_t *data, size_t len)
{
	/* There is a small window after a bus reset within which the node
	 * entry's generation is current but the reconnect hasn't completed
	 * yet. */
	if (unlikely(atomic_read(&lu->state) == SBP2LU_STATE_IN_RESET))
		return;

	if (hpsb_node_write(lu->ne, lu->command_block_agent_addr + offset,
			    data, len))
		SBP2_ERR("sbp2util_notify_fetch_agent failed.");

	/* Now accept new SCSI commands, unless a bus reset happened during
	 * hpsb_node_write. */
	if (likely(atomic_read(&lu->state) != SBP2LU_STATE_IN_RESET))
		scsi_unblock_requests(lu->shost);
}

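/*
 * Workqueue handler: (re)starts the target's fetch agent by writing the
 * address of the most recently queued ORB into its ORB_POINTER register.
 */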
static void sbp2util_write_orb_pointer(struct work_struct *work)
{
	struct sbp2_lu *lu = container_of(work, struct sbp2_lu, protocol_work);
	quadlet_t data[2];

	data[0] = ORB_SET_NODE_ID(lu->hi->host->node_id);
	data[1] = lu->last_orb_dma;
	sbp2util_cpu_to_be32_buffer(data, 8);
	sbp2util_notify_fetch_agent(lu, SBP2_ORB_POINTER_OFFSET, data, 8);
}

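/*
 * Workqueue handler: rings the target's DOORBELL register so that the
 * fetch agent re-examines the ORB list for newly appended ORBs.
 */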
static void sbp2util_write_doorbell(struct work_struct *work)
{
	struct sbp2_lu *lu = container_of(work, struct sbp2_lu, protocol_work);

	sbp2util_notify_fetch_agent(lu, SBP2_DOORBELL_OFFSET, NULL, 4);
}

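/*
 * Preallocates the pool of command ORBs and maps each ORB and its
 * scatter/gather element list for DMA to the device. Fresh ORBs start
 * out on the "completed" (i.e. free) list.
 */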
static int sbp2util_create_command_orb_pool(struct sbp2_lu *lu)
{
	struct sbp2_fwhost_info *hi = lu->hi;
	struct sbp2_command_info *cmd;
	int i, orbs = sbp2_serialize_io ? 2 : SBP2_MAX_CMDS;

	for (i = 0; i < orbs; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			return -ENOMEM;
		cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
						&cmd->command_orb,
						sizeof(struct sbp2_command_orb),
						DMA_TO_DEVICE);
		cmd->sge_dma = dma_map_single(hi->host->device.parent,
					&cmd->scatter_gather_element,
					sizeof(cmd->scatter_gather_element),
					DMA_TO_DEVICE);
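		/* Note: neither mapping is checked with dma_mapping_error()
		 * here; later kernel versions add that check. */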
		INIT_LIST_HEAD(&cmd->list);
		list_add_tail(&cmd->list, &lu->cmd_orb_completed);
	}
	return 0;
}

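/*
 * Counterpart to sbp2util_create_command_orb_pool(): unmaps and frees
 * every ORB remaining on the free list.
 */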
static void sbp2util_remove_command_orb_pool(struct sbp2_lu *lu)
{
	struct hpsb_host *host = lu->hi->host;
	struct list_head *lh, *next;
	struct sbp2_command_info *cmd;
	unsigned long flags;

	spin_lock_irqsave(&lu->cmd_orb_lock, flags);
	if (!list_empty(&lu->cmd_orb_completed))
		list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
			cmd = list_entry(lh, struct sbp2_command_info, list);
			dma_unmap_single(host->device.parent,
					 cmd->command_orb_dma,
					 sizeof(struct sbp2_command_orb),
					 DMA_TO_DEVICE);
			dma_unmap_single(host->device.parent, cmd->sge_dma,
					 sizeof(cmd->scatter_gather_element),
					 DMA_TO_DEVICE);
			kfree(cmd);
		}
	spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
}

/*
 * Finds the sbp2_command for a given outstanding command ORB.
 * Only looks at the in-use list.
 */
static struct sbp2_command_info *sbp2util_find_command_for_orb(
				struct sbp2_lu *lu, dma_addr_t orb)
{
	struct sbp2_command_info *cmd;
	unsigned long flags;

	spin_lock_irqsave(&lu->cmd_orb_lock, flags);
	if (!list_empty(&lu->cmd_orb_inuse))
		list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
			if (cmd->command_orb_dma == orb) {
				spin_unlock_irqrestore(
						&lu->cmd_orb_lock, flags);
				return cmd;
			}
	spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
	return NULL;
}

/*
 * Finds the sbp2_command for a given outstanding SCpnt.
 * Only looks at the in-use list.
 * Must be called with lu->cmd_orb_lock held.
 */
static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(
				struct sbp2_lu *lu, void *SCpnt)
{
	struct sbp2_command_info *cmd;

	if (!list_empty(&lu->cmd_orb_inuse))
		list_for_each_entry(cmd, &lu->cmd_orb_inuse, list)
			if (cmd->Current_SCpnt == SCpnt)
				return cmd;
	return NULL;
}

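/*
 * Takes a free ORB off the "completed" list, associates it with the given
 * SCSI command and completion callback, and moves it to the in-use list.
 * Returns NULL if the pool is exhausted.
 */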
static struct sbp2_command_info *sbp2util_allocate_command_orb(
				struct sbp2_lu *lu,
				struct scsi_cmnd *Current_SCpnt,
				void (*Current_done)(struct scsi_cmnd *))
{
	struct list_head *lh;
	struct sbp2_command_info *cmd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&lu->cmd_orb_lock, flags);
	if (!list_empty(&lu->cmd_orb_completed)) {
		lh = lu->cmd_orb_completed.next;
		list_del(lh);
		cmd = list_entry(lh, struct sbp2_command_info, list);
		cmd->Current_done = Current_done;
		cmd->Current_SCpnt = Current_SCpnt;
		list_add_tail(&cmd->list, &lu->cmd_orb_inuse);
	} else
		SBP2_ERR("%s: no orbs available", __FUNCTION__);
	spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
	return cmd;
}

/*
 * Unmaps the DMAs of a command and moves the command to the completed ORB list.
 * Must be called with lu->cmd_orb_lock held.
 */
static void sbp2util_mark_command_completed(struct sbp2_lu *lu,
					    struct sbp2_command_info *cmd)
{
	struct hpsb_host *host = lu->ud->ne->host;

	if (cmd->cmd_dma) {
		if (cmd->dma_type == CMD_DMA_SINGLE)
			dma_unmap_single(host->device.parent, cmd->cmd_dma,
					 cmd->dma_size, cmd->dma_dir);
		else if (cmd->dma_type == CMD_DMA_PAGE)
			dma_unmap_page(host->device.parent, cmd->cmd_dma,
				       cmd->dma_size, cmd->dma_dir);
		/* XXX: Check for CMD_DMA_NONE bug */
		cmd->dma_type = CMD_DMA_NONE;
		cmd->cmd_dma = 0;
	}
	if (cmd->sge_buffer) {
		dma_unmap_sg(host->device.parent, cmd->sge_buffer,
			     cmd->dma_size, cmd->dma_dir);
		cmd->sge_buffer = NULL;
	}
	list_move_tail(&cmd->list, &lu->cmd_orb_completed);
}

/*
 * Is lu valid? Is the 1394 node still present?
 */
static inline int sbp2util_node_is_available(struct sbp2_lu *lu)
{
	return lu && lu->ne && !lu->ne->in_limbo;
}

/*********************************************
 * IEEE-1394 core driver stack related section
 *********************************************/

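/*
 * Driver-core probe callback: allocates a logical unit for this unit
 * directory, parses its entries, and starts (logs in to) the device.
 */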
static int sbp2_probe(struct device *dev)
{
	struct unit_directory *ud;
	struct sbp2_lu *lu;

	ud = container_of(dev, struct unit_directory, device);

	/* Don't probe UDs that have the LUN flag. We'll probe the LUN(s)
	 * instead. */
	if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
		return -ENODEV;

	lu = sbp2_alloc_device(ud);
	if (!lu)
		return -ENOMEM;

	sbp2_parse_unit_directory(lu, ud);
	return sbp2_start_device(lu);
}

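/*
 * Driver-core remove callback: fails enqueued commands if the node is
 * gone, detaches the SCSI device, logs out, and frees the logical unit.
 */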
static int sbp2_remove(struct device *dev)
{
	struct unit_directory *ud;
	struct sbp2_lu *lu;
	struct scsi_device *sdev;

	ud = container_of(dev, struct unit_directory, device);
	lu = ud->device.driver_data;
	if (!lu)
		return 0;

	if (lu->shost) {
		/* Get rid of enqueued commands if there is no chance to
		 * send them. */
		if (!sbp2util_node_is_available(lu))
			sbp2scsi_complete_all_commands(lu, DID_NO_CONNECT);
		/* scsi_remove_device() may trigger shutdown functions of SCSI
		 * highlevel drivers which would deadlock if blocked. */
		atomic_set(&lu->state, SBP2LU_STATE_IN_SHUTDOWN);
		scsi_unblock_requests(lu->shost);
	}
	sdev = lu->sdev;
	if (sdev) {
		lu->sdev = NULL;
		scsi_remove_device(sdev);
	}

	sbp2_logout_device(lu);
	sbp2_remove_device(lu);

	return 0;
}

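/*
 * Bus reset handler: reconnects to the logical unit (falling back to a
 * fresh login), resets the fetch agent, and completes pending commands
 * with DID_BUS_BUSY so that the SCSI midlayer retries them.
 */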
static int sbp2_update(struct unit_directory *ud)
{
	struct sbp2_lu *lu = ud->device.driver_data;

	if (sbp2_reconnect_device(lu)) {
		/* Reconnect has failed. Perhaps we didn't reconnect fast
		 * enough. Try a regular login, but first log out just in
		 * case of any weirdness. */
		sbp2_logout_device(lu);

		if (sbp2_login_device(lu)) {
			/* Login failed too; just fail, and the backend
			 * will call our sbp2_remove for us. */
			SBP2_ERR("Failed to reconnect to sbp2 device!");
			return -EBUSY;
		}
	}

	sbp2_set_busy_timeout(lu);
	sbp2_agent_reset(lu, 1);
	sbp2_max_speed_and_size(lu);

	/* Complete any pending commands with busy (so they get retried)
	 * and remove them from our queue. */
	sbp2scsi_complete_all_commands(lu, DID_BUS_BUSY);

	/* Accept new commands unless there was another bus reset in the
	 * meantime. */
	if (hpsb_node_entry_valid(lu->ne)) {
		atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
		scsi_unblock_requests(lu->shost);
	}
	return 0;
}

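/*
 * Allocates and initializes a logical unit, the per-host info (on first
 * use), the status FIFO address range, and the SCSI host. Cleans up via
 * sbp2_remove_device() and returns NULL on failure.
 */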
static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
{
	struct sbp2_fwhost_info *hi;
	struct Scsi_Host *shost = NULL;
	struct sbp2_lu *lu = NULL;

	lu = kzalloc(sizeof(*lu), GFP_KERNEL);
	if (!lu) {
		SBP2_ERR("failed to create lu");
		goto failed_alloc;
	}

	lu->ne = ud->ne;
	lu->ud = ud;
	lu->speed_code = IEEE1394_SPEED_100;
	lu->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
	lu->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE;
	INIT_LIST_HEAD(&lu->cmd_orb_inuse);
	INIT_LIST_HEAD(&lu->cmd_orb_completed);
	INIT_LIST_HEAD(&lu->lu_list);
	spin_lock_init(&lu->cmd_orb_lock);
	atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
	INIT_WORK(&lu->protocol_work, NULL);

	ud->device.driver_data = lu;

	hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
	if (!hi) {
		hi = hpsb_create_hostinfo(&sbp2_highlevel, ud->ne->host,
					  sizeof(*hi));
		if (!hi) {
			SBP2_ERR("failed to allocate hostinfo");
			goto failed_alloc;
		}
		hi->host = ud->ne->host;
		INIT_LIST_HEAD(&hi->logical_units);

#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
		/* Handle data movement if physical DMA is not
		 * enabled or not supported on the host controller */
		if (!hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host,
					     &sbp2_physdma_ops,
					     0x0ULL, 0xfffffffcULL)) {
			SBP2_ERR("failed to register lower 4GB address range");
			goto failed_alloc;
		}
#else
		if (dma_set_mask(hi->host->device.parent, DMA_32BIT_MASK)) {
			SBP2_ERR("failed to set 4GB DMA mask");
			goto failed_alloc;
		}
#endif
	}

	/* Prevent unloading of the 1394 host */
	if (!try_module_get(hi->host->driver->owner)) {
		SBP2_ERR("failed to get a reference on 1394 host driver");
		goto failed_alloc;
	}

	lu->hi = hi;

	list_add_tail(&lu->lu_list, &hi->logical_units);

	/* Register the status FIFO address range. We could use the same FIFO
	 * for targets at different nodes. However we need different FIFOs per
	 * target in order to support multi-unit devices.
	 * The FIFO is located outside the local host controller's physical
	 * range but, if possible, within the posted write area. Status writes
	 * will then be performed as unified transactions. This slightly
	 * reduces bandwidth usage, and some Prolific-based devices seem to
	 * require it. */
	lu->status_fifo_addr = hpsb_allocate_and_register_addrspace(
			&sbp2_highlevel, ud->ne->host, &sbp2_ops,
			sizeof(struct sbp2_status_block), sizeof(quadlet_t),
			ud->ne->host->low_addr_space, CSR1212_ALL_SPACE_END);
	if (lu->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
		SBP2_ERR("failed to allocate status FIFO address range");
		goto failed_alloc;
	}

	shost = scsi_host_alloc(&sbp2_shost_template, sizeof(unsigned long));
	if (!shost) {
		SBP2_ERR("failed to register scsi host");
		goto failed_alloc;
	}

	shost->hostdata[0] = (unsigned long)lu;

	if (!scsi_add_host(shost, &ud->device)) {
		lu->shost = shost;
		return lu;
	}

	SBP2_ERR("failed to add scsi host");
	scsi_host_put(shost);

failed_alloc:
	sbp2_remove_device(lu);
	return NULL;
}
