if ((i & 0xf) == 0)
printk("\n ");
printk("%02x ", (int)dump[i]);
}
printk("\n");
return;
}
#else
#define sbp2util_packet_dump(w,x,y,z) do {} while (0)
#endif
static DECLARE_WAIT_QUEUE_HEAD(access_wq);
/*
* Waits for completion of an SBP-2 access request.
* Returns nonzero if timed out or prematurely interrupted.
*/
static int sbp2util_access_timeout(struct scsi_id_instance_data *scsi_id,
int timeout)
{
long leftover = wait_event_interruptible_timeout(
access_wq, scsi_id->access_complete, timeout);
scsi_id->access_complete = 0;
return leftover <= 0;
}
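/*
 * Illustrative sketch, not a quote from this file: the wake-up side lives
 * in the status FIFO handler (assumed here to be the sbp2_handle_status_write
 * path). On completion of a management ORB it would do roughly:
 *
 *	scsi_id->access_complete = 1;
 *	wake_up_interruptible(&access_wq);
 *
 * sbp2util_access_timeout() then returns 0; on timeout or a signal it
 * returns nonzero. Either way access_complete is cleared for the next
 * request.
 */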
/* Frees an allocated packet */
static void sbp2_free_packet(struct hpsb_packet *packet)
{
hpsb_free_tlabel(packet);
hpsb_free_packet(packet);
}
/* This is much like hpsb_node_write(), except it ignores the response
* subaction and returns immediately. Can be used from interrupts.
*/
static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
quadlet_t *buffer, size_t length)
{
struct hpsb_packet *packet;
packet = hpsb_make_writepacket(ne->host, ne->nodeid,
addr, buffer, length);
if (!packet)
return -ENOMEM;
hpsb_set_packet_complete_task(packet,
(void (*)(void *))sbp2_free_packet,
packet);
hpsb_node_fill_packet(ne, packet);
if (hpsb_send_packet(packet) < 0) {
sbp2_free_packet(packet);
return -EIO;
}
return 0;
}
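/*
 * Illustrative sketch (an assumption, not code quoted from this driver):
 * because sbp2util_node_write_no_wait() never sleeps, a caller in atomic
 * context could ring the fetch agent's doorbell directly, e.g.:
 *
 *	quadlet_t zero = 0;
 *
 *	if (sbp2util_node_write_no_wait(scsi_id->ne,
 *			scsi_id->sbp2_command_block_agent_addr +
 *			SBP2_DOORBELL_OFFSET, &zero, 4) < 0)
 *		SBP2_ERR("doorbell write failed.");
 */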
static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id,
u64 offset, quadlet_t *data, size_t len)
{
/*
* There is a small window after a bus reset within which the node
	 * entry's generation is current but the reconnect has not yet completed.
*/
if (unlikely(atomic_read(&scsi_id->state) == SBP2LU_STATE_IN_RESET))
return;
if (hpsb_node_write(scsi_id->ne,
scsi_id->sbp2_command_block_agent_addr + offset,
data, len))
SBP2_ERR("sbp2util_notify_fetch_agent failed.");
/*
	 * Now accept new SCSI commands, unless a bus reset happened during
* hpsb_node_write.
*/
if (likely(atomic_read(&scsi_id->state) != SBP2LU_STATE_IN_RESET))
scsi_unblock_requests(scsi_id->scsi_host);
}
static void sbp2util_write_orb_pointer(void *p)
{
quadlet_t data[2];
data[0] = ORB_SET_NODE_ID(
((struct scsi_id_instance_data *)p)->hi->host->node_id);
data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
sbp2util_cpu_to_be32_buffer(data, 8);
sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8);
}
static void sbp2util_write_doorbell(void *p)
{
sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4);
}
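/*
 * The two helpers above use the old void-pointer work callback signature.
 * A hedged sketch of how they might be scheduled (assuming a work_struct
 * member such as scsi_id->protocol_work and the three-argument INIT_WORK()
 * of this kernel generation; the member name is illustrative):
 *
 *	INIT_WORK(&scsi_id->protocol_work,
 *		  sbp2util_write_orb_pointer, scsi_id);
 *	scsi_block_requests(scsi_id->scsi_host);
 *	schedule_work(&scsi_id->protocol_work);
 *
 * Deferring the register write to process context lets hpsb_node_write()
 * sleep; sbp2util_notify_fetch_agent() unblocks the SCSI midlayer again.
 */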
/*
 * Creates a pool of command orbs used for command processing. Called
 * when a new sbp2 device is detected.
 */
static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
int i;
unsigned long flags, orbs;
struct sbp2_command_info *command;
orbs = serialize_io ? 2 : SBP2_MAX_CMDS;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
for (i = 0; i < orbs; i++) {
command = kzalloc(sizeof(*command), GFP_ATOMIC);
if (!command) {
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock,
flags);
return -ENOMEM;
}
command->command_orb_dma =
pci_map_single(hi->host->pdev, &command->command_orb,
sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
SBP2_DMA_ALLOC("single command orb DMA");
command->sge_dma =
pci_map_single(hi->host->pdev,
&command->scatter_gather_element,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
SBP2_DMA_ALLOC("scatter_gather_element");
INIT_LIST_HEAD(&command->list);
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return 0;
}
/*
 * Deletes the pool of command orbs created above.
 */
static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id)
{
struct hpsb_host *host = scsi_id->hi->host;
struct list_head *lh, *next;
struct sbp2_command_info *command;
unsigned long flags;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
list_for_each_safe(lh, next, &scsi_id->sbp2_command_orb_completed) {
command = list_entry(lh, struct sbp2_command_info, list);
/* Release our generic DMA's */
pci_unmap_single(host->pdev, command->command_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_TODEVICE);
SBP2_DMA_FREE("single command orb DMA");
pci_unmap_single(host->pdev, command->sge_dma,
sizeof(command->scatter_gather_element),
PCI_DMA_BIDIRECTIONAL);
SBP2_DMA_FREE("scatter_gather_element");
kfree(command);
}
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return;
}
/*
* This function finds the sbp2_command for a given outstanding command
 * orb. Only looks at the inuse list.
*/
static struct sbp2_command_info *sbp2util_find_command_for_orb(
struct scsi_id_instance_data *scsi_id, dma_addr_t orb)
{
struct sbp2_command_info *command;
unsigned long flags;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
if (command->command_orb_dma == orb) {
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return command;
}
}
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
return NULL;
}
/*
* This function finds the sbp2_command for a given outstanding SCpnt.
* Only looks at the inuse list.
* Must be called with scsi_id->sbp2_command_orb_lock held.
*/
static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(
struct scsi_id_instance_data *scsi_id, void *SCpnt)
{
struct sbp2_command_info *command;
if (!list_empty(&scsi_id->sbp2_command_orb_inuse))
list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list)
if (command->Current_SCpnt == SCpnt)
return command;
return NULL;
}
/*
* This function allocates a command orb used to send a scsi command.
*/
static struct sbp2_command_info *sbp2util_allocate_command_orb(
struct scsi_id_instance_data *scsi_id,
struct scsi_cmnd *Current_SCpnt,
void (*Current_done)(struct scsi_cmnd *))
{
struct list_head *lh;
struct sbp2_command_info *command = NULL;
unsigned long flags;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
lh = scsi_id->sbp2_command_orb_completed.next;
list_del(lh);
command = list_entry(lh, struct sbp2_command_info, list);
command->Current_done = Current_done;
command->Current_SCpnt = Current_SCpnt;
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_inuse);
} else {
SBP2_ERR("%s: no orbs available", __FUNCTION__);
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
return command;
}
/* Free a command's DMA mappings */
static void sbp2util_free_command_dma(struct sbp2_command_info *command)
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)command->Current_SCpnt->device->host->hostdata[0];
struct hpsb_host *host;
if (!scsi_id) {
SBP2_ERR("%s: scsi_id == NULL", __FUNCTION__);
return;
}
host = scsi_id->ud->ne->host;
if (command->cmd_dma) {
if (command->dma_type == CMD_DMA_SINGLE) {
pci_unmap_single(host->pdev, command->cmd_dma,
command->dma_size, command->dma_dir);
SBP2_DMA_FREE("single bulk");
} else if (command->dma_type == CMD_DMA_PAGE) {
pci_unmap_page(host->pdev, command->cmd_dma,
command->dma_size, command->dma_dir);
SBP2_DMA_FREE("single page");
} /* XXX: Check for CMD_DMA_NONE bug */
command->dma_type = CMD_DMA_NONE;
command->cmd_dma = 0;
}
if (command->sge_buffer) {
pci_unmap_sg(host->pdev, command->sge_buffer,
command->dma_size, command->dma_dir);
SBP2_DMA_FREE("scatter list");
command->sge_buffer = NULL;
}
}
/*
* This function moves a command to the completed orb list.
* Must be called with scsi_id->sbp2_command_orb_lock held.
*/
static void sbp2util_mark_command_completed(
struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command)
{
list_del(&command->list);
sbp2util_free_command_dma(command);
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
}
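/*
 * Illustrative caller sketch (modelled on an abort/cleanup path; not quoted
 * from this file): both the lookup and the completion must happen under
 * sbp2_command_orb_lock, since neither helper above takes it itself:
 *
 *	spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
 *	command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
 *	if (command)
 *		sbp2util_mark_command_completed(scsi_id, command);
 *	spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
 */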
/*
* Is scsi_id valid? Is the 1394 node still present?
*/
static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_id)
{
return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo;
}
/*********************************************
* IEEE-1394 core driver stack related section
*********************************************/
static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud);
static int sbp2_probe(struct device *dev)
{
struct unit_directory *ud;
struct scsi_id_instance_data *scsi_id;
SBP2_DEBUG_ENTER();
ud = container_of(dev, struct unit_directory, device);
/* Don't probe UD's that have the LUN flag. We'll probe the LUN(s)
* instead. */
if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
return -ENODEV;
scsi_id = sbp2_alloc_device(ud);
if (!scsi_id)
return -ENOMEM;
sbp2_parse_unit_directory(scsi_id, ud);
return sbp2_start_device(scsi_id);
}
static int sbp2_remove(struct device *dev)
{
struct unit_directory *ud;
struct scsi_id_instance_data *scsi_id;
struct scsi_device *sdev;
SBP2_DEBUG_ENTER();
ud = container_of(dev, struct unit_directory, device);
scsi_id = ud->device.driver_data;
if (!scsi_id)
return 0;
if (scsi_id->scsi_host) {
/* Get rid of enqueued commands if there is no chance to
* send them. */
if (!sbp2util_node_is_available(scsi_id))
sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT);
/* scsi_remove_device() will trigger shutdown functions of SCSI
* highlevel drivers which would deadlock if blocked. */
atomic_set(&scsi_id->state, SBP2LU_STATE_IN_SHUTDOWN);
scsi_unblock_requests(scsi_id->scsi_host);
}
sdev = scsi_id->sdev;
if (sdev) {
scsi_id->sdev = NULL;
scsi_remove_device(sdev);
}
sbp2_logout_device(scsi_id);
sbp2_remove_device(scsi_id);
return 0;
}
static int sbp2_update(struct unit_directory *ud)
{
struct scsi_id_instance_data *scsi_id = ud->device.driver_data;
SBP2_DEBUG_ENTER();
if (sbp2_reconnect_device(scsi_id)) {
/*
* Ok, reconnect has failed. Perhaps we didn't
* reconnect fast enough. Try doing a regular login, but
* first do a logout just in case of any weirdness.
*/
sbp2_logout_device(scsi_id);
if (sbp2_login_device(scsi_id)) {
/* Login failed too, just fail, and the backend
* will call our sbp2_remove for us */
SBP2_ERR("Failed to reconnect to sbp2 device!");
return -EBUSY;
}
}
/* Set max retries to something large on the device. */
sbp2_set_busy_timeout(scsi_id);
/* Do a SBP-2 fetch agent reset. */
sbp2_agent_reset(scsi_id, 1);
/* Get the max speed and packet size that we can use. */
sbp2_max_speed_and_size(scsi_id);
/* Complete any pending commands with busy (so they get
* retried) and remove them from our queue
*/
sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
/* Accept new commands unless there was another bus reset in the
* meantime. */
if (hpsb_node_entry_valid(scsi_id->ne)) {
atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
scsi_unblock_requests(scsi_id->scsi_host);
}
return 0;
}
/* This function is called by sbp2_probe for each new device. We now
 * allocate one scsi host for each scsi_id (unit directory). */
static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud)