i2o_scsi.c
来自「优龙2410linux2.6.8内核源代码」· C语言 代码 · 共 1,048 行 · 第 1/2 页
C
1,048 行
{
		/*
		 *	bus_adapter, SCSI (obsolete), or FibreChannel busses only
		 */
		if(    (d->lct_data.class_id!=I2O_CLASS_BUS_ADAPTER_PORT)	// bus_adapter
		    && (d->lct_data.class_id!=I2O_CLASS_FIBRE_CHANNEL_PORT)	// FC_PORT
		  )
			continue;

		/* One Scsi_Host per matching I2O device; hostdata holds our
		   per-controller state (struct i2o_scsi_host). */
		shpnt = scsi_register(tpnt, sizeof(struct i2o_scsi_host));
		if(shpnt==NULL)
			continue;
		/* NOTE(review): stuffing a kernel pointer into a u32 unique_id —
		   only safe on 32-bit; presumably accepted for this era's code. */
		shpnt->unique_id = (u32)d;
		shpnt->io_port = 0;		/* no PIO resources: I2O is message based */
		shpnt->n_io_port = 0;
		shpnt->irq = 0;			/* interrupts are owned by the IOP core */
		shpnt->this_id = /* Good question */15;

		i2o_scsi_init(c, d, shpnt);
		count++;
	}
	}
	i2o_scsi_hosts = count;

	/* Nothing attached: tear down everything detect set up earlier
	   (SG chain pool, pending-retry queue/timer, I2O handler). */
	if(count==0)
	{
		if(sg_chain_pool!=NULL)
		{
			kfree(sg_chain_pool);
			sg_chain_pool = NULL;
		}
		flush_pending();
		del_timer(&retry_timer);
		i2o_remove_handler(&i2o_scsi_handler);
	}

	return count;
}

/**
 *	i2o_scsi_release	-	release a host instance
 *	@host: SCSI host being released
 *
 *	Unregister the host; when the last registered host goes away,
 *	free the scatter-gather chain pool, flush any queued retries and
 *	detach from the I2O core. Always returns 0.
 */
static int i2o_scsi_release(struct Scsi_Host *host)
{
	if(--i2o_scsi_hosts==0)
	{
		/* Last host gone: mirror the cleanup done by detect()'s
		   count==0 path. */
		if(sg_chain_pool!=NULL)
		{
			kfree(sg_chain_pool);
			sg_chain_pool = NULL;
		}
		flush_pending();
		del_timer(&retry_timer);
		i2o_remove_handler(&i2o_scsi_handler);
	}
	scsi_unregister(host);
	return 0;
}

/**
 *	i2o_scsi_info	-	return driver/controller description
 *	@SChost: host to describe
 *
 *	Returns the name string of the I2O controller backing this host.
 */
static const char *i2o_scsi_info(struct Scsi_Host *SChost)
{
	struct i2o_scsi_host *hostdata;
	hostdata = (struct i2o_scsi_host *)SChost->hostdata;
	return(&hostdata->controller->name[0]);
}

/**
 *	i2o_scsi_queuecommand	-	queue a SCSI command
 *	@SCpnt: scsi command pointer
 *	@done: callback for completion
 *
 *	Issue a scsi command asynchronously. Return 0 on success or 1 if
 *	we hit an error (normally message queue congestion). The only
 *	minor complication here is that I2O deals with the device addressing
 *	so we have to map the bus/dev/lun back to an I2O handle as well
 *	as faking absent devices ourself.
 *
 *	Locks: takes the controller lock on error path only
 */

static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, void (*done) (struct scsi_cmnd *))
{
	int i;
	int tid;
	struct i2o_controller *c;
	struct scsi_cmnd *current_command;
	struct Scsi_Host *host;
	struct i2o_scsi_host *hostdata;
	u32 *msg, *mptr;
	u32 m;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u32 tag;
	unsigned long flags;

	/* High-water mark of the queue depth, for the debug printk below. */
	static int max_qd = 1;

	/*
	 *	Do the incoming paperwork
	 */

	host = SCpnt->device->host;
	hostdata = (struct i2o_scsi_host *)host->hostdata;
	c = hostdata->controller;
	prefetch(c);
	prefetchw(&queue_depth);
	SCpnt->scsi_done = done;

	/* I2O addressing gives us at most 16 targets (0..15). */
	if(SCpnt->device->id > 15)
	{
		printk(KERN_ERR "i2o_scsi: Wild target %d.\n", SCpnt->device->id);
		return -1;
	}

	/* Map (target, lun) back to the I2O TID for this device. */
	tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];

	dprintk(KERN_INFO "qcmd: Tid = %d\n", tid);

	current_command = SCpnt;		/* set current command */
	current_command->scsi_done = done;	/* set ptr to done function */

	/* We don't have such a device. Pretend we did the command
	   and that selection timed out */

	if(tid == -1)
	{
		SCpnt->result = DID_NO_CONNECT << 16;
		done(SCpnt);
		return 0;
	}

	dprintk(KERN_INFO "Real scsi messages.\n");

	/*
	 *	Obtain an I2O message. If there are none free then
	 *	throw it back to the scsi layer
	 */

	m = le32_to_cpu(I2O_POST_READ32(c));
	if(m==0xFFFFFFFF)
		return 1;	/* inbound queue congested: midlayer will retry */
	msg = (u32 *)(c->msg_virt + m);

	/*
	 *	Put together a scsi execscb message
	 */

	len = SCpnt->request_bufflen;
	direction = 0x00000000;			// SGL IN  (osm<--iop)

	/* Translate the DMA direction into the I2O SGL flag and the
	   SCSI-transfer-direction flag used in msg[4]. */
	if (SCpnt->sc_data_direction == DMA_NONE) {
		scsidir = 0x00000000;			// DATA NO XFER
	} else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
		direction = 0x04000000;	// SGL OUT  (osm-->iop)
		scsidir = 0x80000000;	// DATA OUT (iop-->dev)
	} else if(SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
		scsidir = 0x40000000;	// DATA IN  (iop<--dev)
	} else {
		/* Unknown - kill the command */
		SCpnt->result = DID_NO_CONNECT << 16;

		/* We must lock the request queue while completing */
		spin_lock_irqsave(host->host_lock, flags);
		done(SCpnt);
		spin_unlock_irqrestore(host->host_lock, flags);
		return 0;
	}

	i2o_raw_writel(I2O_CMD_SCSI_EXEC<<24|HOST_TID<<12|tid, &msg[1]);
	i2o_raw_writel(scsi_context, &msg[2]);	/* So the I2O layer passes to us */
	/* We want the SCSI control block back */
	i2o_raw_writel(i2o_context_list_add(SCpnt, c), &msg[3]);

	/* LSI_920_PCI_QUIRK
	 *
	 *	Intermittant observations of msg frame word data corruption
	 *	observed on msg[4] after:
	 *	  WRITE, READ-MODIFY-WRITE
	 *	operations.  19990606 -sralston
	 *
	 *	(Hence we build this word via tag. Its good practice anyway
	 *	 we don't want fetches over PCI needlessly)
	 */

	tag=0;

	/*
	 *	Attach tags to the devices
	 */
	if(SCpnt->device->tagged_supported)
	{
		/*
		 *	Some drives are too stupid to handle fairness issues
		 *	with tagged queueing. We throw in the odd ordered
		 *	tag to stop them starving themselves.
		 */
		if((jiffies - hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]) > (5*HZ))
		{
			tag=0x01800000;		/* ORDERED! */
			hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]=jiffies;
		}
		else
		{
			/* Hmmm...  I always see value of 0 here,
			 *  of which {HEAD_OF, ORDERED, SIMPLE} are NOT!  -sralston
			 */
			if(SCpnt->tag == HEAD_OF_QUEUE_TAG)
				tag=0x01000000;
			else if(SCpnt->tag == ORDERED_QUEUE_TAG)
				tag=0x01800000;
		}
	}

	/* Direction, disconnect ok, tag, CDBLen */
	i2o_raw_writel(scsidir|0x20000000|SCpnt->cmd_len|tag, &msg[4]);

	mptr=msg+5;

	/*
	 *	Write SCSI command into the message - always 16 byte block
	 */

	memcpy_toio(mptr, SCpnt->cmnd, 16);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	reqlen = 12;		// SINGLE SGE

	/*
	 *	Now fill in the SGList and command
	 *
	 *	FIXME: we need to set the sglist limits according to the
	 *	message size of the I2O controller. We might only have room
	 *	for 6 or so worst case
	 */

	if(SCpnt->use_sg)
	{
		struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
		int sg_count;
		int chain = 0;

		len = 0;

		sg_count = pci_map_sg(c->pdev, sg, SCpnt->use_sg,
				SCpnt->sc_data_direction);

		/* FIXME: handle fail */
		if(!sg_count)
			BUG();

		/* More than 11 fragments: the SG list will not fit in the
		   message frame, so park it in a chain buffer instead. */
		if((sg_max_frags > 11) && (SCpnt->use_sg > 11))
		{
			chain = 1;
			/*
			 *	Need to chain!
			 */
			i2o_raw_writel(direction|0xB0000000|(SCpnt->use_sg*2*4), mptr++);
			i2o_raw_writel(virt_to_bus(sg_chain_pool + sg_chain_tag), mptr);
			mptr = (u32*)(sg_chain_pool + sg_chain_tag);
			if (SCpnt->use_sg > max_sg_len)
			{
				max_sg_len = SCpnt->use_sg;
				printk("i2o_scsi: Chain SG! SCpnt=%p, SG_FragCnt=%d, SG_idx=%d\n",
					SCpnt, SCpnt->use_sg, sg_chain_tag);
			}
			if ( ++sg_chain_tag == SG_MAX_BUFS )
				sg_chain_tag = 0;
			for(i = 0 ; i < SCpnt->use_sg; i++)
			{
				*mptr++=cpu_to_le32(direction|0x10000000|sg_dma_len(sg));
				len+=sg_dma_len(sg);
				*mptr++=cpu_to_le32(sg_dma_address(sg));
				sg++;
			}
			/* Re-flag the final element as end-of-list (0xD...). */
			mptr[-2]=cpu_to_le32(direction|0xD0000000|sg_dma_len(sg-1));
		}
		else
		{
			for(i = 0 ; i < SCpnt->use_sg; i++)
			{
				i2o_raw_writel(direction|0x10000000|sg_dma_len(sg), mptr++);
				len+=sg->length;
				i2o_raw_writel(sg_dma_address(sg), mptr++);
				sg++;
			}

			/* Make this an end of list. Again evade the 920 bug and
			   unwanted PCI read traffic */

			i2o_raw_writel(direction|0xD0000000|sg_dma_len(sg-1), &mptr[-2]);
		}

		if(!chain)
			reqlen = mptr - msg;

		i2o_raw_writel(len, lenptr);

		if(len != SCpnt->underflow)
			printk("Cmd len %08X Cmd underflow %08X\n",
				len, SCpnt->underflow);
	}
	else
	{
		/* Single flat buffer (or no data at all). */
		dprintk(KERN_INFO "non sg for %p, %d\n",
				SCpnt->request_buffer,  SCpnt->request_bufflen);
		i2o_raw_writel(len = SCpnt->request_bufflen, lenptr);
		if(len == 0)
		{
			reqlen = 9;	/* no SGE at all */
		}
		else
		{
			dma_addr_t dma_addr;
			dma_addr = pci_map_single(c->pdev,
					       SCpnt->request_buffer,
					       SCpnt->request_bufflen,
					       SCpnt->sc_data_direction);
			if(dma_addr == 0)
				BUG();	/* How to handle ?? */
			/* Stash the mapping so completion can unmap it. */
			SCpnt->SCp.ptr = (char *)(unsigned long) dma_addr;
			i2o_raw_writel(0xD0000000|direction|SCpnt->request_bufflen, mptr++);
			i2o_raw_writel(dma_addr, mptr++);
		}
	}

	/*
	 *	Stick the headers on
	 */

	i2o_raw_writel(reqlen<<16 | SGL_OFFSET_10, msg);

	/* Queue the message */
	i2o_post_message(c,m);

	atomic_inc(&queue_depth);

	if(atomic_read(&queue_depth)> max_qd)
	{
		max_qd=atomic_read(&queue_depth);
		printk("Queue depth now %d.\n", max_qd);
	}

	mb();
	dprintk(KERN_INFO "Issued %ld\n", current_command->serial_number);

	return 0;
}

/**
 *	i2o_scsi_abort	-	abort a running command
 *	@SCpnt: command to abort
 *
 *	Ask the I2O controller to abort a command. This is an asynchrnous
 *	process and our callback handler will see the command complete
 *	with an aborted message if it succeeds.
 *
 *	Locks: no locks are held or needed
 */

static int i2o_scsi_abort(struct scsi_cmnd * SCpnt)
{
	struct i2o_controller *c;
	struct Scsi_Host *host;
	struct i2o_scsi_host *hostdata;
	u32 msg[5];
	int tid;
	int status = FAILED;

	printk(KERN_WARNING "i2o_scsi: Aborting command block.\n");

	host = SCpnt->device->host;
	hostdata = (struct i2o_scsi_host *)host->hostdata;
	/* Resolve (target, lun) to the I2O TID; -1 means no such device. */
	tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];
	if(tid==-1)
	{
		printk(KERN_ERR "i2o_scsi: Impossible command to abort!\n");
		return status;
	}
	c = hostdata->controller;

	/* i2o_post_wait() sleeps; drop the EH-held host lock around it
	   and re-take it before returning to the midlayer. */
	spin_unlock_irq(host->host_lock);

	msg[0] = FIVE_WORD_MSG_SIZE;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid;
	msg[2] = scsi_context;
	msg[3] = 0;
	msg[4] = i2o_context_list_remove(SCpnt, c);
	if(i2o_post_wait(c, msg, sizeof(msg), 240))
		status = SUCCESS;

	spin_lock_irq(host->host_lock);
	return status;
}

/**
 *	i2o_scsi_bus_reset	-	Issue a SCSI reset
 *	@SCpnt: the command that caused the reset
 *
 *	Perform a SCSI bus reset operation. In I2O this is just a message
 *	we pass. I2O can do clever multi-initiator and shared reset stuff
 *	but we don't support this.
 *
 *	Locks: called with no lock held, requires no locks.
 */

static int i2o_scsi_bus_reset(struct scsi_cmnd * SCpnt)
{
	int tid;
	struct i2o_controller *c;
	struct Scsi_Host *host;
	struct i2o_scsi_host *hostdata;
	u32 m;
	void *msg;
	unsigned long timeout;

	/*
	 *	Find the TID for the bus
	 */

	host = SCpnt->device->host;

	/* The wait/retry loop below may sleep; release the host lock
	   held by the error handler while we work. */
	spin_unlock_irq(host->host_lock);

	printk(KERN_WARNING "i2o_scsi: Attempting to reset the bus.\n");

	hostdata = (struct i2o_scsi_host *)host->hostdata;
	tid = hostdata->bus_task;
	c = hostdata->controller;

	/*
	 *	Now send a SCSI reset request. Any remaining commands
	 *	will be aborted by the IOP. We need to catch the reply
	 *	possibly ?
*/ timeout = jiffies+2*HZ; do { m = le32_to_cpu(I2O_POST_READ32(c)); if(m != 0xFFFFFFFF) break; set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(1); mb(); } while(time_before(jiffies, timeout)); msg = c->msg_virt + m; i2o_raw_writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, msg); i2o_raw_writel(I2O_CMD_SCSI_BUSRESET<<24|HOST_TID<<12|tid, msg+4); i2o_raw_writel(scsi_context|0x80000000, msg+8); /* We use the top bit to split controller and unit transactions */ /* Now store unit,tid so we can tie the completion back to a specific device */ __raw_writel(c->unit << 16 | tid, msg+12); wmb(); /* We want the command to complete after we return */ spin_lock_irq(host->host_lock); i2o_post_message(c,m); /* Should we wait for the reset to complete ? */ return SUCCESS;}/** * i2o_scsi_bios_param - Invent disk geometry * @sdev: scsi device * @dev: block layer device * @capacity: size in sectors * @ip: geometry array * * This is anyones guess quite frankly. We use the same rules everyone * else appears to and hope. It seems to work. */ static int i2o_scsi_bios_param(struct scsi_device * sdev, struct block_device *dev, sector_t capacity, int *ip){ int size; size = capacity; ip[0] = 64; /* heads */ ip[1] = 32; /* sectors */ if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */ ip[0] = 255; /* heads */ ip[1] = 63; /* sectors */ ip[2] = size / (255 * 63); /* cylinders */ } return 0;}MODULE_AUTHOR("Red Hat Software");MODULE_LICENSE("GPL");static struct scsi_host_template driver_template = { .proc_name = "i2o_scsi", .name = "I2O SCSI Layer", .detect = i2o_scsi_detect, .release = i2o_scsi_release, .info = i2o_scsi_info, .queuecommand = i2o_scsi_queuecommand, .eh_abort_handler = i2o_scsi_abort, .eh_bus_reset_handler = i2o_scsi_bus_reset, .bios_param = i2o_scsi_bios_param, .can_queue = I2O_SCSI_CAN_QUEUE, .this_id = 15, .sg_tablesize = 8, .cmd_per_lun = 6, .use_clustering = ENABLE_CLUSTERING,};#include "../../scsi/scsi_module.c"
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?