/* qlogicisp.c */
	PACKB(0, 0),	/* 0x0047 */
	PACKB(0, 0),	/* 0x0048 */
	PACKB(0, 0),	/* 0x0049 */
	PACKB(0, 0),	/* 0x004a */
	PACKB(0, 0),	/* 0x004b */
	PACKB(0, 0),	/* 0x004c */
	PACKB(0, 0),	/* 0x004d */
	PACKB(0, 0),	/* 0x004e */
	PACKB(0, 0),	/* 0x004f */
	PACKB(0, 0),	/* 0x0050 */
	PACKB(0, 0),	/* 0x0051 */
	PACKB(8, 8),	/* MBOX_CMD_INIT_REQUEST_QUEUE_64 (0x0052) */
	PACKB(8, 8)	/* MBOX_CMD_INIT_RESPONSE_QUEUE_64 (0x0053) */
#endif /* CONFIG_QL_ISP_A64 */
};

#define MAX_MBOX_COMMAND	(sizeof(mbox_param)/sizeof(u_short))

struct host_param {
	u_short		fifo_threshold;
	u_short		host_adapter_enable;
	u_short		initiator_scsi_id;
	u_short		bus_reset_delay;
	u_short		retry_count;
	u_short		retry_delay;
	u_short		async_data_setup_time;
	u_short		req_ack_active_negation;
	u_short		data_line_active_negation;
	u_short		data_dma_burst_enable;
	u_short		command_dma_burst_enable;
	u_short		tag_aging;
	u_short		selection_timeout;
	u_short		max_queue_depth;
};

/*
 * Device Flags:
 *
 * Bit  Name
 * ---------
 *  7   Disconnect Privilege
 *  6   Parity Checking
 *  5   Wide Data Transfers
 *  4   Synchronous Data Transfers
 *  3   Tagged Queuing
 *  2   Automatic Request Sense
 *  1   Stop Queue on Check Condition
 *  0   Renegotiate on Error
 */

struct dev_param {
	u_short		device_flags;
	u_short		execution_throttle;
	u_short		synchronous_period;
	u_short		synchronous_offset;
	u_short		device_enable;
	u_short		reserved;	/* pad */
};

/*
 * The result queue can be quite a bit smaller since continuation entries
 * do not show up there:
 */
#define RES_QUEUE_LEN		((QLOGICISP_REQ_QUEUE_LEN + 1) / 8 - 1)
#define QUEUE_ENTRY_LEN		64
#define QSIZE(entries)		(((entries) + 1) * QUEUE_ENTRY_LEN)

struct isp_queue_entry {
	char __opaque[QUEUE_ENTRY_LEN];
};

struct isp1020_hostdata {
	u_long	memaddr;
	u_char	revision;
	struct	host_param host_param;
	struct	dev_param dev_param[MAX_TARGETS];
	struct	pci_dev *pci_dev;

	struct isp_queue_entry *res_cpu; /* CPU-side address of response queue. */
	struct isp_queue_entry *req_cpu; /* CPU-side address of request queue. */

	/* result and request queues (shared with isp1020): */
	u_int	req_in_ptr;	/* index of next request slot */
	u_int	res_out_ptr;	/* index of next result slot */

	/* this is here so the queues are nicely aligned */
	long	send_marker;	/* do we need to send a marker? */

	/* The cmd->handle has a fixed size, and is only 32-bits.  We
	 * need to take care to handle 64-bit systems correctly thus what
	 * we actually place in cmd->handle is an index to the following
	 * table.  Kudos to Matt Jacob for the technique.
	 *   -DaveM
	 */
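	/*
	 * (Illustrative note, not part of the original source: on
	 * completion the firmware hands this 32-bit handle back in the
	 * status entry, so the owning command can be recovered with
	 * something along the lines of
	 *
	 *	Scsi_Cmnd *Cmnd = hostdata->cmd_slots[sts->handle];
	 *
	 * where `sts' is the Status_Entry being processed.  This is only
	 * a sketch of the lookup technique described above.)
	 */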
	Scsi_Cmnd *cmd_slots[QLOGICISP_REQ_QUEUE_LEN + 1];

	dma_addr_t res_dma;	/* PCI side view of response queue */
	dma_addr_t req_dma;	/* PCI side view of request queue */
};

/* queue lengths _must_ be a power of two: */
#define QUEUE_DEPTH(in, out, ql)	((in - out) & (ql))

#define REQ_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out,		     \
						    QLOGICISP_REQ_QUEUE_LEN)

#define RES_QUEUE_DEPTH(in, out)	QUEUE_DEPTH(in, out, RES_QUEUE_LEN)

static void	isp1020_enable_irqs(struct Scsi_Host *);
static void	isp1020_disable_irqs(struct Scsi_Host *);
static int	isp1020_init(struct Scsi_Host *);
static int	isp1020_reset_hardware(struct Scsi_Host *);
static int	isp1020_set_defaults(struct Scsi_Host *);
static int	isp1020_load_parameters(struct Scsi_Host *);
static int	isp1020_mbox_command(struct Scsi_Host *, u_short []);
static int	isp1020_return_status(struct Status_Entry *);
static void	isp1020_intr_handler(int, void *, struct pt_regs *);
static void	do_isp1020_intr_handler(int, void *, struct pt_regs *);

#if USE_NVRAM_DEFAULTS
static int	isp1020_get_defaults(struct Scsi_Host *);
static int	isp1020_verify_nvram(struct Scsi_Host *);
static u_short	isp1020_read_nvram_word(struct Scsi_Host *, u_short);
#endif

#if DEBUG_ISP1020
static void	isp1020_print_scsi_cmd(Scsi_Cmnd *);
#endif

#if DEBUG_ISP1020_INTR
static void	isp1020_print_status_entry(struct Status_Entry *);
#endif

/* memaddr should be used to determine if memmapped port i/o is being used
 * non-null memaddr == mmap'd
 * JV 7-Jan-2000
 */
static inline u_short isp_inw(struct Scsi_Host *host, long offset)
{
	struct isp1020_hostdata *h = (struct isp1020_hostdata *)host->hostdata;
	if (h->memaddr)
		return readw(h->memaddr + offset);
	else
		return inw(host->io_port + offset);
}

static inline void isp_outw(u_short val, struct Scsi_Host *host, long offset)
{
	struct isp1020_hostdata *h = (struct isp1020_hostdata *)host->hostdata;
	if (h->memaddr)
		writew(val, h->memaddr + offset);
	else
		outw(val, host->io_port + offset);
}

static inline void isp1020_enable_irqs(struct Scsi_Host *host)
{
	isp_outw(ISP_EN_INT|ISP_EN_RISC, host, PCI_INTF_CTL);
}

static inline void isp1020_disable_irqs(struct Scsi_Host *host)
{
	isp_outw(0x0, host, PCI_INTF_CTL);
}

int isp1020_detect(Scsi_Host_Template *tmpt)
{
	int hosts = 0;
	struct Scsi_Host *host;
	struct isp1020_hostdata *hostdata;
	struct pci_dev *pdev = NULL;

	ENTER("isp1020_detect");

	tmpt->proc_name = "isp1020";

	if (pci_present() == 0) {
		printk("qlogicisp : PCI not present\n");
		return 0;
	}

	while ((pdev = pci_find_device(PCI_VENDOR_ID_QLOGIC,
				       PCI_DEVICE_ID_QLOGIC_ISP1020, pdev)))
	{
		if (pci_enable_device(pdev))
			continue;

		host = scsi_register(tmpt, sizeof(struct isp1020_hostdata));
		hostdata = (struct isp1020_hostdata *) host->hostdata;

		memset(hostdata, 0, sizeof(struct isp1020_hostdata));

		hostdata->pci_dev = pdev;

		if (isp1020_init(host))
			goto fail_and_unregister;

		if (isp1020_reset_hardware(host)
#if USE_NVRAM_DEFAULTS
		    || isp1020_get_defaults(host)
#else
		    || isp1020_set_defaults(host)
#endif /* USE_NVRAM_DEFAULTS */
		    || isp1020_load_parameters(host)) {
			iounmap((void *)hostdata->memaddr);
			release_region(host->io_port, 0xff);
			goto fail_and_unregister;
		}

		host->this_id = hostdata->host_param.initiator_scsi_id;

		if (request_irq(host->irq, do_isp1020_intr_handler,
				SA_INTERRUPT | SA_SHIRQ,
				"qlogicisp", host))
		{
			printk("qlogicisp : interrupt %d already in use\n",
			       host->irq);
			iounmap((void *)hostdata->memaddr);
			release_region(host->io_port, 0xff);
			goto fail_and_unregister;
		}

		isp_outw(0x0, host, PCI_SEMAPHORE);
		isp_outw(HCCR_CLEAR_RISC_INTR, host, HOST_HCCR);
		isp1020_enable_irqs(host);

		hosts++;
		continue;

	fail_and_unregister:
		if (hostdata->res_cpu)
			pci_free_consistent(hostdata->pci_dev,
					    QSIZE(RES_QUEUE_LEN),
					    hostdata->res_cpu,
					    hostdata->res_dma);
		if (hostdata->req_cpu)
			pci_free_consistent(hostdata->pci_dev,
					    QSIZE(QLOGICISP_REQ_QUEUE_LEN),
					    hostdata->req_cpu,
					    hostdata->req_dma);
		scsi_unregister(host);
	}

	LEAVE("isp1020_detect");

	return hosts;
}

int isp1020_release(struct Scsi_Host *host)
{
	struct isp1020_hostdata *hostdata;

	ENTER("isp1020_release");

	hostdata = (struct isp1020_hostdata *) host->hostdata;

	isp_outw(0x0, host, PCI_INTF_CTL);
	free_irq(host->irq, host);

	iounmap((void *)hostdata->memaddr);

	release_region(host->io_port, 0xff);

	LEAVE("isp1020_release");

	return 0;
}

const char *isp1020_info(struct Scsi_Host *host)
{
	static char buf[80];
	struct isp1020_hostdata *hostdata;

	ENTER("isp1020_info");

	hostdata = (struct isp1020_hostdata *) host->hostdata;
	sprintf(buf,
		"QLogic ISP1020 SCSI on PCI bus %02x device %02x irq %d %s base 0x%lx",
		hostdata->pci_dev->bus->number, hostdata->pci_dev->devfn,
		host->irq,
		(hostdata->memaddr ? "MEM" : "I/O"),
		(hostdata->memaddr ? hostdata->memaddr : host->io_port));

	LEAVE("isp1020_info");

	return buf;
}

/*
 * The middle SCSI layer ensures that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (though the
 * interrupt handler may call this routine as part of
 * request-completion handling).
 */
int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (*done)(Scsi_Cmnd *))
{
	int i, n, num_free;
	u_int in_ptr, out_ptr;
	struct dataseg *ds;
	struct scatterlist *sg;
	struct Command_Entry *cmd;
	struct Continuation_Entry *cont;
	struct Scsi_Host *host;
	struct isp1020_hostdata *hostdata;
	dma_addr_t dma_addr;

	ENTER("isp1020_queuecommand");

	host = Cmnd->host;
	hostdata = (struct isp1020_hostdata *) host->hostdata;
	Cmnd->scsi_done = done;

	DEBUG(isp1020_print_scsi_cmd(Cmnd));

	out_ptr = isp_inw(host, MBOX4);
	in_ptr  = hostdata->req_in_ptr;

	DEBUG(printk("qlogicisp : request queue depth %d\n",
		     REQ_QUEUE_DEPTH(in_ptr, out_ptr)));

	cmd = (struct Command_Entry *) &hostdata->req_cpu[in_ptr];
	in_ptr = (in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN;
	if (in_ptr == out_ptr) {
		printk("qlogicisp : request queue overflow\n");
		return 1;
	}

	if (hostdata->send_marker) {
		struct Marker_Entry *marker;

		TRACE("queue marker", in_ptr, 0);

		DEBUG(printk("qlogicisp : adding marker entry\n"));
		marker = (struct Marker_Entry *) cmd;
		memset(marker, 0, sizeof(struct Marker_Entry));

		marker->hdr.entry_type = ENTRY_MARKER;
		marker->hdr.entry_cnt = 1;
		marker->modifier = SYNC_ALL;

		hostdata->send_marker = 0;

		if (((in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN) == out_ptr) {
			isp_outw(in_ptr, host, MBOX4);
			hostdata->req_in_ptr = in_ptr;
			printk("qlogicisp : request queue overflow\n");
			return 1;
		}

		cmd = (struct Command_Entry *) &hostdata->req_cpu[in_ptr];
		in_ptr = (in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN;
	}

	TRACE("queue command", in_ptr, Cmnd);

	memset(cmd, 0, sizeof(struct Command_Entry));

	cmd->hdr.entry_type = ENTRY_COMMAND;
	cmd->hdr.entry_cnt = 1;

	cmd->target_lun = Cmnd->lun;
	cmd->target_id = Cmnd->target;
	cmd->cdb_length = cpu_to_le16(Cmnd->cmd_len);
	cmd->control_flags = cpu_to_le16(CFLAG_READ | CFLAG_WRITE);
	cmd->time_out = cpu_to_le16(30);

	memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);

	if (Cmnd->use_sg) {
		int sg_count;

		sg = (struct scatterlist *) Cmnd->request_buffer;
		ds = cmd->dataseg;

		sg_count = pci_map_sg(hostdata->pci_dev, sg, Cmnd->use_sg,
				      scsi_to_pci_dma_dir(Cmnd->sc_data_direction));

		cmd->segment_cnt = cpu_to_le16(sg_count);

		/* fill in first four sg entries: */
		n = sg_count;
		if (n > IOCB_SEGS)
			n = IOCB_SEGS;
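		/*
		 * (Illustrative note, not part of the original source:
		 * the command IOCB itself carries IOCB_SEGS data
		 * segments -- the "first four" filled in below -- and
		 * any remaining scatter-gather elements spill over into
		 * continuation entries of CONTINUATION_SEGS segments
		 * each.  Assuming IOCB_SEGS == 4 and CONTINUATION_SEGS
		 * == 7, a 20-element scatterlist would need the command
		 * entry plus (20 - 4 + 6) / 7 == 3 continuation entries.)
		 */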
		for (i = 0; i < n; i++) {
			dma_addr = sg_dma_address(sg);
			ds[i].d_base = cpu_to_le32((u32) dma_addr);
#ifdef CONFIG_QL_ISP_A64
			ds[i].d_base_hi = cpu_to_le32((u32) (dma_addr >> 32));
#endif /* CONFIG_QL_ISP_A64 */
			ds[i].d_count = cpu_to_le32(sg_dma_len(sg));
			++sg;
		}
		sg_count -= IOCB_SEGS;

		while (sg_count > 0) {
			++cmd->hdr.entry_cnt;
			cont = (struct Continuation_Entry *)
				&hostdata->req_cpu[in_ptr];
			in_ptr = (in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN;
			if (in_ptr == out_ptr) {
				printk("isp1020: unexpected request queue "
				       "overflow\n");
				return 1;
			}
			TRACE("queue continuation", in_ptr, 0);
			cont->hdr.entry_type = ENTRY_CONTINUATION;
			cont->hdr.entry_cnt = 0;
			cont->hdr.sys_def_1 = 0;
			cont->hdr.flags = 0;
#ifndef CONFIG_QL_ISP_A64
			cont->reserved = 0;
#endif
			ds = cont->dataseg;
			n = sg_count;
			if (n > CONTINUATION_SEGS)
				n = CONTINUATION_SEGS;
			for (i = 0; i < n; ++i) {
				dma_addr = sg_dma_address(sg);
				ds[i].d_base = cpu_to_le32((u32) dma_addr);
#ifdef CONFIG_QL_ISP_A64
				ds[i].d_base_hi = cpu_to_le32((u32)(dma_addr >> 32));
#endif /* CONFIG_QL_ISP_A64 */
				ds[i].d_count = cpu_to_le32(sg_dma_len(sg));
				++sg;
			}
			sg_count -= n;
		}
	} else if (Cmnd->request_bufflen) {
		/*Cmnd->SCp.ptr = (char *)(unsigned long)*/
		dma_addr = pci_map_single(hostdata->pci_dev,
					  Cmnd->request_buffer,
					  Cmnd->request_bufflen,
					  scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
		Cmnd->SCp.ptr = (char *)(unsigned long) dma_addr;

		cmd->dataseg[0].d_base = cpu_to_le32((u32) dma_addr);
#ifdef CONFIG_QL_ISP_A64
		cmd->dataseg[0].d_base_hi = cpu_to_le32((u32) (dma_addr >> 32));
#endif /* CONFIG_QL_ISP_A64 */
		cmd->dataseg[0].d_count = cpu_to_le32((u32)Cmnd->request_bufflen);
		cmd->segment_cnt = cpu_to_le16(1);
	} else {
		cmd->dataseg[0].d_base = 0;
#ifdef CONFIG_QL_ISP_A64
		cmd->dataseg[0].d_base_hi = 0;
#endif /* CONFIG_QL_ISP_A64 */
		cmd->dataseg[0].d_count = 0;
		cmd->segment_cnt = cpu_to_le16(1); /* Shouldn't this be 0? */
	}

	/* Committed, record Scsi_Cmnd so we can find it later. */
	cmd->handle = in_ptr;
	hostdata->cmd_slots[in_ptr] = Cmnd;

	isp_outw(in_ptr, host, MBOX4);
	hostdata->req_in_ptr = in_ptr;

	num_free = QLOGICISP_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr);
	host->can_queue = host->host_busy + num_free;
	host->sg_tablesize = QLOGICISP_MAX_SG(num_free);

	LEAVE("isp1020_queuecommand");

	return 0;
}

#define ASYNC_EVENT_INTERRUPT	0x01

void do_isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock, flags);
	isp1020_intr_handler(irq, dev_id, regs);
	spin_unlock_irqrestore(&io_request_lock, flags);
}

void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	Scsi_Cmnd *Cmnd;
	struct Status_Entry *sts;
	struct Scsi_Host *host = dev_id;
	struct isp1020_hostdata *hostdata;
	u_int in_ptr, out_ptr;
	u_short status;

	ENTER_INTR("isp1020_intr_handler");

	hostdata = (struct isp1020_hostdata *) host->hostdata;

	DEBUG_INTR(printk("qlogicisp : interrupt on line %d\n", irq));

	if (!(isp_inw(host, PCI_INTF_STS) & 0x04)) {
		/* spurious interrupts can happen legally */
		DEBUG_INTR(printk("qlogicisp: got spurious interrupt\n"));
		return;
	}
	in_ptr = isp_inw(host, MBOX5);
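	/*
	 * (Illustrative note, not part of the original source: MBOX5 is
	 * assumed here to hold the firmware-maintained "in" index of the
	 * response queue, so the status entries between
	 * hostdata->res_out_ptr and in_ptr describe the commands the
	 * firmware has completed since they were last reaped.)
	 */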