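/*
 * isp_pci.c -- fragment of the Linux PCI front end of the Qlogic ISP
 * SCSI/Fibre Channel host adapter driver.  This excerpt picks up midway
 * through the fibre channel target mode CTIO2 builder (tdma_mkfc) and
 * runs through the DMA setup/teardown routines and the register dump
 * helper.
 */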
    }
    cto->ct_resid = 0;
    cto->rsp.m0.ct_scsi_status = 0;
    MEMZERO(&cto->rsp, sizeof (cto->rsp));

    nxti = *nxtip;

    for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
        u_int32_t oxfrlen;
        int seglim;

        seglim = nseg;
        if (seglim) {
            int seg;
            if (seglim > ISP_RQDSEG_T2)
                seglim = ISP_RQDSEG_T2;
            for (seg = 0; seg < seglim; seg++, nseg--) {
                /*
                 * Unlike normal initiator commands, we don't do
                 * any swizzling here.
                 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
                cto->rsp.m0.ct_dataseg[seg].ds_base =
                    virt_to_bus(sg->address);
#else
                cto->rsp.m0.ct_dataseg[seg].ds_base =
                    (u_int32_t) sg_dma_address(sg);
#endif
                cto->rsp.m0.ct_dataseg[seg].ds_count =
                    (u_int32_t) sg->length;
                cto->rsp.m0.ct_xfrlen += sg->length;
                sg++;
            }
            cto->ct_seg_count = seg;
            oxfrlen = cto->rsp.m0.ct_xfrlen;
        } else {
            /*
             * This case should only happen when we're sending a
             * synthesized MODE1 final status with sense data.
             */
            if (send_sense == 0) {
                isp_prt(isp, ISP_LOGERR,
                    "tdma_mkfc ran out of segments, no SENSE DATA");
                cto->ct_resid = -EINVAL;
                return (CMD_COMPLETE);
            }
            oxfrlen = 0;
        }

        /*
         * At this point, the fields ct_lun, ct_iid, ct_rxid and
         * ct_timeout have been carried over unchanged from what our
         * caller had set.
         *
         * The field ct_reloff is either what the caller set, or what
         * we've added to it below.
         *
         * The dataseg fields and the seg_count fields we just got
         * through setting.  The data direction we've preserved all
         * along, and we only clear it if we're sending a MODE1 status
         * as the last CTIO.
         */
        if (nth_ctio == nctios - 1) {
            /*
             * We're the last in a sequence of CTIO2s, so mark this
             * CTIO2 and save the handle to the CCB such that when
             * this CTIO2 completes we can free dma resources and
             * do whatever else we need to do to finish the rest
             * of the command.
             */
            cto->ct_syshandle = handle;
            cto->ct_header.rqs_seqno = 1;

            if (send_status) {
                /*
                 * Get 'real' residual and set flags based on it.
                 */
                cto->ct_resid = resid;
                if (send_sense) {
                    MEMCPY(cto->rsp.m1.ct_resp, sense, QLTM_SENSELEN);
                    cto->rsp.m1.ct_senselen = QLTM_SENSELEN;
                    scsi_status |= CT2_SNSLEN_VALID;
                    cto->rsp.m1.ct_scsi_status = scsi_status;
                    cto->ct_flags &= CT2_FLAG_MMASK;
                    cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA |
                        CT2_SENDSTATUS | CT2_CCINCR;
                    if (cto->ct_resid > 0)
                        cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER;
                    else if (cto->ct_resid < 0)
                        cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER;
                } else {
                    cto->rsp.m0.ct_scsi_status = scsi_status;
                    cto->ct_flags |= CT2_SENDSTATUS | CT2_CCINCR;
                    if (cto->ct_resid > 0)
                        cto->rsp.m0.ct_scsi_status |= CT2_DATA_UNDER;
                    else if (cto->ct_resid < 0)
                        cto->rsp.m0.ct_scsi_status |= CT2_DATA_OVER;
                }
            }
            isp_prt(isp, ISP_LOGTDEBUG1, ctx, cto->ct_rxid,
                (int) tcmd->cd_lun, cto->ct_iid, cto->ct_flags,
                cto->ct_status, cto->rsp.m1.ct_scsi_status,
                cto->ct_resid, "<END>");
            isp_put_ctio2(isp, cto, qe);
            ISP_TDQE(isp, "last tdma_mkfc", curi, cto);
            if (nctios > 1) {
                MEMORYBARRIER(isp, SYNC_REQUEST, curi, QENTRY_LEN);
            }
        } else {
            ct2_entry_t *oqe = qe;

            /*
             * Make sure handle fields are clean.
             */
            cto->ct_syshandle = 0;
            cto->ct_header.rqs_seqno = 0;

            isp_prt(isp, ISP_LOGTDEBUG1, ctx, cto->ct_rxid,
                (int) tcmd->cd_lun, cto->ct_iid, cto->ct_flags,
                cto->ct_status, cto->rsp.m1.ct_scsi_status,
                cto->ct_resid, "<MID>");

            /*
             * Get a new CTIO2 entry from the request queue.
             */
            qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
            nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
            if (nxti == optr) {
                isp_prt(isp, ISP_LOGERR, "queue overflow in tdma_mkfc");
                return (CMD_EAGAIN);
            }
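            /*
             * Note that isp_put_ctio2() below is what actually copies
             * (and, where the platform needs it, byte-swizzles) our
             * local CTIO2 into the shared request queue entry; the
             * local copy is then partially reset and reused for the
             * next entry in the chain.
             */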
            /*
             * Now that we're done with the old CTIO2, flush it out
             * to the request queue.
             */
            ISP_TDQE(isp, "tdma_mkfc", curi, cto);
            isp_put_ctio2(isp, cto, oqe);
            if (nth_ctio != 0) {
                MEMORYBARRIER(isp, SYNC_REQUEST, curi, QENTRY_LEN);
            }
            curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

            /*
             * Reset some fields in the CTIO2 so we can reuse it for
             * the next one we'll flush to the request queue.
             */
            cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
            cto->ct_header.rqs_entry_count = 1;
            cto->ct_header.rqs_flags = 0;
            cto->ct_status = 0;
            cto->ct_resid = 0;
            cto->ct_seg_count = 0;

            /*
             * Adjust the new relative offset by the amount which is
             * recorded in the data segment of the old CTIO2 we just
             * finished filling out.
             */
            cto->ct_reloff += oxfrlen;
            MEMZERO(&cto->rsp, sizeof (cto->rsp));
        }
    }
    *nxtip = nxti;
    return (CMD_QUEUED);
}
#endif

static int
isp_pci_dmasetup(struct ispsoftc *isp, Scsi_Cmnd *Cmnd, ispreq_t *rq,
    u_int16_t *nxi, u_int16_t optr)
{
    struct scatterlist *sg;
    DMA_ADDR_T one_shot_addr;
    unsigned int one_shot_length;
    int segcnt, seg, ovseg, seglim;
    void *h;
    u_int16_t nxti;

#ifdef LINUX_ISP_TARGET_MODE
    if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO ||
        rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
        int s;
        if (IS_SCSI(isp))
            s = tdma_mk(isp, (tmd_cmd_t *)Cmnd, (ct_entry_t *)rq,
                nxi, optr);
        else
            s = tdma_mkfc(isp, (tmd_cmd_t *)Cmnd, (ct2_entry_t *)rq,
                nxi, optr);
        return (s);
    }
#endif

    nxti = *nxi;
    h = (void *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
    if (Cmnd->sc_data_direction == SCSI_DATA_NONE ||
        Cmnd->request_bufflen == 0) {
        rq->req_seg_count = 1;
        goto mbxsync;
    }
#else
    if (Cmnd->request_bufflen == 0) {
        rq->req_seg_count = 1;
        goto mbxsync;
    }
#endif

    if (IS_FC(isp)) {
        if (rq->req_header.rqs_entry_type == RQSTYPE_T3RQS)
            seglim = ISP_RQDSEG_T3;
        else
            seglim = ISP_RQDSEG_T2;
        ((ispreqt2_t *)rq)->req_totalcnt = Cmnd->request_bufflen;
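        /*
         * seglim bounds how many data segments fit in the request
         * entry itself; any remainder spills over into the
         * continuation entries built further below.
         */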
        /*
         * Linux doesn't make it easy to tell which direction the data
         * is expected to go, and you really need to know this for FC.
         * We'll have to assume that commands which might be used for
         * writes are outbound and all else are inbound.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
        if (Cmnd->sc_data_direction == SCSI_DATA_WRITE) {
            ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
        } else if (Cmnd->sc_data_direction == SCSI_DATA_READ) {
            ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
        } else if (Cmnd->sc_data_direction != SCSI_DATA_UNKNOWN) {
            isp_prt(isp, ISP_LOGERR,
                "bogus direction (%x) for %d byte request (opcode 0x%x)",
                Cmnd->sc_data_direction, Cmnd->request_bufflen,
                Cmnd->cmnd[0]);
            XS_SETERR(Cmnd, HBA_BOTCH);
            return (CMD_COMPLETE);
        } else
#endif
        switch (Cmnd->cmnd[0]) {
        case FORMAT_UNIT:
        case WRITE_6:
        case MODE_SELECT:
        case SEND_DIAGNOSTIC:
        case WRITE_10:
        case WRITE_BUFFER:
        case WRITE_LONG:
        case WRITE_SAME:
        case MODE_SELECT_10:
        case WRITE_12:
        case WRITE_VERIFY_12:
        case SEND_VOLUME_TAG:
            ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
            break;
        default:
            ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
        }
    } else {
        if (Cmnd->cmd_len > 12)
            seglim = 0;
        else
            seglim = ISP_RQDSEG;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
        if (Cmnd->sc_data_direction == SCSI_DATA_WRITE) {
            rq->req_flags |= REQFLAG_DATA_OUT;
        } else if (Cmnd->sc_data_direction == SCSI_DATA_READ) {
            rq->req_flags |= REQFLAG_DATA_IN;
        } else if (Cmnd->sc_data_direction != SCSI_DATA_UNKNOWN) {
            isp_prt(isp, ISP_LOGERR,
                "bogus direction (%x) for %d byte request (opcode 0x%x)",
                Cmnd->sc_data_direction, Cmnd->request_bufflen,
                Cmnd->cmnd[0]);
            XS_SETERR(Cmnd, HBA_BOTCH);
            return (CMD_COMPLETE);
        } else
#endif
        rq->req_flags |= REQFLAG_DATA_OUT | REQFLAG_DATA_IN;
    }

    one_shot_addr = (DMA_ADDR_T) 0;
    one_shot_length = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
    if ((segcnt = Cmnd->use_sg) == 0) {
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        segcnt = 1;
        sg = NULL;
        one_shot_length = Cmnd->request_bufflen;
        one_shot_addr = pci_map_single(pcs->pci_dev,
            Cmnd->request_buffer, Cmnd->request_bufflen,
            scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
        QLA_HANDLE(Cmnd) = (DMA_HTYPE_T) one_shot_addr;
    } else {
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        sg = (struct scatterlist *) Cmnd->request_buffer;
        segcnt = pci_map_sg(pcs->pci_dev, sg, Cmnd->use_sg,
            scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
    }
#else
    if ((segcnt = Cmnd->use_sg) == 0) {
        segcnt = 1;
        sg = NULL;
        one_shot_length = Cmnd->request_bufflen;
        one_shot_addr = virt_to_bus(Cmnd->request_buffer);
    } else {
        sg = (struct scatterlist *) Cmnd->request_buffer;
    }
#endif
    if (segcnt == 0) {
        isp_prt(isp, ISP_LOGWARN, "unable to dma map request");
        XS_SETERR(Cmnd, HBA_BOTCH);
        return (CMD_EAGAIN);
    }

    for (seg = 0, rq->req_seg_count = 0;
        seg < segcnt && rq->req_seg_count < seglim;
        seg++, rq->req_seg_count++) {
        DMA_ADDR_T addr;
        unsigned int length;

        if (sg) {
            length = QLA_SG_C(sg);
            addr = QLA_SG_A(sg);
            sg++;
        } else {
            length = one_shot_length;
            addr = one_shot_addr;
        }
#if defined(CONFIG_HIGHMEM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,18)
        if (addr > 0xffffffff) {
            panic("Aieee! Highmem!");
        }
#endif
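        /*
         * Record this segment in the request entry itself.  Type 3
         * (64 bit) entries also carry the upper 32 bits of the DMA
         * address.
         */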
Highmem!"); }#endif if (rq->req_header.rqs_entry_type == RQSTYPE_T3RQS) { ispreqt3_t *rq3 = (ispreqt3_t *)rq; rq3->req_dataseg[rq3->req_seg_count].ds_count = length; rq3->req_dataseg[rq3->req_seg_count].ds_base = addr;#if defined(CONFIG_HIGHMEM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,18) rq3->req_dataseg[rq3->req_seg_count].ds_basehi = addr >> 32;#endif } else if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) { ispreqt2_t *rq2 = (ispreqt2_t *)rq; rq2->req_dataseg[rq2->req_seg_count].ds_count = length; rq2->req_dataseg[rq2->req_seg_count].ds_base = addr; } else { rq->req_dataseg[rq->req_seg_count].ds_count = length; rq->req_dataseg[rq->req_seg_count].ds_base = addr; } isp_prt(isp, ISP_LOGDEBUG1, "seg0[%d]%llx:%u from %p", seg, (long long)addr, length, sg? sg->address : Cmnd->request_buffer); } if (seg == segcnt) { goto mbxsync; } do { int lim; u_int16_t curip; ispcontreq_t local, *crq = &local, *qep; curip = nxti; qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curip); nxti = ISP_NXT_QENTRY((curip), RQUEST_QUEUE_LEN(isp)); if (nxti == optr) {#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) isp_pci_dmateardown(isp, Cmnd, 0);#endif isp_prt(isp, ISP_LOGDEBUG0, "out of space for continuations"); XS_SETERR(Cmnd, HBA_BOTCH); return (CMD_EAGAIN); } rq->req_header.rqs_entry_count++; MEMZERO((void *)crq, sizeof (*crq)); crq->req_header.rqs_entry_count = 1; if (rq->req_header.rqs_entry_type == RQSTYPE_T3RQS) { lim = ISP_CDSEG64; crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; } else { lim = ISP_CDSEG; crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; } for (ovseg = 0; seg < segcnt && ovseg < lim; rq->req_seg_count++, seg++, ovseg++, sg++) { if (sg->length == 0) { panic("zero length s-g element at line %d", __LINE__); }#if defined(CONFIG_HIGHMEM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,18) if (QLA_SG_A(sg) > 0xffffffff) { panic("Aieee! Highmem!"); }#endif if (rq->req_header.rqs_entry_type == RQSTYPE_T3RQS) { ispcontreq64_t *xrq = (ispcontreq64_t *) crq; xrq->req_dataseg[ovseg].ds_count = QLA_SG_C(sg); xrq->req_dataseg[ovseg].ds_base = QLA_SG_A(sg);#if defined(CONFIG_HIGHMEM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,18) xrq->req_dataseg[ovseg].ds_basehi = QLA_SG_A(sg) >> 32;#endif } else { crq->req_dataseg[ovseg].ds_count = QLA_SG_C(sg); crq->req_dataseg[ovseg].ds_base = QLA_SG_A(sg); } isp_prt(isp, ISP_LOGDEBUG1, "seg%d[%d]%llx:%u from %p", rq->req_header.rqs_entry_count-1, ovseg, (unsigned long long) QLA_SG_A(sg), QLA_SG_C(sg), sg->address); } MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); isp_put_cont_req(isp, crq, qep); } while (seg < segcnt);mbxsync: if (rq->req_header.rqs_entry_type == RQSTYPE_T3RQS) { isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) h); } else if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) { isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) h); } else { isp_put_request(isp, (ispreq_t *) rq, (ispreq_t *) h); } *nxi = nxti; return (CMD_QUEUED);}#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)static voidisp_pci_dmateardown(struct ispsoftc *isp, Scsi_Cmnd *Cmnd, u_int16_t handle){#ifdef LINUX_ISP_TARGET_MODE if (Cmnd->sc_magic != SCSI_CMND_MAGIC) { struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; tmd_cmd_t *tcmd = (tmd_cmd_t *) Cmnd; struct scatterlist *sg = tcmd->cd_data; int nseg = 0; while (sg->address) { nseg++; sg++; } pci_unmap_sg(pcs->pci_dev, tcmd->cd_data, nseg, (tcmd->cd_hflags & CDFH_DATA_IN)? 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
static void
isp_pci_dmateardown(struct ispsoftc *isp, Scsi_Cmnd *Cmnd, u_int16_t handle)
{
#ifdef LINUX_ISP_TARGET_MODE
    if (Cmnd->sc_magic != SCSI_CMND_MAGIC) {
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        tmd_cmd_t *tcmd = (tmd_cmd_t *) Cmnd;
        struct scatterlist *sg = tcmd->cd_data;
        int nseg = 0;
        while (sg->address) {
            nseg++;
            sg++;
        }
        pci_unmap_sg(pcs->pci_dev, tcmd->cd_data, nseg,
            (tcmd->cd_hflags & CDFH_DATA_IN) ?
            PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
    } else
#endif
    if (Cmnd->sc_data_direction != SCSI_DATA_NONE) {
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        if (Cmnd->use_sg) {
            pci_unmap_sg(pcs->pci_dev,
                (struct scatterlist *) Cmnd->buffer, Cmnd->use_sg,
                scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
        } else if (Cmnd->request_bufflen) {
            DMA_ADDR_T dhandle = (DMA_ADDR_T) QLA_HANDLE(Cmnd);
            pci_unmap_single(pcs->pci_dev, dhandle,
                Cmnd->request_bufflen,
                scsi_to_pci_dma_dir(Cmnd->sc_data_direction));
        }
    }
}
#endif

static void
isp_pci_reset1(struct ispsoftc *isp)
{
    /* Make sure the BIOS is disabled */
    isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
    ENABLE_INTS(isp);
    isp->mbintsok = 1;
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    u_int16_t csr;

    pci_read_config_word(pcs->pci_dev, PCI_COMMAND, &csr);
    printk("%s: ", isp->isp_name);
    if (msg)
        printk("%s\n", msg);
    if (IS_SCSI(isp))
        printk(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
    else
        printk(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
    printk(" biu_icr=%x biu_isr=%x biu_sema=%x ",
        ISP_READ(isp, BIU_ICR), ISP_READ(isp, BIU_ISR),
        ISP_READ(isp, BIU_SEMA));
    printk("risc_hccr=%x\n", ISP_READ(isp, HCCR));

    if (IS_SCSI(isp)) {
        ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
        printk(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
            ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
            ISP_READ(isp, CDMA_FIFO_STS));
        printk(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
            ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
            ISP_READ(isp, DDMA_FIFO_STS));
        printk(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
            ISP_READ(isp, SXP_INTERRUPT), ISP_READ(isp, SXP_GROSS_ERR),
            ISP_READ(isp, SXP_PINS_CTRL));
        ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
    }
    printk(" mbox regs: %x %x %x %x %x\n",
        ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
        ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
        ISP_READ(isp, OUTMAILBOX4));
    printk(" PCI Status Command/Status=%x\n", csr);
}

#ifdef MODULE
MODULE_PARM(isp_pci_mapmem, "i");
#endif
/*
 * Local variables:
 * mode: c
 * c-indent-level: 4
 * c-brace-imaginary-offset: 0
 * c-brace-offset: -4
 * c-argdecl-indent: 4
 * c-label-offset: -4
 * c-continued-statement-offset: 4
 * c-continued-brace-offset: 0
 * End:
 */