
isp_pci.c

This Linux driver source is fairly comprehensive and essentially complete. It is written in C. Owing to time constraints I have not tested it myself, but even as reference material it is very useful.
Language: C
Page 1 of 4
	    tmpconf |= BIU_PCI1080_CONF1_SXP0;
	}
	ispregwr(pcs, IspVirt2Off(pcs, BIU_CONF1), tmpconf);
    } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
	oldconf = ispregrd(pcs, IspVirt2Off(pcs, BIU_CONF1));
	ispregwr(pcs, IspVirt2Off(pcs, BIU_CONF1),
	    oldconf | BIU_PCI1080_CONF1_DMA);
    }
    ispregwr(pcs, IspVirt2Off(pcs, regoff), val);
    if (oldconf) {
	ispregwr(pcs, IspVirt2Off(pcs, BIU_CONF1), oldconf);
    }
}
#endif
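
/*
 * Allocate and map the DMA areas the ISP uses for mailbox-style I/O:
 * the xflist command-tracking array, the request and result queues,
 * and (for Fibre Channel adapters) a scratch area. On 2.4 and later
 * kernels the queues come from pci_alloc_consistent(); on older
 * kernels they are page allocations mapped with virt_to_bus(). The
 * queues must sit on a 64 byte boundary and the scratch area on an
 * 8 byte boundary. Returns 0 on success, 1 on any failure.
 */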
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
    if (isp->isp_xflist == NULL) {
	size_t amt = isp->isp_maxcmds * sizeof (XS_T **);
	isp->isp_xflist = kmalloc(amt, GFP_KERNEL);
	if (isp->isp_xflist == NULL) {
	    isp_prt(isp, ISP_LOGERR, "unable to allocate xflist array");
	    return (1);
	}
	MEMZERO(isp->isp_xflist, amt);
    }
    if (isp->isp_rquest == NULL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	dma_addr_t busaddr;
	isp->isp_rquest =
	    pci_alloc_consistent(pcs->pci_dev,
		RQUEST_QUEUE_LEN(isp) * QENTRY_LEN, &busaddr);
	if (isp->isp_rquest == NULL) {
	    isp_prt(isp, ISP_LOGERR, "unable to allocate request queue");
	    return (1);
	}
	isp->isp_rquest_dma = busaddr;
#else
	isp->isp_rquest = (caddr_t) GetPages(IspOrder(RQUEST_QUEUE_LEN(isp)));
	if (isp->isp_rquest == NULL) {
	    isp_prt(isp, ISP_LOGERR, "unable to allocate request queue");
	    return (1);
	}
	/*
	 * Map the Request queue.
	 */
	isp->isp_rquest_dma = virt_to_bus(isp->isp_rquest);
#endif
	if (isp->isp_rquest_dma & 0x3f) {
	    isp_prt(isp, ISP_LOGERR, "Request Queue not on 64 byte boundary");
	    return (1);
	}
	MEMZERO(isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
    }
    if (isp->isp_result == NULL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	dma_addr_t busaddr;
	isp->isp_result =
	    pci_alloc_consistent(pcs->pci_dev,
		RESULT_QUEUE_LEN(isp) * QENTRY_LEN, &busaddr);
	if (isp->isp_result == NULL) {
	    isp_prt(isp, ISP_LOGERR, "unable to allocate result queue");
	    return (1);
	}
	isp->isp_result_dma = busaddr;
#else
	isp->isp_result = (caddr_t) GetPages(IspOrder(RESULT_QUEUE_LEN(isp)));
	if (isp->isp_result == NULL) {
	    isp_prt(isp, ISP_LOGERR, "unable to allocate result queue");
	    free_pages((unsigned long)isp->isp_rquest,
		IspOrder(RQUEST_QUEUE_LEN(isp)));
	    return (1);
	}
	/*
	 * Map the result queue.
	 */
	isp->isp_result_dma = virt_to_bus(isp->isp_result);
#endif
	if (isp->isp_result_dma & 0x3f) {
	    isp_prt(isp, ISP_LOGERR, "Result Queue not on 64 byte boundary");
	    return (1);
	}
	MEMZERO(isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
    }
    if (IS_FC(isp)) {
	fcparam *fcp = isp->isp_param;
	if (fcp->isp_scratch == NULL) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,92)
	    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	    dma_addr_t busaddr;
	    fcp->isp_scratch =
		pci_alloc_consistent(pcs->pci_dev, ISP2100_SCRLEN, &busaddr);
	    if (fcp->isp_scratch == NULL) {
		isp_prt(isp, ISP_LOGERR, "unable to allocate scratch space");
		return (1);
	    }
	    fcp->isp_scdma = busaddr;
#else
	    /*
	     * Just get a page....
	     */
	    fcp->isp_scratch = (void *) GetPages(1);
	    if (fcp->isp_scratch == NULL) {
		isp_prt(isp, ISP_LOGERR, "unable to allocate scratch space");
		return (1);
	    }
	    fcp->isp_scdma = virt_to_bus((void *)fcp->isp_scratch);
#endif
	    MEMZERO(fcp->isp_scratch, ISP2100_SCRLEN);
	    if (fcp->isp_scdma & 0x7) {
		isp_prt(isp, ISP_LOGERR, "scratch space not 8 byte aligned");
		return (1);
	    }
	}
    }
    return (0);
}

#ifdef LINUX_ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel) send status too, if all went well.
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead, with the whole transfer
 * mapped and a pointer to a partially filled in, already allocated request
 * queue entry. We finish the job.
 */
static int tdma_mk(struct ispsoftc *, tmd_cmd_t *, ct_entry_t *,
    u_int16_t *, u_int16_t);
static int tdma_mkfc(struct ispsoftc *, tmd_cmd_t *, ct2_entry_t *,
    u_int16_t *, u_int16_t);

#define	STATUS_WITH_DATA	1
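
/*
 * Build and queue one or more CTIOs for a single target-mode transfer on
 * a SCSI (non-FC) adapter. 'cto' is the partially filled in CTIO template
 * from the caller; '*nxtip' is the next free request queue index, updated
 * on return; 'optr' is the consumer index, used to detect queue overflow.
 * Returns CMD_QUEUED on success, CMD_COMPLETE on a mapping failure, or
 * CMD_EAGAIN if the request queue fills up.
 */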
static int
tdma_mk(struct ispsoftc *isp, tmd_cmd_t *tcmd, ct_entry_t *cto,
    u_int16_t *nxtip, u_int16_t optr)
{
    static const char ctx[] =
	"CTIO[%x] lun %d for iid%d flgs 0x%x sts 0x%x ssts 0x%x res %u %s";
    struct scatterlist *sg;
    ct_entry_t *qe;
    u_int8_t scsi_status;
    u_int16_t curi, nxti, handle;
    u_int32_t sflags;
    int32_t resid;
    int nth_ctio, nctios, send_status, nseg;

    curi = isp->isp_reqidx;
    qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);

    cto->ct_xfrlen = 0;
    cto->ct_seg_count = 0;
    cto->ct_header.rqs_entry_count = 1;
    MEMZERO(cto->ct_dataseg, sizeof (cto->ct_dataseg));

    if (tcmd->cd_xfrlen == 0) {
	ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
	isp_prt(isp, ISP_LOGTDEBUG1, ctx, cto->ct_fwhandle, (int) tcmd->cd_lun,
	    (int) cto->ct_iid, cto->ct_flags, cto->ct_status,
	    cto->ct_scsi_status, cto->ct_resid, "<END>");
	isp_put_ctio(isp, cto, qe);
	return (CMD_QUEUED);
    }

    sg = tcmd->cd_data;
    nseg = 0;
    resid = (int32_t) tcmd->cd_xfrlen;
    while (resid > 0) {
	nseg++;
	resid -= sg->length;
	sg++;
    }
    sg = tcmd->cd_data;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
    {
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int new_seg_cnt;
	new_seg_cnt = pci_map_sg(pcs->pci_dev, sg, nseg,
	  (cto->ct_flags & CT_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
	if (new_seg_cnt == 0) {
	    isp_prt(isp, ISP_LOGWARN, "unable to dma map request");
	    cto->ct_resid = -ENOMEM;
	    return (CMD_COMPLETE);
	}
	if (new_seg_cnt != nseg) {
	    isp_prt(isp, ISP_LOGERR, "new seg cnt != old");
	    cto->ct_resid = -EINVAL;
	    return (CMD_COMPLETE);
	}
    }
#endif
    nctios = nseg / ISP_RQDSEG;
    if (nseg % ISP_RQDSEG) {
	nctios++;
    }

    /*
     * Save handle, and potentially any SCSI status, which
     * we'll reinsert on the last CTIO we're going to send.
     */
    handle = cto->ct_syshandle;
    cto->ct_syshandle = 0;
    cto->ct_header.rqs_seqno = 0;
    send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
    if (send_status) {
	sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
	cto->ct_flags &= ~(CT_SENDSTATUS|CT_CCINCR);
	/*
	 * Preserve residual.
	 */
	resid = cto->ct_resid;
	/*
	 * Save actual SCSI status.
	 */
	scsi_status = cto->ct_scsi_status;
#ifndef STATUS_WITH_DATA
	sflags |= CT_NO_DATA;
	/*
	 * We can't do a status at the same time as a data CTIO, so
	 * we need to synthesize an extra CTIO at this level.
	 */
	nctios++;
#endif
    } else {
	sflags = scsi_status = resid = 0;
    }
    cto->ct_resid = 0;
    cto->ct_scsi_status = 0;
    nxti = *nxtip;
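
    /*
     * Emit the CTIOs. Each CTIO carries at most ISP_RQDSEG data segments;
     * only the last one in the sequence gets the saved handle, sequence
     * number, and (if requested) the SCSI status and residual, so that
     * completion of the whole transfer is signalled exactly once.
     */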
    for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
	int seglim;

	seglim = nseg;
	if (seglim) {
	    int seg;
	    if (seglim > ISP_RQDSEG)
		seglim = ISP_RQDSEG;
	    for (seg = 0; seg < seglim; seg++, nseg--) {
		/*
		 * Unlike normal initiator commands, we don't do
		 * any swizzling here.
		 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
		cto->ct_dataseg[seg].ds_base = virt_to_bus(sg->address);
#else
		cto->ct_dataseg[seg].ds_base = (u_int32_t) sg_dma_address(sg);
#endif
		cto->ct_dataseg[seg].ds_count = (u_int32_t) sg->length;
		cto->ct_xfrlen += sg->length;
		sg++;
	    }
	    cto->ct_seg_count = seg;
	} else {
	    /*
	     * This case should only happen when we're
	     * sending an extra CTIO with final status.
	     */
	    if (send_status == 0) {
		isp_prt(isp, ISP_LOGERR,
		    "tdma_mk ran out of segments, no status to send");
		return (CMD_EAGAIN);
	    }
	}

	/*
	 * At this point, the fields ct_lun, ct_iid, ct_tagval, ct_tagtype,
	 * and ct_timeout have been carried over unchanged from what our
	 * caller had set.
	 *
	 * The dataseg fields and the seg_count fields we just got through
	 * setting. The data direction we've preserved all along, and we
	 * only clear it if we're now sending status.
	 */
	if (nth_ctio == nctios - 1) {
	    /*
	     * We're the last in a sequence of CTIOs, so mark this
	     * CTIO and save the handle to the command such that when
	     * this CTIO completes we can free dma resources and
	     * do whatever else we need to do to finish the rest
	     * of the command.
	     */
	    cto->ct_syshandle = handle;
	    cto->ct_header.rqs_seqno = 1;
	    if (send_status) {
		cto->ct_scsi_status = scsi_status;
		cto->ct_flags |= sflags;
		cto->ct_resid = resid;
	    }
	    if (send_status) {
		isp_prt(isp, ISP_LOGTDEBUG1, ctx,
		    cto->ct_fwhandle, (int) tcmd->cd_lun, (int) cto->ct_iid,
		    cto->ct_flags, cto->ct_status, cto->ct_scsi_status,
		    cto->ct_resid, "<END>");
	    } else {
		isp_prt(isp, ISP_LOGTDEBUG1, ctx,
		    cto->ct_fwhandle, (int) tcmd->cd_lun, (int) cto->ct_iid,
		    cto->ct_flags, cto->ct_status, cto->ct_scsi_status,
		    cto->ct_resid, "<MID>");
	    }
	    isp_put_ctio(isp, cto, qe);
	    ISP_TDQE(isp, "last tdma_mk", curi, cto);
	    if (nctios > 1) {
		MEMORYBARRIER(isp, SYNC_REQUEST, curi, QENTRY_LEN);
	    }
	} else {
	    ct_entry_t *oqe = qe;

	    /*
	     * Make sure handle fields are clean
	     */
	    cto->ct_syshandle = 0;
	    cto->ct_header.rqs_seqno = 0;
	    isp_prt(isp, ISP_LOGTDEBUG1,
		"CTIO[%x] lun%d for ID%d ct_flags 0x%x",
		cto->ct_fwhandle, (int) tcmd->cd_lun,
		(int) cto->ct_iid, cto->ct_flags);

	    /*
	     * Get a new CTIO
	     */
	    qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
	    nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
	    if (nxti == optr) {
		isp_prt(isp, ISP_LOGERR, "queue overflow in tdma_mk");
		return (CMD_EAGAIN);
	    }

	    /*
	     * Now that we're done with the old CTIO,
	     * flush it out to the request queue.
	     */
	    ISP_TDQE(isp, "tdma_mk", curi, cto);
	    isp_put_ctio(isp, cto, oqe);
	    if (nth_ctio != 0) {
		MEMORYBARRIER(isp, SYNC_REQUEST, curi, QENTRY_LEN);
	    }
	    curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

	    /*
	     * Reset some fields in the CTIO so we can reuse it
	     * for the next one we'll flush to the request queue.
	     */
	    cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
	    cto->ct_header.rqs_entry_count = 1;
	    cto->ct_header.rqs_flags = 0;
	    cto->ct_status = 0;
	    cto->ct_scsi_status = 0;
	    cto->ct_xfrlen = 0;
	    cto->ct_resid = 0;
	    cto->ct_seg_count = 0;
	    MEMZERO(cto->ct_dataseg, sizeof (cto->ct_dataseg));
	}
    }
    *nxtip = nxti;
    return (CMD_QUEUED);
}
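
/*
 * Fibre Channel version of the above: build and queue CTIO2 entries.
 * A zero-length transfer must be a MODE1 (status only) CTIO2; data
 * moves in MODE0 CTIO2s. ct_reloff is adjusted for each data CTIO2
 * after the first so the FCP DATA IUs carry the correct relative
 * offset (they can arrive at the other end out of order). If status
 * is CHECK CONDITION with valid sense data, a final CTIO2 carrying
 * just the status and sense is appended.
 */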
static int
tdma_mkfc(struct ispsoftc *isp, tmd_cmd_t *tcmd, ct2_entry_t *cto,
    u_int16_t *nxtip, u_int16_t optr)
{
    static const char ctx[] =
	"CTIO2[%x] lun %d for iid %d flgs 0x%x sts 0x%x ssts 0x%x res %d %s";
    u_int8_t sense[QLTM_SENSELEN];
    struct scatterlist *sg;
    ct2_entry_t *qe;
    u_int16_t send_status, scsi_status, send_sense, handle;
    u_int16_t curi, nxti;
    int32_t resid;
    int nth_ctio, nctios, nseg;

    curi = isp->isp_reqidx;
    qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

    if (tcmd->cd_xfrlen == 0) {
	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
	    isp_prt(isp, ISP_LOGERR,
		"tdma_mkfc, a status CTIO2 without MODE1 set (0x%x)",
		cto->ct_flags);
	    cto->ct_resid = -EINVAL;
	    return (CMD_COMPLETE);
	}
	cto->ct_header.rqs_entry_count = 1;
	cto->ct_header.rqs_seqno = 1;
	/* ct_syshandle contains the synchronization handle set by caller */
	/*
	 * We preserve ct_lun, ct_iid, ct_rxid. We set the data movement
	 * flags to NO DATA and clear relative offset flags. We preserve
	 * ct_resid and the response area.
	 */
	cto->ct_flags |= CT2_NO_DATA;
	if (cto->ct_resid > 0)
	    cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER;
	else if (cto->ct_resid < 0)
	    cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER;
	cto->ct_seg_count = 0;
	cto->ct_reloff = 0;
	isp_prt(isp, ISP_LOGTDEBUG1, ctx, cto->ct_rxid, (int) tcmd->cd_lun,
	    cto->ct_iid, cto->ct_flags, cto->ct_status,
	    cto->rsp.m1.ct_scsi_status, cto->ct_resid, "<END>");
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "tdma_mkfc[no data]", curi, qe);
	return (CMD_QUEUED);
    }

    if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
	isp_prt(isp, ISP_LOGERR,
	    "tdma_mkfc, a data CTIO2 without MODE0 set (0x%x)", cto->ct_flags);
	cto->ct_resid = -EINVAL;
	return (CMD_COMPLETE);
    }

    sg = tcmd->cd_data;
    nseg = 0;
    resid = (int32_t) tcmd->cd_xfrlen;
    while (resid > 0) {
	nseg++;
	resid -= sg->length;
	sg++;
    }
    sg = tcmd->cd_data;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
    {
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int new_seg_cnt;
	new_seg_cnt = pci_map_sg(pcs->pci_dev, sg, nseg,
	  (cto->ct_flags & CT2_DATA_IN)? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
	if (new_seg_cnt == 0) {
	    isp_prt(isp, ISP_LOGWARN, "unable to dma map request");
	    cto->ct_resid = -ENOMEM;
	    return (CMD_COMPLETE);
	}
	if (new_seg_cnt != nseg) {
	    isp_prt(isp, ISP_LOGERR, "new seg cnt != old");
	    cto->ct_resid = -EINVAL;
	    return (CMD_COMPLETE);
	}
    }
#endif
    nctios = nseg / ISP_RQDSEG_T2;
    if (nseg % ISP_RQDSEG_T2) {
	nctios++;
    }

    /*
     * Save the handle, status, reloff, and residual. We'll reinsert the
     * handle into the last CTIO2 we're going to send, and reinsert status
     * and residual (and possibly sense data) if that's to be sent as well.
     *
     * We preserve ct_reloff and adjust it for each data CTIO2 we send past
     * the first one. This is needed so that the FCP DATA IUs being sent out
     * have the correct offset (they can arrive at the other end out of
     * order).
     */
    handle = cto->ct_syshandle;
    cto->ct_syshandle = 0;
    send_status = (cto->ct_flags & CT2_SENDSTATUS) != 0;
    if (send_status) {
	cto->ct_flags &= ~(CT2_SENDSTATUS|CT2_CCINCR);
	/*
	 * Preserve residual.
	 */
	resid = cto->ct_resid;
	/*
	 * Save actual SCSI status. We'll reinsert the CT2_SNSLEN_VALID
	 * bit later if appropriate.
	 */
	scsi_status = cto->rsp.m0.ct_scsi_status & 0xff;
	send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID;
	/*
	 * If we're sending status, have a CHECK CONDITION, and
	 * have sense data, we send one more CTIO2 with just the
	 * status and sense data. The upper layers have stashed
	 * the sense data in the dataseg structure for us.
	 */
	if ((scsi_status & 0xf) == SCSI_CHECK && send_sense) {
	    MEMCPY(sense, cto->rsp.m0.ct_dataseg, QLTM_SENSELEN);
	    nctios++;
	}
    } else {
	scsi_status = send_sense = resid = 0;
