📄 scsi_resource.c
字号:
/*
 * packet free
 *
 * Release a SCSI packet previously allocated by the packet allocator.
 * Runs at splr(scsi_spl) for the whole body: the free list (scsibase),
 * the sleeper-wakeup flag (scsi_cmdwake) and the packet wait queue
 * (scpq) are all interrupt-shared state.
 */
void
scsi_std_pktfree(pkt)
struct scsi_pkt *pkt;
{
	register s = splr(scsi_spl);
	register struct scsi_cmd *sp = (struct scsi_cmd *) pkt;

	/*
	 * If the CDB or the status block was allocated externally
	 * (too big for the space embedded in the scsi_cmd), give
	 * that memory back first.
	 */
	if (sp->cmd_flags & CFLAG_CDBEXTERN) {
		(void) kmem_free_intr((caddr_t) sp->cmd_pkt.pkt_cdbp,
		    (unsigned int) sp->cmd_cdblen);
	}
	if (sp->cmd_flags & CFLAG_SCBEXTERN) {
		(void) kmem_free_intr((caddr_t) sp->cmd_pkt.pkt_scbp,
		    (unsigned int) sp->cmd_scblen);
	}

	/*
	 * free the packet: push it on the head of the free list
	 * (the list is threaded through pkt_ha_private).
	 */
	sp->cmd_pkt.pkt_ha_private = (opaque_t) scsibase;
	scsibase = (struct scsi_cmd *) pkt;

	/* Wake any allocator sleeping for a free packet. */
	if (scsi_cmdwake) {
		scsi_cmdwake = 0;
		wakeup((caddr_t)&scsibase);
	}

	/*
	 * Drain the packet wait queue: call back waiters until one
	 * reports failure (returns 0), which means resources ran out
	 * again and the rest must keep waiting.
	 */
	while (scpq.qlen != 0) {
		register func_t funcp;

		funcp = scq_retrieve(&scpq);
		if ((*funcp)() == 0)
			break;
	}
	(void) splx(s);
}

/*
 *
 * Dma resource allocation
 *
 */

/* DVMA[] bounds: [BDVMA, EDVMA) is the kernel DVMA virtual range. */
#define BDVMA ((u_long) &DVMA[0])
#define EDVMA ((u_long) &DVMA[ctob(dvmasize)])

/* True iff [addr, addr+count) lies entirely inside the DVMA range. */
#define DVMA_ADDR(addr, count) \
	(((u_long)addr) >= BDVMA && ((u_long)addr) < EDVMA && \
	(((u_long)addr)+count) >= BDVMA && (((u_long)addr)+count-1) < EDVMA)

#ifdef sun4c
/* True iff addr falls in the kernel heap [Sysbase, Syslimit). */
#define SYS_VRANGE(addr) ((u_int)addr >= (u_int) Sysbase && \
			(u_int)addr < (u_int) Syslimit)
/* True iff bp maps plain kernel-heap memory (not pageio/phys I/O). */
#define SYS_VADDR(bp) (((bp->b_flags & (B_PAGEIO|B_PHYS)) == 0) && \
			SYS_VRANGE(bp->b_un.b_addr))
/*
 * NOTE(review): bare tokens after #endif (here and below) are a
 * pre-ANSI extension; a standard-conforming compiler wants them
 * wrapped in a comment.
 */
#endif sun4c

/*
 * Establish a DVMA mapping for the data described by the buf that
 * dmatoken points at, recording it in cmd_mapping/cmd_dmacount.
 *
 * Returns the packet on success, or NULL if DVMA is exhausted and the
 * caller was queued (or asked not to be, via NULL_FUNC) for a later
 * callback.  callback == SLEEP_FUNC means the caller may block inside
 * mb_mapalloc.
 */
struct scsi_pkt *
scsi_std_dmaget(pkt, dmatoken, callback)
struct scsi_pkt *pkt;
opaque_t dmatoken;
int (*callback)();
{
	struct buf *bp = (struct buf *) dmatoken;
	struct scsi_cmd *cmd = (struct scsi_cmd *) pkt;

	/*
	 * clear any stale flags
	 */
	cmd->cmd_flags &= ~(CFLAG_DMAKEEP|CFLAG_DMASEND|CFLAG_DMAVALID);

	/*
	 * We assume that if the address is already in the range of
	 * kernel address DVMA..DVMA+ctob(dvmasize) that the mapping has
	 * already been established by someone (so we don't have to).
	 *
	 * If this is the case it is also true that we don't have
	 * release the mapping when we're done (i.e., when scsi_std_dmafree
	 * is called), so we'll mark this mapping to not be released.
	 *
	 * Also, if this is a sun4c, and the I/O is too/from the kernel
	 * heap, we can just use that (on a sun4c, I/O is valid for what-
	 * ever is in context 0 (kernel context)).
	 *
	 */
	if (DVMA_ADDR(bp->b_un.b_addr, bp->b_bcount)) {
		/* Already DVMA-mapped: store the offset, don't free later. */
		cmd->cmd_mapping = (((u_long)bp->b_un.b_addr)-((u_long)DVMA));
		cmd->cmd_flags |= CFLAG_DMAKEEP;
#ifdef sun4c
	} else if (SYS_VADDR(bp)) {
		/*
		 * I don't believe that I need to lock the address range
		 * down if it's in the kernel heap.
		 */
		cmd->cmd_mapping = (u_long) bp->b_un.b_addr;
		cmd->cmd_flags |= CFLAG_DMAKEEP;
#endif sun4c
	} else if (callback == SLEEP_FUNC) {
		/* Caller may sleep: let mb_mapalloc block until DVMA frees. */
		cmd->cmd_mapping = mb_mapalloc(DMA, bp, MDR_BIGSBUS,
		    (int (*)())0, (caddr_t)0);
	} else {
#ifdef TEST_SPLS
		/*
		 * Debug build: rotate the spl level upward on each call
		 * (capped at splvm) to shake out priority-level bugs.
		 */
		register s, ipl;
		static int last_spl = -1;

		if (last_spl == -1) {
			last_spl = scsi_spl;
		}
		s = splr(last_spl);
		ipl = spltoipl(last_spl) + 1;
		if (ipl > spltoipl(splvm_val)) {
			last_spl = scsi_spl;
		} else
			last_spl = ipltospl(ipl);
#else TEST_SPLS
		register int s = splr(scsi_spl);
#endif TEST_SPLS
		/*
		 * If the DVMA wait queue is empty, or we're in the middle
		 * of our own callback (sfield()), call mb_mapalloc for
		 * a mapping. If that fails, store up our caller to be
		 * called back later when DVMA becomes available.
		 *
		 * If the DVMA wait queue is non-empty already, store
		 * up our caller so it can be called later back when DVMA
		 * becomes available.
		 *
		 * Now if our caller had specified NULL_FUNC, we do it
		 * slightly differently- if we don't get resources,
		 * then we arrange to field a dummy callback (that
		 * goes nowhere).
		 */
		if (scdq.incallback || scdq.qlen == 0) {
			cmd->cmd_mapping = mb_mapalloc(DMA, bp,
			    (MB_CANTWAIT | MDR_BIGSBUS),
			    (callback == NULL_FUNC) ? NULL_FUNC : sfield,
			    (caddr_t) 0);
			if (cmd->cmd_mapping == 0) {
				if (callback != NULL_FUNC)
					scq_store(&scdq, (func_t)callback);
				(void) splx(s);
				return ((struct scsi_pkt *) 0);
			}
		} else {
			/* Others already waiting: queue behind them. */
			if (callback != NULL_FUNC)
				scq_store(&scdq, (func_t)callback);
			(void) splx(s);
			return ((struct scsi_pkt *) 0);
		}
		(void) splx(s);
	}

	/* Mapping established: record length and direction. */
	cmd->cmd_dmacount = bp->b_bcount;
	if ((bp->b_flags & B_READ) == 0)
		cmd->cmd_flags |= CFLAG_DMASEND;
	cmd->cmd_flags |= CFLAG_DMAVALID;
	return ((struct scsi_pkt *) cmd);
}

/*
 * DVMA-available callback handed to mb_mapalloc.  Drains the DVMA
 * wait queue (scdq), re-invoking each queued waiter.  Returns
 * DVMA_RUNOUT if a waiter fails because DVMA ran out again, else 0.
 */
/*ARGSUSED*/
static int
sfield (arg)
caddr_t arg;
{
	register s = splr(scsi_spl);

	/* Flag re-entry so scsi_std_dmaget knows it's inside us. */
	scdq.incallback = 1;
	while (scdq.qlen != 0) {
		register func_t funcp;
		register u_int lastlen;

		/*
		 * Latch up the current queue length,
		 * because scq_retrieve will decrement it.
		 */
		lastlen = scdq.qlen;
		funcp = scq_retrieve(&scdq);
		if ((*funcp)() == 0) {
			/*
			 * The target driver's allocation failed. Why?
			 * If it failed due to packet allocation failure,
			 * we can continue on. If it failed due to DVMA
			 * allocation failure, we have to quit now and
			 * let the mb code know that DVMA has run out
			 * again. If the last dma queue length is less
			 * than or equal to the now current dma queue
			 * length, then the allocation failure was
			 * due to DVMA running out again.
			 */
			if (lastlen > scdq.qlen) {
				continue;
			}
			scdq.incallback = 0;
			(void) splx(s);
			return (DVMA_RUNOUT);
		}
	}
	scdq.incallback = 0;
	(void) splx(s);
	return (0);
}

/*
 * Release the DVMA mapping recorded in a packet by scsi_std_dmaget.
 * Mappings marked CFLAG_DMAKEEP were never ours to release; on a
 * sun4c with a virtual address cache, a kept kernel-heap read mapping
 * instead gets its cache lines synced page by page.
 */
void
scsi_std_dmafree(pkt)
struct scsi_pkt *pkt;
{
	struct scsi_cmd *cmd = (struct scsi_cmd *) pkt;

	/*
	 * we don't need an spl here because mb_mapfree does that for us.
	 */
	if ((cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
		return;
	}
	if ((cmd->cmd_flags & CFLAG_DMAKEEP) == 0) {
#ifdef TEST_SPLS
		/* Debug build: same rotating-spl exercise as in dmaget. */
		register s, ipl;
		static int last_spl = -1;

		if (last_spl == -1) {
			last_spl = scsi_spl;
		}
		s = splr(last_spl);
		ipl = spltoipl(last_spl) + 1;
		if (ipl > spltoipl(splvm_val)) {
			last_spl = scsi_spl;
		} else
			last_spl = ipltospl(ipl);
		mb_mapfree(DMA, (int *)&cmd->cmd_mapping);
		(void) splx(s);
#else TEST_SPLS
		mb_mapfree(DMA, (int *)&cmd->cmd_mapping);
#endif TEST_SPLS
#if defined(sun4c) && defined(VAC)
	/*
	 * NOTE(review): the #if/#endif placement is deliberate - when
	 * this branch compiles out, the final `}` below still closes
	 * the CFLAG_DMAKEEP branch above.
	 */
	} else if (vac && (cmd->cmd_flags & CFLAG_DMASEND) == 0 &&
	    SYS_VRANGE(cmd->cmd_mapping)) {
		extern u_int hat_getkpfnum();
		extern void hat_vacsync();
		register addr_t vacaddr;

		/* Sync the VAC for every page the DMA read touched. */
		vacaddr = (addr_t) (cmd->cmd_mapping & MMU_PAGEMASK);
		while (vacaddr <
		    (addr_t) (cmd->cmd_mapping + cmd->cmd_dmacount)) {
			hat_vacsync(hat_getkpfnum(vacaddr));
			vacaddr += MMU_PAGESIZE;
		}
#endif defined(sun4c) && defined(VAC)
	}
	cmd->cmd_flags &= ~CFLAG_DMAVALID;
	cmd->cmd_mapping = cmd->cmd_dmacount = 0;
}

/*
 * Store a queue element, returning if the funcp is already queued.
 * Always called at splvm().
 *
 * The queue is a fixed ring of SCQLEN slots; duplicates are
 * suppressed by a linear scan first.
 */
static void
scq_store(scq, funcp)
register struct scq *scq;
func_t funcp;
{
	register int i;

	for (i = 0; i < SCQLEN; i++) {
		if (scq->funcp[i] == funcp) {
			return;
		}
	}
	scq->ncalls++;
	scq->qlen++;
	scq->funcp[scq->qstore] = funcp;
	scq->qstore = (scq->qstore + 1) & SCQLENMASK;
	ASSERT(scq->qstore != scq->qretrv);
}

/*
 * Retrieve the queue element at the head of the wait queue.
 * Always called at splvm().
 *
 * NOTE(review): qretrv is advanced BEFORE the slot is read, while
 * scq_store writes before advancing qstore - this only lines up if
 * the indices are initialized one apart; verify against the queue
 * init code (not visible in this file chunk).
 */
static func_t
scq_retrieve(scq)
register struct scq *scq;
{
	register func_t funcp;

	scq->qlen--;
	scq->qretrv = (scq->qretrv + 1) & SCQLENMASK;
	ASSERT(scq->qretrv != scq->qstore);
	funcp = scq->funcp[scq->qretrv];
	scq->funcp[scq->qretrv] = NULL;
	return (funcp);
}

/*
 * Check a passed active data pointer for being within range
 *
 * Returns 0 if cmd_data lies outside the mapped window
 * [cmd_mapping, cmd_mapping+cmd_dmacount); otherwise returns
 * max_xfer clamped to the bytes remaining in the window.
 */
int
scsi_chkdma(sp, max_xfer)
register struct scsi_cmd *sp;
register int max_xfer;
{
	register u_long maxv = max_xfer;

	if (sp->cmd_data < sp->cmd_mapping)
		return (0);
	else if (sp->cmd_data >= (sp->cmd_mapping + sp->cmd_dmacount))
		return (0);
	else if ((sp->cmd_data + maxv) >= sp->cmd_mapping+sp->cmd_dmacount) {
		/* Transfer would run past the window: clamp to the end. */
		return (sp->cmd_mapping + sp->cmd_dmacount - sp->cmd_data);
	} else {
		return ((int)maxv);
	}
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -