/* sym_hipd.c — SYM53C8XX (sym2) SCSI host driver, HCB/queue handling (excerpt) */
/*
 *  Insert a job into the start queue.
 *
 *  The start queue is a circular array read by the SCRIPTS
 *  processor.  The new CCB is made visible in two steps: the idle
 *  task is first written at the *next* slot, then the CCB bus
 *  address overwrites the current slot, with a write barrier in
 *  between so the chip never observes a half-built queue.
 */
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
{
	u_short	qidx;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 *  If the previously queued CCB is not yet done,
	 *  set the IARB hint. The SCRIPTS will go with IARB
	 *  for this job when starting the previous one.
	 *  We leave devices a chance to win arbitration by
	 *  not using more than 'iarb_max' consecutive
	 *  immediate arbitrations.
	 */
	if (np->last_cp && np->iarb_count < np->iarb_max) {
		np->last_cp->host_flags |= HF_HINT_IARB;
		++np->iarb_count;
	}
	else
		np->iarb_count = 0;
	np->last_cp = cp;
#endif

#if SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 *  Make SCRIPTS aware of the 64 bit DMA
	 *  segment registers not being up-to-date.
	 */
	if (np->dmap_dirty)
		cp->host_xflags |= HX_DMAP_DIRTY;
#endif

	/*
	 *  Insert first the idle task and then our job.
	 *  The MBs should ensure proper ordering.
	 */
	qidx = np->squeueput + 2;	/* each queue entry is 2 words wide */
	if (qidx >= MAX_QUEUE*2) qidx = 0;	/* wrap the circular queue */

	np->squeue [qidx]	   = cpu_to_scr(np->idletask_ba);
	MEMORY_WRITE_BARRIER();
	np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);

	np->squeueput = qidx;

	if (DEBUG_FLAGS & DEBUG_QUEUE)
		printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput);

	/*
	 *  Script processor may be waiting for reselect.
	 *  Wake it up.
	 */
	MEMORY_WRITE_BARRIER();
	OUTB(np, nc_istat, SIGP|np->istat_sem);
}

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
/*
 *  Start next ready-to-start CCBs.
 *
 *  Moves up to 'maxn' CCBs from the per-LUN waiting queue onto the
 *  started queue and hands each one to the chip via
 *  sym_put_start_queue().  A CCB that cannot be started (tag/untag
 *  mixing, or the per-LUN tag limit reached) is pushed back to the
 *  head of the waiting queue and the loop stops.
 */
void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;

	/*
	 *  Paranoia, as usual. :-)
	 */
	assert(!lp->started_tags || !lp->started_no_tag);

	/*
	 *  Try to start as many commands as asked by caller.
	 *  Prevent from having both tagged and untagged
	 *  commands queued to the device at the same time.
	 */
	while (maxn--) {
		qp = sym_remque_head(&lp->waiting_ccbq);
		if (!qp)
			break;	/* waiting queue drained */
		cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq);
		if (cp->tag != NO_TAG) {
			/*
			 *  Tagged command: refuse if an untagged one is
			 *  outstanding or the tag limit is reached.
			 */
			if (lp->started_no_tag ||
			    lp->started_tags >= lp->started_max) {
				sym_insque_head(qp, &lp->waiting_ccbq);
				break;
			}
			lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba);
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTA_BA(np, resel_tag));
			++lp->started_tags;
		} else {
			/*
			 *  Untagged command: only one may be outstanding,
			 *  and never together with tagged commands.
			 */
			if (lp->started_no_tag || lp->started_tags) {
				sym_insque_head(qp, &lp->waiting_ccbq);
				break;
			}
			lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
			++lp->started_no_tag;
		}
		cp->started = 1;
		sym_insque_tail(qp, &lp->started_ccbq);
		sym_put_start_queue(np, cp);
	}
}
#endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */

/*
 *  The chip may have completed jobs. Look at the DONE QUEUE.
 *
 *  On paper, memory read barriers may be needed here to
 *  prevent out of order LOADs by the CPU from having
 *  prefetched stale data prior to DMA having occurred.
 */
static int sym_wakeup_done (struct sym_hcb *np)
{
	struct sym_ccb *cp;
	int i, n;
	u32 dsa;

	n = 0;
	i = np->dqueueget;

	/* MEMORY_READ_BARRIER(); */
	while (1) {
		dsa = scr_to_cpu(np->dqueue[i]);
		if (!dsa)
			break;	/* zero entry: no more completed jobs */
		np->dqueue[i] = 0;	/* consume the slot */
		if ((i = i+2) >= MAX_QUEUE*2)
			i = 0;	/* wrap the circular done queue */

		cp = sym_ccb_from_dsa(np, dsa);
		if (cp) {
			MEMORY_READ_BARRIER();
			sym_complete_ok (np, cp);
			++n;
		}
		else
			printf ("%s: bad DSA (%x) in done queue.\n",
				sym_name(np), (u_int) dsa);
	}
	np->dqueueget = i;

	/* Return the number of CCBs completed on this pass. */
	return n;
}

/*
 *  Complete all CCBs queued to the COMP queue.
 *
 *  These CCBs are assumed:
 *  - Not to be referenced either by devices or
 *    SCRIPTS-related queues and datas.
 *  - To have to be completed with an error condition
 *    or requeued.
 *
 *  The device queue freeze count is incremented
 *  for each CCB that does not prevent this.
 *  This function is called when all CCBs involved
 *  in error handling/recovery have been reaped.
*/
static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;

	while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) {
		struct scsi_cmnd *cmd;
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
		/* Leave quiet CCBs waiting for resources */
		if (cp->host_status == HS_WAIT)
			continue;
		cmd = cp->cmd;
		if (cam_status)
			sym_set_cam_status(cmd, cam_status);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
		/*
		 *  Soft errors are requeued on the per-LUN waiting
		 *  queue rather than completed to the upper layer.
		 */
		if (sym_get_cam_status(cmd) == DID_SOFT_ERROR) {
			struct sym_tcb *tp = &np->target[cp->target];
			struct sym_lcb *lp = sym_lp(tp, cp->lun);
			if (lp) {
				sym_remque(&cp->link2_ccbq);
				sym_insque_tail(&cp->link2_ccbq,
						&lp->waiting_ccbq);
				/*
				 *  Undo the started accounting done when the
				 *  CCB was handed to the chip.
				 */
				if (cp->started) {
					if (cp->tag != NO_TAG)
						--lp->started_tags;
					else
						--lp->started_no_tag;
				}
			}
			cp->started = 0;
			continue;
		}
#endif
		sym_free_ccb(np, cp);
		sym_xpt_done(np, cmd);
	}
}

/*
 *  Complete all active CCBs with error.
 *  Used on CHIP/SCSI RESET.
 */
static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status)
{
	/*
	 *  Move all active CCBs to the COMP queue
	 *  and flush this queue.
	 */
	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_flush_comp_queue(np, cam_status);
}

/*
 *  Start chip.
 *
 *  'reason' means:
 *     0: initialisation.
 *     1: SCSI BUS RESET delivered or received.
 *     2: SCSI BUS MODE changed.
 */
void sym_start_up (struct sym_hcb *np, int reason)
{
	int	i;
	u32	phys;

	/*
	 *  Reset chip if asked, otherwise just clear fifos.
	 */
	if (reason == 1)
		sym_soft_reset(np);
	else {
		OUTB(np, nc_stest3, TE|CSF);
		OUTONB(np, nc_ctest3, CLF);
	}

	/*
	 *  Clear Start Queue: every even slot points to the idle
	 *  task, every odd slot links to the next entry; the last
	 *  link wraps back to the queue's base to close the ring.
	 */
	phys = np->squeue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->squeue[i]   = cpu_to_scr(np->idletask_ba);
		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 *  Start at first entry.
	 */
	np->squeueput = 0;

	/*
	 *  Clear Done Queue (same ring layout as the start queue,
	 *  but data slots are zeroed, meaning "empty").
	 */
	phys = np->dqueue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->dqueue[i]   = 0;
		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 *  Start at first entry.
	 */
	np->dqueueget = 0;

	/*
	 *  Install patches in scripts.
	 *  This also let point to first position the start
	 *  and done queue pointers used from SCRIPTS.
	 */
	np->fw_patch(np);

	/*
	 *  Wakeup all pending jobs.
	 */
	sym_flush_busy_queue(np, DID_RESET);

	/*
	 *  Init chip.
	 */
	OUTB(np, nc_istat,  0x00);			/* Remove Reset, abort */
	INB(np, nc_mbox1);
	udelay(2000); /* The 895 needs time for the bus mode to settle */

	OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0);
					/* full arb., ena parity, par->ATN */
	OUTB(np, nc_scntl1, 0x00);		/* odd parity, and remove CRST!! */

	sym_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */

	OUTB(np, nc_scid  , RRE|np->myaddr);	/* Adapter SCSI address */
	OUTW(np, nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
	OUTB(np, nc_istat , SIGP	);		/* Signal Process */
	OUTB(np, nc_dmode , np->rv_dmode);	/* Burst length, dma mode */
	OUTB(np, nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */

	OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
	OUTB(np, nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
	OUTB(np, nc_ctest4, np->rv_ctest4);	/* Master parity checking */

	/* Extended Sreq/Sack filtering not supported on the C10 */
	if (np->features & FE_C10)
		OUTB(np, nc_stest2, np->rv_stest2);
	else
		OUTB(np, nc_stest2, EXT|np->rv_stest2);

	OUTB(np, nc_stest3, TE);			/* TolerANT enable */
	OUTB(np, nc_stime0, 0x0c);			/* HTH disabled  STO 0.25 sec */

	/*
	 *  For now, disable AIP generation on C1010-66.
	 */
	if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)
		OUTB(np, nc_aipcntl1, DISAIP);

	/*
	 *  C10101 rev. 0 errata.
	 *  Errant SGE's when in narrow. Write bits 4 & 5 of
	 *  STEST1 register to disable SGE. We probably should do
	 *  that from SCRIPTS for each selection/reselection, but
	 *  I just don't want. :)
	 */
	if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_33 &&
	    np->revision_id < 1)
		OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30);

	/*
	 *  DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
	 *  Disable overlapped arbitration for some dual function devices,
	 *  regardless revision id (kind of post-chip-design feature. ;-))
	 */
	if (np->device_id == PCI_DEVICE_ID_NCR_53C875)
		OUTB(np, nc_ctest0, (1<<5));
	else if (np->device_id == PCI_DEVICE_ID_NCR_53C896)
		np->rv_ccntl0 |= DPR;

	/*
	 *  Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
	 *  and/or hardware phase mismatch, since only such chips
	 *  seem to support those IO registers.
	 */
	if (np->features & (FE_DAC|FE_NOPM)) {
		OUTB(np, nc_ccntl0, np->rv_ccntl0);
		OUTB(np, nc_ccntl1, np->rv_ccntl1);
	}

#if SYM_CONF_DMA_ADDRESSING_MODE == 2
	/*
	 *  Set up scratch C and DRS IO registers to map the 32 bit
	 *  DMA address range our data structures are located in.
	 */
	if (np->use_dac) {
		np->dmap_bah[0] = 0;	/* ??? */
		OUTL(np, nc_scrx[0], np->dmap_bah[0]);
		OUTL(np, nc_drs, np->dmap_bah[0]);
	}
#endif

	/*
	 *  If phase mismatch handled by scripts (895A/896/1010),
	 *  set PM jump addresses.
	 */
	if (np->features & FE_NOPM) {
		OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle));
		OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle));
	}

	/*
	 *  Enable GPIO0 pin for writing if LED support from SCRIPTS.
	 *  Also set GPIO5 and clear GPIO6 if hardware LED control.
	 */
	if (np->features & FE_LED0)
		OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01);
	else if (np->features & FE_LEDC)
		OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20);

	/*
	 *  enable ints
	 */
	OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
	OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID);

	/*
	 *  For 895/6 enable SBMC interrupt and save current SCSI bus mode.
	 *  Try to eat the spurious SBMC interrupt that may occur when
	 *  we reset the chip but not the SCSI BUS (at initialization).
	 */
	if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
		OUTONW(np, nc_sien, SBMC);
		if (reason == 0) {
			INB(np, nc_mbox1);
			mdelay(100);
			INW(np, nc_sist);
		}
		np->scsi_mode = INB(np, nc_stest4) & SMODE;
	}

	/*
	 *  Fill in target structure.
	 *  Reinitialize usrsync.
	 *  Reinitialize usrwide.
	 *  Prepare sync negotiation according to actual SCSI bus mode.
	 */
	for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
		struct sym_tcb *tp = &np->target[i];

		tp->to_reset  = 0;
		tp->head.sval = 0;
		tp->head.wval = np->rv_scntl3;
		tp->head.uval = 0;
	}

	/*
	 *  Download SCSI SCRIPTS to on-chip RAM if present,
	 *  and start script processor.
	 *  We do the download preferently from the CPU.
	 *  For platforms that may not support PCI memory mapping,
	 *  we use simple SCRIPTS that performs MEMORY MOVEs.
	 */
	phys = SCRIPTA_BA(np, init);
	if (np->ram_ba) {
		if (sym_verbose >= 2)
			printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np));
		memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz);
		if (np->ram_ws == 8192) {
			memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz);
			phys = scr_to_cpu(np->scr_ram_seg);
			OUTL(np, nc_mmws, phys);
			OUTL(np, nc_mmrs, phys);
			OUTL(np, nc_sfs,  phys);
			phys = SCRIPTB_BA(np, start64);
		}
	}

	np->istat_sem = 0;

	OUTL(np, nc_dsa, np->hcb_ba);
	OUTL_DSP(np, phys);

	/*
	 *  Notify the XPT about the RESET condition.
	 */
	if (reason != 0)
		sym_xpt_async_bus_reset(np);
}

/*
 *  Switch trans mode for current job and its target.
 *
 *  NOTE(review): this function is truncated at the end of this
 *  excerpt; its remaining body is outside the visible source.
 */
static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs,
			u_char per, u_char wide, u_char div, u_char fak)
{
	SYM_QUEHEAD *qp;
	u_char sval, wval, uval;
	struct sym_tcb *tp = &np->target[target];

	assert(target == (INB(np, nc_sdid) & 0x0f));

	sval = tp->head.sval;
	wval = tp->head.wval;
	uval = tp->head.uval;

#if 0
	printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
		sval, wval, uval, np->rv_scntl3);
#endif
	/*
	 *  Set the offset.
	 */
	if (!(np->features & FE_C10))
		sval = (sval & ~0x1f) | ofs;
	else
		sval = (sval & ~0x3f) | ofs;

	/*
	 *  Set the sync divisor and extra clock factor.
	 */
	if (ofs != 0) {
		wval = (wval & ~0x70) | ((div+1) << 4);
		if (!(np->features & FE_C10))
			sval = (sval & ~0xe0) | (fak << 5);
		else {
			uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
			if (fak >= 1)
				uval |= (XCLKH_ST|XCLKH_DT);
			if (fak >= 2)
				uval |= (XCLKS_ST|XCLKS_DT);
		}
	}

	/*
	 *  Set the bus width.
	 */
	wval = wval & ~EWS;
	if (wide != 0)
		wval |= EWS;

	/*
	 *  Set misc. ultra enable bits.
	 */
	if (np->features & FE_C10) {
		uval = uval & ~(U3EN|AIPCKEN);
		if (opts) {
/* (non-source residue removed: code-viewer keyboard-shortcut help) */