📄 aic7xxx_osm.c
字号:
/*
 * NOTE(review): this chunk was recovered from a web code viewer that had
 * collapsed the original source onto a few very long lines; it is reflowed
 * here with comments added, tokens unchanged.  It is the Linux OSM
 * (OS-module) glue of the Adaptec aic7xxx SCSI driver for pre-2.6 kernels.
 * The fragment immediately below is the tail of a host-registration routine
 * whose beginning lies outside the visible region of this file.
 */
	/* Presumably IDs above 7 belong to bus B on TWIN controllers —
	 * the head of this condition is not visible here; confirm against
	 * the full file. */
	 && (ahc->features & AHC_TWIN) != 0) {
		channel = 1;
		target &= 0x7;
	}
	/*
	 * Skip our own ID.  Some Compaq/HP storage devices
	 * have enclosure management devices that respond to
	 * single bit selection (i.e. selecting ourselves).
	 * It is expected that either an external application
	 * or a modified kernel will be used to probe this
	 * ID if it is appropriate.  To accommodate these
	 * installations, ahc_linux_alloc_target() will allocate
	 * for our ID if asked to do so.
	 */
	if ((channel == 0 && target == ahc->our_id)
	 || (channel == 1 && target == ahc->our_id_b))
		continue;
	ahc_linux_alloc_target(ahc, channel, target);
	}
	ahc_intr_enable(ahc, TRUE);
	ahc_linux_start_dv(ahc);
	ahc_unlock(ahc, &s);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); /* XXX handle failure */
	scsi_scan_host(host);
#endif
	return (0);
}

/*
 * Return the total amount of physical memory in bytes.
 * si_meminfo() reports totalram in pages, so shift by PAGE_SHIFT.
 */
uint64_t
ahc_linux_get_memsize(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	return ((uint64_t)si.totalram << PAGE_SHIFT);
}

/*
 * Find the smallest available unit number to use
 * for a new device.  We don't just use a static
 * count to handle the "repeated hot-(un)plug"
 * scenario.
 */
static int
ahc_linux_next_unit(void)
{
	struct ahc_softc *ahc;
	int unit;

	unit = 0;
retry:
	/* Restart the scan from the head each time a collision is found,
	 * since ahc_tailq is not sorted by unit number. */
	TAILQ_FOREACH(ahc, &ahc_tailq, links) {
		if (ahc->unit == unit) {
			unit++;
			goto retry;
		}
	}
	return (unit);
}

/*
 * Place the SCSI bus into a known state by either resetting it,
 * or forcing transfer negotiations on the next command to any
 * target.
 */
void
ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
{
	int i;		/* first target ID needing forced renegotiation */
	int numtarg;	/* one past the last such target ID */

	i = 0;
	numtarg = 0;

	/* The aic7xxx_no_reset module option suppresses bus resets entirely. */
	if (aic7xxx_no_reset != 0)
		ahc->flags &= ~(AHC_RESET_BUS_A|AHC_RESET_BUS_B);

	if ((ahc->flags & AHC_RESET_BUS_A) != 0)
		ahc_reset_channel(ahc, 'A', /*initiate_reset*/TRUE);
	else
		/* No reset on A: renegotiate with every target on that bus. */
		numtarg = (ahc->features & AHC_WIDE) ? 16 : 8;

	if ((ahc->features & AHC_TWIN) != 0) {
		if ((ahc->flags & AHC_RESET_BUS_B) != 0) {
			ahc_reset_channel(ahc, 'B', /*initiate_reset*/TRUE);
		} else {
			/* Bus B targets occupy indices 8-15 of the loop below. */
			if (numtarg == 0)
				i = 8;
			numtarg += 8;
		}
	}

	/*
	 * Force negotiation to async for all targets that
	 * will not see an initial bus reset.
	 */
	for (; i < numtarg; i++) {
		struct ahc_devinfo devinfo;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;
		u_int our_id;
		u_int target_id;
		char channel;

		channel = 'A';
		our_id = ahc->our_id;
		target_id = i;
		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
			/* Indices 8-15 address channel B on TWIN controllers. */
			channel = 'B';
			our_id = ahc->our_id_b;
			target_id = i % 8;
		}
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
					    target_id, &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
				    CAM_LUN_WILDCARD, channel, ROLE_INITIATOR);
		ahc_update_neg_request(ahc, &devinfo, tstate,
				       tinfo, AHC_NEG_ALWAYS);
	}
	/* Give the bus some time to recover */
	if ((ahc->flags & (AHC_RESET_BUS_A|AHC_RESET_BUS_B)) != 0) {
		/* Hold off command submission until the reset delay expires;
		 * ahc_linux_release_simq unfreezes us from the timer. */
		ahc_linux_freeze_simq(ahc);
		init_timer(&ahc->platform_data->reset_timer);
		ahc->platform_data->reset_timer.data = (u_long)ahc;
		ahc->platform_data->reset_timer.expires =
		    jiffies + (AIC7XXX_RESET_DELAY * HZ)/1000;
		ahc->platform_data->reset_timer.function =
		    ahc_linux_release_simq;
		add_timer(&ahc->platform_data->reset_timer);
	}
}

/*
 * Allocate and initialize the per-instance Linux platform data:
 * queues, locks, timers, semaphores and the run-queue tasklet.
 * Returns 0 on success or ENOMEM if the allocation fails.
 * (platform_arg is not referenced in this body.)
 */
int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	ahc->platform_data =
	    malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
	if (ahc->platform_data == NULL)
		return (ENOMEM);
	memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
	TAILQ_INIT(&ahc->platform_data->completeq);
	TAILQ_INIT(&ahc->platform_data->device_runq);
	ahc->platform_data->irq = AHC_LINUX_NOIRQ;
	/* Default to a 32-bit DMA mask until the bus layer says otherwise. */
	ahc->platform_data->hw_dma_mask = 0xFFFFFFFF;
	ahc_lockinit(ahc);
	ahc_done_lockinit(ahc);
	/* Fallback timer that drains the completion queue from process
	 * context; armed elsewhere when AHC_RUN_CMPLT_Q_TIMER is set. */
	init_timer(&ahc->platform_data->completeq_timer);
	ahc->platform_data->completeq_timer.data = (u_long)ahc;
	ahc->platform_data->completeq_timer.function =
	    (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
	/* Created locked: the error-recovery and DV threads block on these
	 * until signaled. */
	init_MUTEX_LOCKED(&ahc->platform_data->eh_sem);
	init_MUTEX_LOCKED(&ahc->platform_data->dv_sem);
	init_MUTEX_LOCKED(&ahc->platform_data->dv_cmd_sem);
#else
	ahc->platform_data->eh_sem = MUTEX_LOCKED;
	ahc->platform_data->dv_sem = MUTEX_LOCKED;
	ahc->platform_data->dv_cmd_sem = MUTEX_LOCKED;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
	tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet,
		     (unsigned long)ahc);
#endif
	/* Selection timeout is a 2-bit field stored in bits 5:4. */
	ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
	ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4;
	if (aic7xxx_pci_parity == 0)
		ahc->flags |= AHC_DISABLE_PCI_PERR;
	return (0);
}

/*
 * Tear down everything ahc_platform_alloc() and runtime operation
 * created: timers, the DV thread, the tasklet, the SCSI host, all
 * target/device objects, the IRQ, and any I/O or memory-mapped regions.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
	struct ahc_linux_target *targ;
	struct ahc_linux_device *dev;
	int i, j;

	if (ahc->platform_data != NULL) {
		del_timer_sync(&ahc->platform_data->completeq_timer);
		ahc_linux_kill_dv_thread(ahc);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
		tasklet_kill(&ahc->platform_data->runq_tasklet);
#endif
		if (ahc->platform_data->host != NULL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			scsi_remove_host(ahc->platform_data->host);
#endif
			scsi_host_put(ahc->platform_data->host);
		}

		/* destroy all of the device and target objects */
		for (i = 0; i < AHC_NUM_TARGETS; i++) {
			targ = ahc->platform_data->targets[i];
			if (targ != NULL) {
				/* Keep target around through the loop. */
				targ->refcount++;
				for (j = 0; j < AHC_NUM_LUNS; j++) {
					if (targ->devices[j] == NULL)
						continue;
					dev = targ->devices[j];
					ahc_linux_free_device(ahc, dev);
				}
				/*
				 * Forcibly free the target now that
				 * all devices are gone.
				 */
				ahc_linux_free_target(ahc, targ);
			}
		}
		if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
			free_irq(ahc->platform_data->irq, ahc);
		if (ahc->tag == BUS_SPACE_PIO
		 && ahc->bsh.ioport != 0)
			release_region(ahc->bsh.ioport, 256);
		if (ahc->tag == BUS_SPACE_MEMIO
		 && ahc->bsh.maddr != NULL) {
			u_long base_addr;

			/* iounmap() wants the page-aligned address that
			 * ioremap() returned, not the possibly-offset
			 * register base. */
			base_addr = (u_long)ahc->bsh.maddr;
			base_addr &= PAGE_MASK;
			iounmap((void *)base_addr);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
			release_mem_region(ahc->platform_data->mem_busaddr,
					   0x1000);
#endif
		}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) && \
    LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
		/*
		 * In 2.4 we detach from the scsi midlayer before the PCI
		 * layer invokes our remove callback.  No per-instance
		 * detach is provided, so we must reach inside the PCI
		 * subsystem's internals and detach our driver manually.
		 */
		if (ahc->dev_softc != NULL)
			ahc->dev_softc->driver = NULL;
#endif
		free(ahc->platform_data, M_DEVBUF);
	}
}

/*
 * Requeue (with CAM_REQUEUE_REQ status) every queued command for the
 * device addressed by the given SCB.
 */
void
ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
				SCB_GET_CHANNEL(ahc, scb),
				SCB_GET_LUN(scb), SCB_LIST_NULL,
				ROLE_UNKNOWN, CAM_REQUEUE_REQ);
}

/*
 * Apply a new queueing algorithm (none / basic / tagged) to the device
 * identified by devinfo, adjusting our opening count and, on 2.5+
 * kernels, the mid-layer's queue depth to match.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, ahc_queue_alg alg)
{
	struct ahc_linux_device *dev;
	int was_queuing;
	int now_queuing;

	dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
				   devinfo->target,
				   devinfo->lun, /*alloc*/FALSE);
	if (dev == NULL)
		return;
	was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
	switch (alg) {
	default:
	case AHC_QUEUE_NONE:
		now_queuing = 0;
		break;
	case AHC_QUEUE_BASIC:
		now_queuing = AHC_DEV_Q_BASIC;
		break;
	case AHC_QUEUE_TAGGED:
		now_queuing = AHC_DEV_Q_TAGGED;
		break;
	}
	/* Changing queueing mode with commands in flight: freeze the device
	 * until it drains so old and new style commands don't mix. */
	if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0
	 && (was_queuing != now_queuing)
	 && (dev->active != 0)) {
		dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY;
		dev->qfrozen++;
	}

	dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG);
	if (now_queuing) {
		u_int usertags;

		usertags = ahc_linux_user_tagdepth(ahc, devinfo);
		if (!was_queuing) {
			/*
			 * Start out aggressively and allow our
			 * dynamic queue depth algorithm to take
			 * care of the rest.
			 */
			dev->maxtags = usertags;
			dev->openings = dev->maxtags - dev->active;
		}
		if (dev->maxtags == 0) {
			/*
			 * Queueing is disabled by the user.
			 */
			dev->openings = 1;
		} else if (alg == AHC_QUEUE_TAGGED) {
			dev->flags |= AHC_DEV_Q_TAGGED;
			if (aic7xxx_periodic_otag != 0)
				dev->flags |= AHC_DEV_PERIODIC_OTAG;
		} else
			dev->flags |= AHC_DEV_Q_BASIC;
	} else {
		/* We can only have one opening. */
		dev->maxtags = 0;
		dev->openings = 1 - dev->active;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	if (dev->scsi_device != NULL) {
		switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
		case AHC_DEV_Q_BASIC:
			scsi_adjust_queue_depth(dev->scsi_device,
						MSG_SIMPLE_TASK,
						dev->openings + dev->active);
			break;
		case AHC_DEV_Q_TAGGED:
			scsi_adjust_queue_depth(dev->scsi_device,
						MSG_ORDERED_TASK,
						dev->openings + dev->active);
			break;
		default:
			/*
			 * We allow the OS to queue 2 untagged transactions to
			 * us at any time even though we can only execute them
			 * serially on the controller/device.  This should
			 * remove some latency.
			 */
			scsi_adjust_queue_depth(dev->scsi_device,
						/*NON-TAGGED*/0,
						/*queue depth*/2);
			break;
		}
	}
#endif
}

/*
 * Complete, with the given status, every command on the busy queue of
 * each device matched by the (channel, target, lun) triple.  Wildcards
 * (ALL_CHANNELS / CAM_TARGET_WILDCARD / CAM_LUN_WILDCARD) widen the
 * corresponding range.  Only whole-queue aborts (tag == SCB_LIST_NULL)
 * are handled; any other tag returns 0 immediately.  Returns the number
 * of commands completed.  The role parameter is not used in this body.
 *
 * NOTE(review): targ and clun are initialized before the loop nest but
 * never reset inside it, so with wildcard arguments only the first
 * channel's (and first target's) full range is walked — looks like a
 * latent bug when maxchan > 1 or maxtarg > 1; confirm against upstream.
 */
int
ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
			int lun, u_int tag, role_t role, uint32_t status)
{
	int chan;
	int maxchan;
	int targ;
	int maxtarg;
	int clun;
	int maxlun;
	int count;

	if (tag != SCB_LIST_NULL)
		return (0);

	chan = 0;
	if (channel != ALL_CHANNELS) {
		chan = channel - 'A';
		maxchan = chan + 1;
	} else {
		maxchan = (ahc->features & AHC_TWIN) ? 2 : 1;
	}
	targ = 0;
	if (target != CAM_TARGET_WILDCARD) {
		targ = target;
		maxtarg = targ + 1;
	} else {
		maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
	}
	clun = 0;
	if (lun != CAM_LUN_WILDCARD) {
		clun = lun;
		maxlun = clun + 1;
	} else {
		maxlun = AHC_NUM_LUNS;
	}
	count = 0;
	for (; chan < maxchan; chan++) {
		for (; targ < maxtarg; targ++) {
			for (; clun < maxlun; clun++) {
				struct ahc_linux_device *dev;
				struct ahc_busyq *busyq;
				struct ahc_cmd *acmd;

				dev = ahc_linux_get_device(ahc, chan,
							   targ, clun,
							   /*alloc*/FALSE);
				if (dev == NULL)
					continue;
				busyq = &dev->busyq;
				/* Drain the device's busy queue, completing
				 * each command with the requested status in
				 * the mid-layer's result field. */
				while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
					Scsi_Cmnd *cmd;

					cmd = &acmd_scsi_cmd(acmd);
					TAILQ_REMOVE(busyq, acmd,
						     acmd_links.tqe);
					count++;
					cmd->result = status << 16;
					ahc_linux_queue_cmd_complete(ahc, cmd);
				}
			}
		}
	}
	return (count);
}

/*
 * Process-context completion-queue runner (also used as the completeq
 * timer callback): cancel the pending timer, clear its flag, and drain
 * the completion queue, all under the softc lock.
 */
static void
ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc)
{
	u_long flags;

	ahc_lock(ahc, &flags);
	del_timer(&ahc->platform_data->completeq_timer);
	ahc->platform_data->flags &= ~AHC_RUN_CMPLT_Q_TIMER;
	ahc_linux_run_complete_queue(ahc);
	ahc_unlock(ahc, &flags);
}

/*
 * NOTE(review): the visible chunk ends mid-comment below; the remainder
 * of this function lies outside the region shown here.
 */
static void
ahc_linux_start_dv(struct ahc_softc *ahc)
{
	/*
	 * Freeze the simq and si
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -