📄 aic7xxx_osm.c
字号:
/*
 * NOTE(review): this opening brace belongs to a function whose signature
 * lies above the visible region (presumably ahc_platform_free) -- it tears
 * down all Linux-specific state attached to the softc: the run-queue
 * tasklet, the registered SCSI host, the IRQ, and the PIO/MMIO ranges.
 */
{
	if (ahc->platform_data != NULL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
		/* Stop the command run-queue tasklet before freeing state. */
		tasklet_kill(&ahc->platform_data->runq_tasklet);
#endif
		if (ahc->platform_data->host != NULL)
			scsi_unregister(ahc->platform_data->host);
		if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
			free_irq(ahc->platform_data->irq, ahc);
		if (ahc->tag == BUS_SPACE_PIO
		 && ahc->bsh.ioport != 0)
			release_region(ahc->bsh.ioport, 256);
		if (ahc->tag == BUS_SPACE_MEMIO
		 && ahc->bsh.maddr != NULL) {
			u_long base_addr;

			/* iounmap() expects the page-aligned mapping base. */
			base_addr = (u_long)ahc->bsh.maddr;
			base_addr &= PAGE_MASK;
			iounmap((void *)base_addr);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
			release_mem_region(ahc->platform_data->mem_busaddr,
					   0x1000);
#endif
		}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
		/* XXX Need an instance detach in the PCI code */
		if (ahc->dev_softc != NULL)
			ahc->dev_softc->driver = NULL;
#endif
		free(ahc->platform_data, M_DEVBUF);
	}
	/* Last controller gone -> undo the driver-global registrations. */
	if (TAILQ_EMPTY(&ahc_tailq)) {
		unregister_reboot_notifier(&ahc_linux_notifier);
#ifdef CONFIG_PCI
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
		pci_unregister_driver(&aic7xxx_pci_driver);
#endif
#endif
	}
}

/*
 * Freeze the device queue associated with "scb" by requeuing (with
 * CAM_REQUEUE_REQ status) every command still pending for that
 * target/channel/lun.
 */
void
ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_platform_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
				SCB_GET_CHANNEL(ahc, scb),
				SCB_GET_LUN(scb), SCB_LIST_NULL,
				ROLE_UNKNOWN, CAM_REQUEUE_REQ);
}

/*
 * Switch a device between tagged, basic, and untagged queuing as
 * requested by "alg".  If the queuing mode changes while commands are
 * still active, the device is frozen until it drains so the transition
 * happens on an idle device.
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		      ahc_queue_alg alg)
{
	struct ahc_linux_device *dev;
	int was_queuing;
	int now_queuing;

	dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
				   devinfo->target,
				   devinfo->lun, /*alloc*/FALSE);
	if (dev == NULL)
		return;
	was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
	now_queuing = alg != AHC_QUEUE_NONE;
	/*
	 * Changing queuing mode with commands in flight: freeze the
	 * device until it is idle so old- and new-style commands never
	 * mix on the wire.
	 */
	if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) == 0
	 && (was_queuing != now_queuing)
	 && (dev->active != 0)) {
		dev->flags |= AHC_DEV_FREEZE_TIL_EMPTY;
		dev->qfrozen++;
	}
	dev->flags &= ~(AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED|AHC_DEV_PERIODIC_OTAG);
	if (now_queuing) {
		u_int usertags;

		usertags = ahc_linux_user_tagdepth(ahc, devinfo);
		if (!was_queuing) {
			/*
			 * Start out aggressively and allow our
			 * dynamic queue depth algorithm to take
			 * care of the rest.
			 */
			dev->maxtags = usertags;
			dev->openings = dev->maxtags - dev->active;
		}
		if (alg == AHC_QUEUE_TAGGED) {
			dev->flags |= AHC_DEV_Q_TAGGED;
			if (aic7xxx_periodic_otag != 0)
				dev->flags |= AHC_DEV_PERIODIC_OTAG;
		} else
			dev->flags |= AHC_DEV_Q_BASIC;
	} else {
		/* We can only have one opening. */
		dev->maxtags = 0;
		dev->openings = 1 - dev->active;
	}
}

/*
 * Abort (complete with "status") every command queued at the driver
 * level for the given target/channel/lun.  Wildcards (ALL_CHANNELS,
 * CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) expand to the full range
 * supported by the controller.  Returns the number of commands
 * completed.  Only untagged sweeps are handled here (tag must be
 * SCB_LIST_NULL); per-tag aborts are dealt with elsewhere.
 */
int
ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
			int lun, u_int tag, role_t role, uint32_t status)
{
	int chan;
	int maxchan;
	int targ;
	int maxtarg;
	int clun;
	int maxlun;
	int count;

	if (tag != SCB_LIST_NULL)
		return (0);

	chan = 0;
	if (channel != ALL_CHANNELS) {
		chan = channel - 'A';
		maxchan = chan + 1;
	} else {
		maxchan = (ahc->features & AHC_TWIN) ? 2 : 1;
	}
	targ = 0;
	if (target != CAM_TARGET_WILDCARD) {
		targ = target;
		maxtarg = targ + 1;
	} else {
		maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
	}
	clun = 0;
	if (lun != CAM_LUN_WILDCARD) {
		clun = lun;
		maxlun = clun + 1;
	} else {
		maxlun = AHC_NUM_LUNS;
	}
	count = 0;
	/*
	 * NOTE(review): targ and clun are initialized outside the loop
	 * nest and never reset per outer iteration, so a wildcard sweep
	 * over multiple channels/targets only walks the inner loops on
	 * the first outer pass (after the first channel, targ == maxtarg
	 * and the target loop body never runs).  Later versions of this
	 * driver reset the inner indices inside the loops -- confirm
	 * against the upstream driver before relying on wildcard aborts
	 * on twin-channel/wide controllers.
	 */
	for (; chan < maxchan; chan++) {
		for (; targ < maxtarg; targ++) {
			for (; clun < maxlun; clun++) {
				struct ahc_linux_device *dev;
				struct ahc_busyq *busyq;
				struct ahc_cmd *acmd;

				dev = ahc_linux_get_device(ahc, chan,
							   targ, clun,
							   /*alloc*/FALSE);
				if (dev == NULL)
					continue;
				busyq = &dev->busyq;
				/* Drain the busy queue, completing each
				 * command with the caller's status. */
				while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
					Scsi_Cmnd *cmd;

					cmd = &acmd_scsi_cmd(acmd);
					TAILQ_REMOVE(busyq, acmd,
						     acmd_links.tqe);
					count++;
					cmd->result = status << 16;
					ahc_linux_queue_cmd_complete(ahc, cmd);
				}
			}
		}
	}
	return (count);
}

/*
 * Sets the queue depth for each SCSI device hanging
 * off the input host adapter.
 */
static void
ahc_linux_select_queue_depth(struct Scsi_Host * host, Scsi_Device * scsi_devs)
{
	Scsi_Device *device;
	struct ahc_softc *ahc;
	u_long flags;
	int scbnum;

	ahc = *((struct ahc_softc **)host->hostdata);
	ahc_lock(ahc, &flags);
	/*
	 * NOTE(review): scbnum is accumulated but never used in this
	 * version -- presumably a leftover from an SCB-pool sizing
	 * computation; confirm against the upstream driver.
	 */
	scbnum = 0;
	/* Walk the mid-layer's device list, configuring only devices
	 * that belong to this host instance. */
	for (device = scsi_devs; device != NULL; device = device->next) {
		if (device->host == host) {
			ahc_linux_device_queue_depth(ahc, device);
			scbnum += device->queue_depth;
		}
	}
	ahc_unlock(ahc, &flags);
}

/*
 * Return the user-configured tag depth for the device described by
 * "devinfo", clamped to AHC_MAX_QUEUE.  Returns 0 (no tagged queuing)
 * when disconnection is disabled for the target.  Warns once if the
 * aic7xxx_tag_info table has fewer entries than installed controllers
 * and falls back to the maximum depth.
 */
static u_int
ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	static int warned_user;	/* one-shot warning latch */
	u_int tags;

	tags = 0;
	/* Tagged queuing requires disconnection privileges. */
	if ((ahc->user_discenable & devinfo->target_mask) != 0) {
		if (warned_user == 0
		 && ahc->unit >= NUM_ELEMENTS(aic7xxx_tag_info)) {
			printf("aic7xxx: WARNING, insufficient "
			       "tag_info instances for installed "
			       "controllers. Using defaults\n");
			printf("aic7xxx: Please update the "
			       "aic7xxx_tag_info array in the "
			       "aic7xxx.c source file.\n");
			tags = AHC_MAX_QUEUE;
			warned_user++;
		} else {
			adapter_tag_info_t *tag_info;

			tag_info = &aic7xxx_tag_info[ahc->unit];
			tags = tag_info->tag_commands[devinfo->target_offset];
			if (tags > AHC_MAX_QUEUE)
				tags = AHC_MAX_QUEUE;
		}
	}
	return (tags);
}

/*
 * Determines the queue depth for a given device.  Enables tagged
 * queuing when both the user configuration and the device permit it;
 * otherwise allows two untagged commands to be queued at the driver
 * level to hide completion latency.
 */
static void
ahc_linux_device_queue_depth(struct ahc_softc *ahc, Scsi_Device * device)
{
	struct ahc_devinfo devinfo;
	u_int tags;

	ahc_compile_devinfo(&devinfo,
			    device->channel == 0 ? ahc->our_id : ahc->our_id_b,
			    device->id, device->lun,
			    device->channel == 0 ? 'A' : 'B',
			    ROLE_INITIATOR);
	tags = ahc_linux_user_tagdepth(ahc, &devinfo);
	if (tags != 0 && device->tagged_supported != 0) {
		device->queue_depth = tags;
		ahc_set_tags(ahc, &devinfo, AHC_QUEUE_TAGGED);
		printf("scsi%d:%c:%d:%d: Tagged Queuing enabled. Depth %d\n",
		       ahc->platform_data->host->host_no,
		       devinfo.channel, devinfo.target,
		       devinfo.lun, tags);
	} else {
		/*
		 * We allow the OS to queue 2 untagged transactions to
		 * us at any time even though we can only execute them
		 * serially on the controller/device.  This should remove
		 * some latency.
		 */
		device->queue_depth = 2;
	}
}

/*
 * Queue an SCB to the controller.  Mid-layer entry point: the command
 * is appended to the per-device busy queue and the device is placed on
 * the controller run list if it is not already there.  Returns 0 on
 * success or -ENOMEM if a device structure could not be allocated.
 */
int
ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
{
	struct ahc_softc *ahc;
	struct ahc_linux_device *dev;
	u_long flags;

	ahc = *(struct ahc_softc **)cmd->host->hostdata;

	/*
	 * Save the callback on completion function.
	 */
	cmd->scsi_done = scsi_done;

	ahc_lock(ahc, &flags);
	dev = ahc_linux_get_device(ahc, cmd->channel, cmd->target,
				   cmd->lun, /*alloc*/TRUE);
	if (dev == NULL) {
		ahc_unlock(ahc, &flags);
		printf("aic7xxx_linux_queue: Unable to allocate device!\n");
		return (-ENOMEM);
	}
	cmd->result = CAM_REQ_INPROG << 16;
	TAILQ_INSERT_TAIL(&dev->busyq, (struct ahc_cmd *)cmd, acmd_links.tqe);
	if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
		TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
				  dev, links);
		dev->flags |= AHC_DEV_ON_RUN_LIST;
		ahc_linux_run_device_queues(ahc);
	}
	ahc_unlock(ahc, &flags);
	return (0);
}

/*
 * Drain the busy queue of a single device: for each queued command,
 * allocate an SCB, fill in the hardware SCB (identification, transfer
 * negotiation, tag type, CDB) and set up the DMA scatter/gather list.
 * Stops when the device runs out of openings, becomes frozen, or the
 * controller runs out of SCBs (in which case the device is requeued on
 * the run list and AHC_RESOURCE_SHORTAGE is flagged).
 *
 * NOTE(review): this function is truncated at the end of the visible
 * source; the remainder (no-data case, SCB activation) lies beyond view.
 */
static void
ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
{
	struct ahc_cmd *acmd;
	struct scsi_cmnd *cmd;
	struct scb *scb;
	struct hardware_scb *hscb;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	uint16_t mask;

	if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0)
		panic("running device on run list");

	while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
	    && dev->openings > 0 && dev->qfrozen == 0) {

		/*
		 * Schedule us to run later.  The only reason we are not
		 * running is because the whole controller Q is frozen.
		 */
		if (ahc->platform_data->qfrozen != 0) {
			TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
					  dev, links);
			dev->flags |= AHC_DEV_ON_RUN_LIST;
			return;
		}
		/*
		 * Get an scb to use.
		 */
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			/* Out of SCBs: requeue and retry after a
			 * completion frees one. */
			TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
					  dev, links);
			dev->flags |= AHC_DEV_ON_RUN_LIST;
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			return;
		}
		TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
		cmd = &acmd_scsi_cmd(acmd);
		scb->io_ctx = cmd;
		scb->platform_data->dev = dev;
		hscb = scb->hscb;
		/* Stash the SCB so the completion path can find it. */
		cmd->host_scribble = (char *)scb;

		/*
		 * Fill out basics of the HSCB.
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, cmd);
		hscb->lun = cmd->lun;
		mask = SCB_GET_TARGET_MASK(ahc, scb);
		tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
					    SCB_GET_OUR_ID(scb),
					    SCB_GET_TARGET(ahc, scb), &tstate);
		hscb->scsirate = tinfo->scsirate;
		hscb->scsioffset = tinfo->curr.offset;
		if ((tstate->ultraenb & mask) != 0)
			hscb->control |= ULTRAENB;

		if ((ahc->user_discenable & mask) != 0)
			hscb->control |= DISCENB;

		if ((tstate->auto_negotiate & mask) != 0) {
			/* Negotiation pending: ask the sequencer to
			 * deliver our message. */
			scb->flags |= SCB_AUTO_NEGOTIATE;
			scb->hscb->control |= MK_MESSAGE;
		}

		if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
			/* Periodically issue an ordered tag so simple
			 * tags cannot be starved indefinitely. */
			if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
			 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
				hscb->control |= MSG_ORDERED_TASK;
				dev->commands_since_idle_or_otag = 0;
			} else {
				hscb->control |= MSG_SIMPLE_TASK;
			}
		}

		/* CDBs up to 12 bytes are embedded; longer ones use the
		 * 32-byte area and are referenced by pointer. */
		hscb->cdb_len = cmd->cmd_len;
		if (hscb->cdb_len <= 12) {
			memcpy(hscb->shared_data.cdb, cmd->cmnd,
			       hscb->cdb_len);
		} else {
			memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
			scb->flags |= SCB_CDB32_PTR;
		}

		scb->platform_data->xfer_len = 0;
		ahc_set_residual(scb, 0);
		ahc_set_sense_residual(scb, 0);
		scb->sg_count = 0;
		if (cmd->use_sg != 0) {
			/* Scatter/gather request: map the mid-layer's
			 * segment list for DMA. */
			struct ahc_dma_seg *sg;
			struct scatterlist *cur_seg;
			struct scatterlist *end_seg;
			int nseg;

			cur_seg = (struct scatterlist *)cmd->request_buffer;
			nseg = pci_map_sg(ahc->dev_softc, cur_seg,
					  cmd->use_sg,
					  scsi_to_pci_dma_dir(cmd->sc_data_direction));
			end_seg = cur_seg + nseg;
			/* Copy the segments into the SG list. */
			sg = scb->sg_list;
			/*
			 * The sg_count may be larger than nseg if
			 * a transfer crosses a 32bit page.
			 */
			while (cur_seg < end_seg) {
				bus_addr_t addr;
				bus_size_t len;
				int consumed;

				addr = sg_dma_address(cur_seg);
				len = sg_dma_len(cur_seg);
				consumed = ahc_linux_map_seg(ahc, scb,
							     sg, addr, len);
				sg += consumed;
				scb->sg_count += consumed;
				cur_seg++;
			}
			/* Mark the final element so the sequencer knows
			 * where the list ends. */
			sg--;
			sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

			/*
			 * Reset the sg list pointer.
			 */
			scb->hscb->sgptr =
			    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

			/*
			 * Copy the first SG into the "current"
			 * data pointer area.
			 */
			scb->hscb->dataptr = scb->sg_list->addr;
			scb->hscb->datacnt = scb->sg_list->len;
		} else if (cmd->request_bufflen != 0) {
			/* Single contiguous buffer: one DMA mapping. */
			struct ahc_dma_seg *sg;
			bus_addr_t addr;

			sg = scb->sg_list;
			addr = pci_map_single(ahc->dev_softc,
					      cmd->request_buffer,
					      cmd->request_bufflen,
					      scsi_to_pci_dma_dir(cmd->sc_data_direction));
			scb->platform_data->buf_busaddr = addr;
			scb->sg_count = ahc_linux_map_seg(ahc, scb,
							  sg, addr,
							  cmd->request_bufflen);
			sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

			/*
			 * Reset the sg list pointer.
			 */
			scb->hscb->sgptr =
			    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

			/*
			 * Copy the first SG into the "current"
			 * data pointer area.
			 */
			scb->hscb->dataptr = sg->addr;
			scb->hscb->datacnt = sg->len;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -