📄 aic7xxx_linux.c
        } else
                dev->flags |= AHC_DEV_Q_BASIC;
        } else {
                /* We can only have one opening */
                dev->maxtags = 0;
                dev->openings = 1 - dev->active;
        }
}

int
ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
                        int lun, u_int tag, role_t role, uint32_t status)
{
        int chan;
        int maxchan;
        int targ;
        int maxtarg;
        int clun;
        int maxlun;
        int count;

        if (tag != SCB_LIST_NULL)
                return (0);

        chan = 0;
        if (channel != ALL_CHANNELS) {
                chan = channel - 'A';
                maxchan = chan + 1;
        } else {
                maxchan = (ahc->features & AHC_TWIN) ? 2 : 1;
        }
        targ = 0;
        if (target != CAM_TARGET_WILDCARD) {
                targ = target;
                maxtarg = targ + 1;
        } else {
                maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
        }
        clun = 0;
        if (lun != CAM_LUN_WILDCARD) {
                clun = lun;
                maxlun = clun + 1;
        } else {
                maxlun = 16;
        }

        count = 0;
        for (; chan < maxchan; chan++) {
                for (; targ < maxtarg; targ++) {
                        for (; clun < maxlun; clun++) {
                                struct ahc_linux_device *dev;
                                struct ahc_busyq *busyq;
                                struct ahc_cmd *acmd;

                                dev = ahc_linux_get_device(ahc, chan, targ,
                                                           clun, /*alloc*/FALSE);
                                if (dev == NULL)
                                        continue;

                                busyq = &dev->busyq;
                                while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
                                        Scsi_Cmnd *cmd;

                                        cmd = &acmd_scsi_cmd(acmd);
                                        TAILQ_REMOVE(busyq, acmd,
                                                     acmd_links.tqe);
                                        count++;
                                        cmd->result = status << 16;
                                        ahc_linux_queue_cmd_complete(ahc, cmd);
                                }
                        }
                }
        }

        return (count);
}

/*
 * Sets the queue depth for each SCSI device hanging
 * off the input host adapter.
 */
static void
ahc_linux_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs)
{
        Scsi_Device *device;
        struct ahc_softc *ahc;
        u_long flags;
        int scbnum;

        ahc = *((struct ahc_softc **)host->hostdata);
        ahc_lock(ahc, &flags);
        scbnum = 0;
        for (device = scsi_devs; device != NULL; device = device->next) {
                if (device->host == host) {
                        ahc_linux_device_queue_depth(ahc, device);
                        scbnum += device->queue_depth;
                }
        }
        ahc_unlock(ahc, &flags);
}

/*
 * Determines the queue depth for a given device.
 */
static void
ahc_linux_device_queue_depth(struct ahc_softc *ahc, Scsi_Device *device)
{
        struct ahc_devinfo devinfo;
        struct ahc_initiator_tinfo *targ_info;
        struct ahc_tmode_tstate *tstate;
        uint8_t tags;

        ahc_compile_devinfo(&devinfo,
                            device->channel == 0 ? ahc->our_id : ahc->our_id_b,
                            device->id, device->lun,
                            device->channel == 0 ? 'A' : 'B',
                            ROLE_INITIATOR);
        targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
                                        devinfo.our_scsiid,
                                        devinfo.target, &tstate);

        tags = 0;
        if (device->tagged_supported != 0
         && (ahc->user_discenable & devinfo.target_mask) != 0) {
                if (ahc->unit >= NUM_ELEMENTS(aic7xxx_tag_info)) {
                        printf("aic7xxx: WARNING, insufficient "
                               "tag_info instances for installed "
                               "controllers. Using defaults\n");
                        printf("aic7xxx: Please update the "
                               "aic7xxx_tag_info array in the "
                               "aic7xxx.c source file.\n");
                        tags = AHC_MAX_QUEUE;
                } else {
                        adapter_tag_info_t *tag_info;

                        tag_info = &aic7xxx_tag_info[ahc->unit];
                        tags = tag_info->tag_commands[devinfo.target_offset];
                        if (tags > AHC_MAX_QUEUE)
                                tags = AHC_MAX_QUEUE;
                }
        }
        if (tags != 0) {
                device->queue_depth = tags;
                ahc_set_tags(ahc, &devinfo, AHC_QUEUE_TAGGED);
                printf("scsi%d:%c:%d:%d: Tagged Queuing enabled. Depth %d\n",
                       ahc->platform_data->host->host_no,
                       device->channel + 'A', device->id,
                       device->lun, tags);
        } else {
                /*
                 * We allow the OS to queue 2 untagged transactions to
                 * us at any time even though we can only execute them
                 * serially on the controller/device. This should remove
                 * some latency.
                 */
                device->queue_depth = 2;
        }
}

/*
 * Queue an SCB to the controller.
 */
int
ahc_linux_queue(Scsi_Cmnd *cmd, void (*scsi_done) (Scsi_Cmnd *))
{
        struct ahc_softc *ahc;
        struct ahc_linux_device *dev;
        u_long flags;

        ahc = *(struct ahc_softc **)cmd->host->hostdata;

        /*
         * Save the callback on completion function.
         */
        cmd->scsi_done = scsi_done;

        ahc_lock(ahc, &flags);
        dev = ahc_linux_get_device(ahc, cmd->channel, cmd->target,
                                   cmd->lun, /*alloc*/TRUE);
        if (dev == NULL) {
                ahc_unlock(ahc, &flags);
                printf("aic7xxx_linux_queue: Unable to allocate device!\n");
                return (-ENOMEM);
        }
        cmd->result = CAM_REQ_INPROG << 16;
        TAILQ_INSERT_TAIL(&dev->busyq, (struct ahc_cmd *)cmd, acmd_links.tqe);
        if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
                TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
                                  dev, links);
                dev->flags |= AHC_DEV_ON_RUN_LIST;
                ahc_linux_run_device_queues(ahc);
        }
        ahc_unlock(ahc, &flags);
        return (0);
}

static void
ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
{
        struct ahc_cmd *acmd;
        struct scsi_cmnd *cmd;
        struct scb *scb;
        struct hardware_scb *hscb;
        struct ahc_initiator_tinfo *tinfo;
        struct ahc_tmode_tstate *tstate;
        uint16_t mask;

        if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0)
                panic("running device on run list");

        while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
            && dev->openings > 0 && dev->qfrozen == 0) {

                /*
                 * Schedule us to run later. The only reason we are not
                 * running is because the whole controller Q is frozen.
                 */
                if (ahc->platform_data->qfrozen != 0) {
                        TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
                                          dev, links);
                        dev->flags |= AHC_DEV_ON_RUN_LIST;
                        return;
                }
                /*
                 * Get an scb to use.
                 */
                if ((scb = ahc_get_scb(ahc)) == NULL) {
                        TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
                                          dev, links);
                        dev->flags |= AHC_DEV_ON_RUN_LIST;
                        ahc->flags |= AHC_RESOURCE_SHORTAGE;
                        return;
                }
                TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
                cmd = &acmd_scsi_cmd(acmd);
                scb->io_ctx = cmd;
                scb->platform_data->dev = dev;
                hscb = scb->hscb;
                cmd->host_scribble = (char *)scb;

                /*
                 * Fill out basics of the HSCB.
                 */
                hscb->control = 0;
                hscb->scsiid = BUILD_SCSIID(ahc, cmd);
                hscb->lun = cmd->lun;
                mask = SCB_GET_TARGET_MASK(ahc, scb);
                tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
                                            SCB_GET_OUR_ID(scb),
                                            SCB_GET_TARGET(ahc, scb), &tstate);
                hscb->scsirate = tinfo->scsirate;
                hscb->scsioffset = tinfo->curr.offset;
                if ((tstate->ultraenb & mask) != 0)
                        hscb->control |= ULTRAENB;

                if ((ahc->user_discenable & mask) != 0)
                        hscb->control |= DISCENB;

                if ((tstate->auto_negotiate & mask) != 0) {
                        scb->flags |= SCB_AUTO_NEGOTIATE;
                        scb->hscb->control |= MK_MESSAGE;
                }

                if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
                        if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
                         && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
                                hscb->control |= MSG_ORDERED_TASK;
                                dev->commands_since_idle_or_otag = 0;
                        } else {
                                hscb->control |= MSG_SIMPLE_TASK;
                        }
                }

                hscb->cdb_len = cmd->cmd_len;
                if (hscb->cdb_len <= 12) {
                        memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
                } else {
                        memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
                        scb->flags |= SCB_CDB32_PTR;
                }

                scb->platform_data->xfer_len = 0;
                ahc_set_residual(scb, 0);
                ahc_set_sense_residual(scb, 0);
                if (cmd->use_sg != 0) {
                        struct ahc_dma_seg *sg;
                        struct scatterlist *cur_seg;
                        struct scatterlist *end_seg;
                        int nseg;

                        cur_seg = (struct scatterlist *)cmd->request_buffer;
                        nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
                                scsi_to_pci_dma_dir(cmd->sc_data_direction));
                        end_seg = cur_seg + nseg;
                        /* Copy the segments into the SG list. */
                        sg = scb->sg_list;
                        /*
                         * The sg_count may be larger than nseg if
                         * a transfer crosses a 32bit page.
                         */
                        scb->sg_count = 0;
                        while (cur_seg < end_seg) {
                                bus_addr_t addr;
                                bus_size_t len;
                                int consumed;

                                addr = sg_dma_address(cur_seg);
                                len = sg_dma_len(cur_seg);
                                consumed = ahc_linux_map_seg(ahc, scb,
                                                             sg, addr, len);
                                sg += consumed;
                                scb->sg_count += consumed;
                                cur_seg++;
                        }
                        sg--;
                        sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

                        /*
                         * Reset the sg list pointer.
                         */
                        scb->hscb->sgptr =
                            ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

                        /*
                         * Copy the first SG into the "current"
                         * data pointer area.
                         */
                        scb->hscb->dataptr = scb->sg_list->addr;
                        scb->hscb->datacnt = scb->sg_list->len;
                } else if (cmd->request_bufflen != 0) {
                        struct ahc_dma_seg *sg;
                        bus_addr_t addr;

                        sg = scb->sg_list;
                        addr = pci_map_single(ahc->dev_softc,
                                cmd->request_buffer,
                                cmd->request_bufflen,
                                scsi_to_pci_dma_dir(cmd->sc_data_direction));
                        scb->sg_count = 0;
                        scb->sg_count = ahc_linux_map_seg(ahc, scb, sg, addr,
                                                          cmd->request_bufflen);
                        sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

                        /*
                         * Reset the sg list pointer.
                         */
                        scb->hscb->sgptr =
                            ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

                        /*
                         * Copy the first SG into the "current"
                         * data pointer area.
                         */
                        scb->hscb->dataptr = sg->addr;
                        scb->hscb->datacnt = sg->len;
                } else {
                        scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
                        scb->hscb->dataptr = 0;
                        scb->hscb->datacnt = 0;
                        scb->sg_count = 0;
                }

                ahc_sync_sglist(ahc, scb, BUS_DMASYNC_PREWRITE);
                LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
                dev->openings--;
                dev->active++;
                dev->commands_issued++;
                if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
                        dev->commands_since_idle_or_otag++;

                /*
                 * We only allow one untagged transaction
                 * per target in the initiator role unless
                 * we are storing a full busy target *lun*
                 * table in SCB space.
                 */
                if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
                 && (ahc->features & AHC_SCB_BTT) == 0) {
                        struct scb_tailq *untagged_q;
                        int target_offset;

                        target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
                        untagged_q = &(ahc->untagged_queues[target_offset]);
                        TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
                        scb->flags |= SCB_UNTAGGEDQ;
                        if (TAILQ_FIRST(untagged_q) != scb)
                                continue;
                }
                scb->flags |= SCB_ACTIVE;
                ahc_queue_scb(ahc, scb);
        }
}

/*
 * SCSI controller interrupt handler.
 */
void
ahc_linux_isr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct ahc_softc *ahc;
        struct ahc_cmd *acmd;
        u_long flags;

        ahc = (struct ahc_softc *) dev_id;
        ahc_lock(ahc, &flags);
        ahc_intr(ahc);
        /*
         * It would be nice to run the device queues from a
         * bottom half handler, but as there is no way to
         * dynamically register one, we'll have to postpone
         * that until we get integrated into the kernel.
         */
        ahc_linux_run_device_queues(ahc);
        acmd = TAILQ_FIRST(&ahc->platform_data->completeq);
        TAILQ_INIT(&ahc->platform_data->completeq);
        ahc_unlock(ahc, &flags);
        if (acmd != NULL)
                ahc_linux_run_complete_queue(ahc, acmd);
}

void
ahc_platform_flushwork(struct ahc_softc *ahc)
{
        struct ahc_cmd *acmd;

        acmd = TAILQ_FIRST(&ahc->platform_data->completeq);
        TAILQ_INIT(&ahc->platform_data->completeq);
        if (acmd != NULL)
                ahc_linux_run_complete_queue(ahc, acmd);
}

static struct ahc_linux_target*
ahc_linux_alloc_target(struct ahc_softc *ahc, u_int channel, u_int target)
{
        struct ahc_linux_target *targ;
        u_int target_offset;

        targ = malloc(sizeof(*targ), M_DEVBUF, M_NOWAIT);
        if (targ == NULL)
                return (NULL);
        memset(targ, 0, sizeof(*targ));
        targ->channel = channel;
        targ->target = target;
        target_offset = target;
        if (channel != 0)
                target_offset += 8;
        ahc->platform_data->targets[target_offset] = targ;
        return (targ);
}

static void
ahc_linux_free_target(struct ahc_softc *ahc, struct ahc_linux_target *targ)
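
/*
 * Illustrative sketch only -- the fragment below is NOT part of
 * aic7xxx_linux.c (the listing above is cut off in the middle of
 * ahc_linux_free_target()).  It shows roughly how entry points such as
 * ahc_linux_queue(), ahc_linux_select_queue_depth() and ahc_linux_isr()
 * would typically be hooked into the 2.4-era SCSI midlayer.  The names
 * ahc_example_template and ahc_example_register_irq, the detect/release
 * handlers and the numeric field values are assumptions made for this
 * example only.
 */
#include <linux/interrupt.h>
#include "scsi.h"
#include "hosts.h"

static Scsi_Host_Template ahc_example_template = {
        .name                   = "aic7xxx (example)",
        .detect                 = ahc_linux_detect,     /* assumed to exist */
        .release                = ahc_linux_release,    /* assumed to exist */
        .queuecommand           = ahc_linux_queue,      /* shown above */
        .select_queue_depths    = ahc_linux_select_queue_depth,
        .can_queue              = 253,                  /* placeholder */
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .use_clustering         = ENABLE_CLUSTERING,
};

/*
 * The interrupt handler is registered directly against the IRQ line;
 * SA_SHIRQ lets the line be shared with other devices.
 */
static int
ahc_example_register_irq(struct ahc_softc *ahc, int irq)
{
        return (request_irq(irq, ahc_linux_isr, SA_SHIRQ, "aic7xxx", ahc));
}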