📄 aic7xxx_osm.c
字号:
/*
 * NOTE(review): tail of a function whose beginning is above this view
 * (it reports the tag depth in use, or disables tagged queuing).
 */
		printf("Tagged Queuing enabled. Depth %d\n", tags);
	} else {
		ahc_set_tags(ahc, &devinfo, AHC_QUEUE_NONE);
	}
}

/*
 * Queue a SCSI command for execution: allocate an SCB, fill out the
 * hardware SCB (target identity, negotiated transfer settings, tag
 * message, CDB), map the data buffer(s) for DMA, and hand the SCB to
 * the controller via ahc_queue_scb().
 *
 * Returns 0 on success, or a SCSI_MLQUEUE_*_BUSY code when the
 * mid-layer must retry the command later.
 */
static int
ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
		      struct scsi_cmnd *cmd)
{
	struct scb *scb;
	struct hardware_scb *hscb;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	uint16_t mask;
	struct scb_tailq *untagged_q = NULL;

	/*
	 * Schedule us to run later.  The only reason we are not
	 * running is because the whole controller Q is frozen.
	 */
	if (ahc->platform_data->qfrozen != 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if (!blk_rq_tagged(cmd->request)
	    && (ahc->features & AHC_SCB_BTT) == 0) {
		int target_offset;

		/* Channel B targets occupy the second bank of eight. */
		target_offset = cmd->device->id + cmd->device->channel * 8;
		untagged_q = &(ahc->untagged_queues[target_offset]);
		if (!TAILQ_EMPTY(untagged_q))
			/* if we're already executing an untagged command
			 * we're busy to another */
			return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/*
	 * Get an scb to use.
	 */
	scb = ahc_get_scb(ahc);
	if (!scb)
		return SCSI_MLQUEUE_HOST_BUSY;

	scb->io_ctx = cmd;
	scb->platform_data->dev = dev;
	hscb = scb->hscb;
	/* Stash the SCB so completion/abort paths can find it. */
	cmd->host_scribble = (char *)scb;

	/*
	 * Fill out basics of the HSCB.
	 */
	hscb->control = 0;
	hscb->scsiid = BUILD_SCSIID(ahc, cmd);
	hscb->lun = cmd->device->lun;
	mask = SCB_GET_TARGET_MASK(ahc, scb);
	tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
				    SCB_GET_OUR_ID(scb),
				    SCB_GET_TARGET(ahc, scb), &tstate);
	hscb->scsirate = tinfo->scsirate;
	hscb->scsioffset = tinfo->curr.offset;
	if ((tstate->ultraenb & mask) != 0)
		hscb->control |= ULTRAENB;

	if ((ahc->user_discenable & mask) != 0)
		hscb->control |= DISCENB;

	if ((tstate->auto_negotiate & mask) != 0) {
		/* A (re)negotiation is pending for this target; have the
		 * sequencer send the negotiation message with this command. */
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
		int msg_bytes;
		uint8_t tag_msgs[2];

		msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
		if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
			/* Honor the tag type requested by the mid-layer. */
			hscb->control |= tag_msgs[0];
			if (tag_msgs[0] == MSG_ORDERED_TASK)
				dev->commands_since_idle_or_otag = 0;
		} else if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
			   && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
			/*
			 * Periodically inject an ordered tag so earlier
			 * commands cannot be reordered/starved forever.
			 */
			hscb->control |= MSG_ORDERED_TASK;
			dev->commands_since_idle_or_otag = 0;
		} else {
			hscb->control |= MSG_SIMPLE_TASK;
		}
	}

	hscb->cdb_len = cmd->cmd_len;
	if (hscb->cdb_len <= 12) {
		memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
	} else {
		/* CDBs longer than 12 bytes live in the cdb32 area. */
		memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	}

	scb->platform_data->xfer_len = 0;
	ahc_set_residual(scb, 0);
	ahc_set_sense_residual(scb, 0);
	scb->sg_count = 0;
	if (cmd->use_sg != 0) {
		struct ahc_dma_seg *sg;
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;
		int nseg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
				  cmd->sc_data_direction);
		end_seg = cur_seg + nseg;

		/* Copy the segments into the SG list. */
		sg = scb->sg_list;
		/*
		 * The sg_count may be larger than nseg if
		 * a transfer crosses a 32bit page.
		 */
		while (cur_seg < end_seg) {
			dma_addr_t addr;
			bus_size_t len;
			int consumed;

			addr = sg_dma_address(cur_seg);
			len = sg_dma_len(cur_seg);
			/* May split one mapped segment into several
			 * hardware SG entries; returns how many. */
			consumed = ahc_linux_map_seg(ahc, scb,
						     sg, addr, len);
			sg += consumed;
			scb->sg_count += consumed;
			cur_seg++;
		}
		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/*
		 * Reset the sg list pointer.
		 */
		scb->hscb->sgptr =
		    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

		/*
		 * Copy the first SG into the "current"
		 * data pointer area.
		 */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else if (cmd->request_bufflen != 0) {
		struct ahc_dma_seg *sg;
		dma_addr_t addr;

		/* Single linear buffer: map it and build one SG entry. */
		sg = scb->sg_list;
		addr = pci_map_single(ahc->dev_softc,
				      cmd->request_buffer,
				      cmd->request_bufflen,
				      cmd->sc_data_direction);
		scb->platform_data->buf_busaddr = addr;
		scb->sg_count = ahc_linux_map_seg(ahc, scb,
						  sg, addr,
						  cmd->request_bufflen);
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/*
		 * Reset the sg list pointer.
		 */
		scb->hscb->sgptr =
		    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);

		/*
		 * Copy the first SG into the "current"
		 * data pointer area.
		 */
		scb->hscb->dataptr = sg->addr;
		scb->hscb->datacnt = sg->len;
	} else {
		/* No data phase for this command. */
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
		scb->sg_count = 0;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
	dev->openings--;
	dev->active++;
	dev->commands_issued++;
	if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
		dev->commands_since_idle_or_otag++;

	scb->flags |= SCB_ACTIVE;
	if (untagged_q) {
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
	}
	ahc_queue_scb(ahc, scb);
	return 0;
}

/*
 * SCSI controller interrupt handler.
*/
/*
 * Takes the per-controller lock, lets the OS-independent core
 * (ahc_intr) service the chip, and reports whether the interrupt
 * was ours via IRQ_RETVAL().
 */
irqreturn_t
ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
{
	struct ahc_softc *ahc;
	u_long flags;
	int ours;

	ahc = (struct ahc_softc *) dev_id;
	ahc_lock(ahc, &flags);
	ours = ahc_intr(ahc);
	ahc_unlock(ahc, &flags);
	return IRQ_RETVAL(ours);
}

/* No deferred platform work on Linux; hook required by the core. */
void
ahc_platform_flushwork(struct ahc_softc *ahc)
{
}

/*
 * Deliver an asynchronous event from the core driver to the Linux
 * SCSI mid-layer / SPI transport class: a transfer negotiation
 * update, a sent bus-device-reset, or a bus reset.
 */
void
ahc_send_async(struct ahc_softc *ahc, char channel,
	       u_int target, u_int lun, ac_code code, void *arg)
{
	switch (code) {
	case AC_TRANSFER_NEG:
	{
		char buf[80];
		struct scsi_target *starget;
		struct ahc_linux_target *targ;
		struct info_str info;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;
		int target_offset;
		unsigned int target_ppr_options;

		BUG_ON(target == CAM_TARGET_WILDCARD);

		/* NOTE(review): info/buf (and targ below) are set up but
		 * not consumed in the visible span — possibly vestigial;
		 * confirm against the rest of the file. */
		info.buffer = buf;
		info.length = sizeof(buf);
		info.offset = 0;
		info.pos = 0;
		tinfo = ahc_fetch_transinfo(ahc, channel,
					    channel == 'A' ? ahc->our_id
							   : ahc->our_id_b,
					    target, &tstate);

		/*
		 * Don't bother reporting results while
		 * negotiations are still pending.
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			if (bootverbose == 0)
				break;

		/*
		 * Don't bother reporting results that
		 * are identical to those last reported.
		 */
		target_offset = target;
		if (channel == 'B')
			target_offset += 8;
		starget = ahc->platform_data->starget[target_offset];
		if (starget == NULL)
			break;
		targ = scsi_transport_target_data(starget);

		/* Rebuild the PPR option mask currently published to the
		 * transport class, for the comparison below. */
		target_ppr_options =
		    (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
		    + (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0)
		    + (spi_iu(starget) ?
MSG_EXT_PPR_IU_REQ : 0);
		if (tinfo->curr.period == spi_period(starget)
		    && tinfo->curr.width == spi_width(starget)
		    && tinfo->curr.offset == spi_offset(starget)
		    && tinfo->curr.ppr_options == target_ppr_options)
			/* Agreement unchanged; stay quiet unless verbose. */
			if (bootverbose == 0)
				break;

		/* Publish the new agreement to the SPI transport class. */
		spi_period(starget) = tinfo->curr.period;
		spi_width(starget) = tinfo->curr.width;
		spi_offset(starget) = tinfo->curr.offset;
		spi_dt(starget) =
		    tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
		spi_qas(starget) =
		    tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
		spi_iu(starget) =
		    tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
		spi_display_xfer_agreement(starget);
		break;
	}
	case AC_SENT_BDR:
	{
		WARN_ON(lun != CAM_LUN_WILDCARD);
		scsi_report_device_reset(ahc->platform_data->host,
					 channel - 'A', target);
		break;
	}
	case AC_BUS_RESET:
		if (ahc->platform_data->host != NULL) {
			scsi_report_bus_reset(ahc->platform_data->host,
					      channel - 'A');
		}
		break;
	default:
		panic("ahc_send_async: Unexpected async event");
	}
}

/*
 * Calls the higher level scsi done function and frees the scb.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsi_cmnd *cmd;
	struct ahc_linux_device *dev;

	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		/* Only one untagged command may be outstanding per
		 * target, so the queue must now be empty. */
		BUG_ON(!TAILQ_EMPTY(untagged_q));
	}

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Completing an SCB that is not active is fatal. */
		printf("SCB %d done'd twice\n", scb->hscb->tag);
		ahc_dump_card_state(ahc);
		panic("Stopping for safety");
	}
	cmd = scb->io_ctx;
	dev = scb->platform_data->dev;
	dev->active--;
	dev->openings++;
	if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
		cmd->result &= ~(CAM_DEV_QFRZN << 16);
		dev->qfrozen--;
	}
	ahc_linux_unmap_scb(ahc, scb);

	/*
	 * Guard against stale sense data.
	 * The Linux mid-layer assumes that sense
	 * was retrieved anytime the first byte of
	 * the sense buffer looks "sane".
*/
	cmd->sense_buffer[0] = 0;
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		uint32_t amount_xferred;

		amount_xferred =
		    ahc_get_transfer_length(scb) - ahc_get_residual(scb);
		if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MISC) != 0) {
				ahc_print_path(ahc, scb);
				printf("Set CAM_UNCOR_PARITY\n");
			}
#endif
			ahc_set_transaction_status(scb, CAM_UNCOR_PARITY);
#ifdef AHC_REPORT_UNDERFLOWS
		/*
		 * This code is disabled by default as some
		 * clients of the SCSI system do not properly
		 * initialize the underflow parameter.  This
		 * results in spurious termination of commands
		 * that complete as expected (e.g. underflow is
		 * allowed as command can return variable amounts
		 * of data).
		 */
		} else if (amount_xferred < scb->io_ctx->underflow) {
			u_int i;

			ahc_print_path(ahc, scb);
			printf("CDB:");
			for (i = 0; i < scb->io_ctx->cmd_len; i++)
				printf(" 0x%x", scb->io_ctx->cmnd[i]);
			printf("\n");
			ahc_print_path(ahc, scb);
			printf("Saw underflow (%ld of %ld bytes). "
			       "Treated as error\n",
			       ahc_get_residual(scb),
			       ahc_get_transfer_length(scb));
			ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
#endif
		} else {
			ahc_set_transaction_status(scb, CAM_REQ_CMP);
		}
	} else if (ahc_get_transaction_status(scb)
		   == CAM_SCSI_STATUS_ERROR) {
		ahc_linux_handle_scsi_status(ahc, cmd->device, scb);
	}

	/* Count clean completions toward the slow queue-depth
	 * recovery performed below. */
	if (dev->openings == 1
	    && ahc_get_transaction_status(scb) == CAM_REQ_CMP
	    && ahc_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
		dev->tag_success_count++;
	/*
	 * Some devices deal with temporary internal resource
	 * shortages by returning queue full.  When the queue
	 * full occurs, we throttle back.  Slowly try to get
	 * back to our previous queue depth.
*/
	if ((dev->openings + dev->active) < dev->maxtags
	    && dev->tag_success_count > AHC_TAG_SUCCESS_INTERVAL) {
		/* Enough consecutive successes: restore one opening. */
		dev->tag_success_count = 0;
		dev->openings++;
	}

	if (dev->active == 0)
		dev->commands_since_idle_or_otag = 0;

	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		printf("Recovery SCB completes\n");
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		    || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		if ((ahc->platform_data->flags & AHC_UP_EH_SEMAPHORE) != 0) {
			/* A waiter is blocked on eh_sem (flag set);
			 * clear the flag and wake it. */
			ahc->platform_data->flags &= ~AHC_UP_EH_SEMAPHORE;
			up(&ahc->platform_data->eh_sem);
		}
	}

	ahc_free_scb(ahc, scb);
	ahc_linux_queue_cmd_complete(ahc, cmd);
}

/*
 * Handle a non-zero SCSI status returned by the target (check
 * condition, queue full, busy, ...) for the given SCB, applying
 * the driver's own retry and throttling policy.
 */
static void
ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
			     struct scsi_device *sdev, struct scb *scb)
{
	struct ahc_devinfo devinfo;
	struct ahc_linux_device *dev = scsi_transport_device_data(sdev);

	ahc_compile_devinfo(&devinfo,
			    ahc->our_id,
			    sdev->sdev_target->id, sdev->lun,
			    sdev->sdev_target->channel == 0 ? 'A' : 'B',
			    ROLE_INITIATOR);

	/*
	 * We don't currently trust the mid-layer to
	 * properly deal with queue full or busy.  So,
	 * when one occurs, we tell the mid-layer to
	 * unconditionally requeue the command to us
	 * so that we can retry it ourselves.  We also
	 * implement our own throttling mechanism so
	 * we don't clobber the device with too many
	 * commands.
	 */
	switch (ahc_get_scsi_status(scb)) {
	default:
		break;
	case SCSI_STATUS_CHECK_COND:
	case SCSI_STATUS_CMD_TERMINATED:
	{
		struct scsi_cmnd *cmd;

		/*
		 * Copy sense information to the OS's cmd
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -