📄 aic79xx_osm.c
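Excerpt from the Linux OSM (Operating System Module) layer of the Adaptec AIC-79xx Ultra320 SCSI driver: command setup and queuing, the controller interrupt handler, asynchronous event reporting, and command completion/status handling.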
		ahd_set_tags(ahd, &devinfo, AHD_QUEUE_TAGGED);
		ahd_print_devinfo(ahd, &devinfo);
		printf("Tagged Queuing enabled.  Depth %d\n", tags);
	} else {
		ahd_set_tags(ahd, &devinfo, AHD_QUEUE_NONE);
	}
}

static int
ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
		      struct scsi_cmnd *cmd)
{
	struct	 scb *scb;
	struct	 hardware_scb *hscb;
	struct	 ahd_initiator_tinfo *tinfo;
	struct	 ahd_tmode_tstate *tstate;
	u_int	 col_idx;
	uint16_t mask;

	/*
	 * Get an scb to use.
	 */
	tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
				    cmd->device->id, &tstate);
	if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
	 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
		col_idx = AHD_NEVER_COL_IDX;
	} else {
		col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
					    cmd->device->lun);
	}
	if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
		ahd->flags |= AHD_RESOURCE_SHORTAGE;
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	scb->io_ctx = cmd;
	scb->platform_data->dev = dev;
	hscb = scb->hscb;
	cmd->host_scribble = (char *)scb;

	/*
	 * Fill out basics of the HSCB.
	 */
	hscb->control = 0;
	hscb->scsiid = BUILD_SCSIID(ahd, cmd);
	hscb->lun = cmd->device->lun;
	scb->hscb->task_management = 0;
	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((ahd->user_discenable & mask) != 0)
		hscb->control |= DISCENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
		scb->flags |= SCB_PACKETIZED;

	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
		int	msg_bytes;
		uint8_t tag_msgs[2];

		msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
		if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
			hscb->control |= tag_msgs[0];
			if (tag_msgs[0] == MSG_ORDERED_TASK)
				dev->commands_since_idle_or_otag = 0;
		} else if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
			&& (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
			hscb->control |= MSG_ORDERED_TASK;
			dev->commands_since_idle_or_otag = 0;
		} else {
			hscb->control |= MSG_SIMPLE_TASK;
		}
	}

	hscb->cdb_len = cmd->cmd_len;
	memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len);

	scb->platform_data->xfer_len = 0;
	ahd_set_residual(scb, 0);
	ahd_set_sense_residual(scb, 0);
	scb->sg_count = 0;
	if (cmd->use_sg != 0) {
		void	*sg;
		struct	 scatterlist *cur_seg;
		u_int	 nseg;
		int	 dir;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		dir = cmd->sc_data_direction;
		nseg = pci_map_sg(ahd->dev_softc, cur_seg,
				  cmd->use_sg, dir);
		scb->platform_data->xfer_len = 0;
		for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) {
			dma_addr_t addr;
			bus_size_t len;

			addr = sg_dma_address(cur_seg);
			len = sg_dma_len(cur_seg);
			scb->platform_data->xfer_len += len;
			sg = ahd_sg_setup(ahd, scb, sg, addr, len,
					  /*last*/nseg == 1);
		}
	} else if (cmd->request_bufflen != 0) {
		void	*sg;
		dma_addr_t addr;
		int	 dir;

		sg = scb->sg_list;
		dir = cmd->sc_data_direction;
		addr = pci_map_single(ahd->dev_softc,
				      cmd->request_buffer,
				      cmd->request_bufflen, dir);
		scb->platform_data->xfer_len = cmd->request_bufflen;
		scb->platform_data->buf_busaddr = addr;
		sg = ahd_sg_setup(ahd, scb, sg, addr,
				  cmd->request_bufflen, /*last*/TRUE);
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
	dev->openings--;
	dev->active++;
	dev->commands_issued++;

	if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0)
		dev->commands_since_idle_or_otag++;
	scb->flags |= SCB_ACTIVE;
	ahd_queue_scb(ahd, scb);

	return 0;
}
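/*
 * Note on the queuing path above: the SCB is drawn from a per-target/LUN
 * "collision" free list (col_idx) unless the device is untagged or running
 * packetized (IU) transfers; the HSCB is then stamped with target ID, LUN,
 * tag type, and CDB bytes, and the data buffer is mapped for DMA either as
 * a scatter-gather list (cmd->use_sg != 0) or as a single segment via
 * pci_map_single().  The matching unmap is done by ahd_linux_unmap_scb()
 * when the command completes through ahd_done() below.
 */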
/*
 * SCSI controller interrupt handler.
 */
irqreturn_t
ahd_linux_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct	ahd_softc *ahd;
	u_long	flags;
	int	ours;

	ahd = (struct ahd_softc *) dev_id;
	ahd_lock(ahd, &flags);
	ours = ahd_intr(ahd);
	ahd_unlock(ahd, &flags);
	return IRQ_RETVAL(ours);
}

void
ahd_platform_flushwork(struct ahd_softc *ahd)
{
}

void
ahd_send_async(struct ahd_softc *ahd, char channel,
	       u_int target, u_int lun, ac_code code, void *arg)
{
	switch (code) {
	case AC_TRANSFER_NEG:
	{
		char	buf[80];
		struct	scsi_target *starget;
		struct	ahd_linux_target *targ;
		struct	info_str info;
		struct	ahd_initiator_tinfo *tinfo;
		struct	ahd_tmode_tstate *tstate;
		unsigned int target_ppr_options;

		BUG_ON(target == CAM_TARGET_WILDCARD);

		info.buffer = buf;
		info.length = sizeof(buf);
		info.offset = 0;
		info.pos = 0;
		tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
					    target, &tstate);

		/*
		 * Don't bother reporting results while
		 * negotiations are still pending.
		 */
		if (tinfo->curr.period != tinfo->goal.period
		 || tinfo->curr.width != tinfo->goal.width
		 || tinfo->curr.offset != tinfo->goal.offset
		 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			if (bootverbose == 0)
				break;

		/*
		 * Don't bother reporting results that
		 * are identical to those last reported.
		 */
		starget = ahd->platform_data->starget[target];
		if (starget == NULL)
			break;
		targ = scsi_transport_target_data(starget);

		target_ppr_options =
			(spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
			+ (spi_qas(starget) ? MSG_EXT_PPR_QAS_REQ : 0)
			+ (spi_iu(starget) ? MSG_EXT_PPR_IU_REQ : 0)
			+ (spi_rd_strm(starget) ? MSG_EXT_PPR_RD_STRM : 0)
			+ (spi_pcomp_en(starget) ? MSG_EXT_PPR_PCOMP_EN : 0)
			+ (spi_rti(starget) ? MSG_EXT_PPR_RTI : 0)
			+ (spi_wr_flow(starget) ? MSG_EXT_PPR_WR_FLOW : 0)
			+ (spi_hold_mcs(starget) ? MSG_EXT_PPR_HOLD_MCS : 0);

		if (tinfo->curr.period == spi_period(starget)
		 && tinfo->curr.width == spi_width(starget)
		 && tinfo->curr.offset == spi_offset(starget)
		 && tinfo->curr.ppr_options == target_ppr_options)
			if (bootverbose == 0)
				break;

		spi_period(starget) = tinfo->curr.period;
		spi_width(starget) = tinfo->curr.width;
		spi_offset(starget) = tinfo->curr.offset;
		spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0;
		spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0;
		spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0;
		spi_rd_strm(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RD_STRM ? 1 : 0;
		spi_pcomp_en(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_PCOMP_EN ? 1 : 0;
		spi_rti(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RTI ? 1 : 0;
		spi_wr_flow(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_WR_FLOW ? 1 : 0;
		spi_hold_mcs(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_HOLD_MCS ? 1 : 0;
		spi_display_xfer_agreement(starget);
		break;
	}
	case AC_SENT_BDR:
	{
		WARN_ON(lun != CAM_LUN_WILDCARD);
		scsi_report_device_reset(ahd->platform_data->host,
					 channel - 'A', target);
		break;
	}
	case AC_BUS_RESET:
		if (ahd->platform_data->host != NULL) {
			scsi_report_bus_reset(ahd->platform_data->host,
					      channel - 'A');
		}
		break;
	default:
		panic("ahd_send_async: Unexpected async event");
	}
}
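/*
 * For reference, the handler above is registered as a shared interrupt
 * from the driver's PCI glue.  A sketch of the 2.6-era idiom (the actual
 * call lives in the companion OSM PCI file, and its flag spelling and
 * error handling may differ):
 *
 *	error = request_irq(ahd->dev_softc->irq, ahd_linux_isr,
 *			    SA_SHIRQ, "aic79xx", ahd);
 *
 * Because the line can be shared, the handler reports via IRQ_RETVAL(ours)
 * whether this controller actually raised the interrupt, which lets the
 * kernel detect unhandled interrupts on the line.
 */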
/*
 * Calls the higher level scsi done function and frees the scb.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	struct scsi_cmnd *cmd;
	struct ahd_linux_device *dev;

	if ((scb->flags & SCB_ACTIVE) == 0) {
		printf("SCB %d done'd twice\n", SCB_GET_TAG(scb));
		ahd_dump_card_state(ahd);
		panic("Stopping for safety");
	}
	LIST_REMOVE(scb, pending_links);
	cmd = scb->io_ctx;
	dev = scb->platform_data->dev;
	dev->active--;
	dev->openings++;
	if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
		cmd->result &= ~(CAM_DEV_QFRZN << 16);
		dev->qfrozen--;
	}
	ahd_linux_unmap_scb(ahd, scb);

	/*
	 * Guard against stale sense data.
	 * The Linux mid-layer assumes that sense
	 * was retrieved anytime the first byte of
	 * the sense buffer looks "sane".
	 */
	cmd->sense_buffer[0] = 0;
	if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
		uint32_t amount_xferred;

		amount_xferred =
		    ahd_get_transfer_length(scb) - ahd_get_residual(scb);
		if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_MISC) != 0) {
				ahd_print_path(ahd, scb);
				printf("Set CAM_UNCOR_PARITY\n");
			}
#endif
			ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
#ifdef AHD_REPORT_UNDERFLOWS
		/*
		 * This code is disabled by default as some
		 * clients of the SCSI system do not properly
		 * initialize the underflow parameter.  This
		 * results in spurious termination of commands
		 * that complete as expected (e.g. an underflow
		 * is allowed when a command can return variable
		 * amounts of data).
		 */
		} else if (amount_xferred < scb->io_ctx->underflow) {
			u_int i;

			ahd_print_path(ahd, scb);
			printf("CDB:");
			for (i = 0; i < scb->io_ctx->cmd_len; i++)
				printf(" 0x%x", scb->io_ctx->cmnd[i]);
			printf("\n");
			ahd_print_path(ahd, scb);
			printf("Saw underflow (%ld of %ld bytes). "
			       "Treated as error\n",
			       ahd_get_residual(scb),
			       ahd_get_transfer_length(scb));
			ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
#endif
		} else {
			ahd_set_transaction_status(scb, CAM_REQ_CMP);
		}
	} else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
		ahd_linux_handle_scsi_status(ahd, cmd->device, scb);
	}

	if (dev->openings == 1
	 && ahd_get_transaction_status(scb) == CAM_REQ_CMP
	 && ahd_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
		dev->tag_success_count++;
	/*
	 * Some devices deal with temporary internal resource
	 * shortages by returning queue full.  When a queue
	 * full occurs, we throttle back.  Slowly try to get
	 * back to our previous queue depth.
	 */
	if ((dev->openings + dev->active) < dev->maxtags
	 && dev->tag_success_count > AHD_TAG_SUCCESS_INTERVAL) {
		dev->tag_success_count = 0;
		dev->openings++;
	}

	if (dev->active == 0)
		dev->commands_since_idle_or_otag = 0;

	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		printf("Recovery SCB completes\n");
		if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		if ((ahd->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
			ahd->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
			up(&ahd->platform_data->eh_sem);
		}
	}

	ahd_free_scb(ahd, scb);
	ahd_linux_queue_cmd_complete(ahd, cmd);
}
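/*
 * Note on the queue-depth throttling above: completions count toward
 * tag_success_count only while the device is running at the edge of its
 * current depth (dev->openings == 1), and once that count passes
 * AHD_TAG_SUCCESS_INTERVAL a single opening is restored, capped at
 * dev->maxtags.  A QUEUE FULL, handled in ahd_linux_handle_scsi_status()
 * below, zeroes the counter and drops the openings immediately, so the
 * effective depth shrinks at once and recovers only gradually.
 */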
static void
ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
			     struct scsi_device *sdev, struct scb *scb)
{
	struct	ahd_devinfo devinfo;
	struct	ahd_linux_device *dev = scsi_transport_device_data(sdev);

	ahd_compile_devinfo(&devinfo,
			    ahd->our_id,
			    sdev->sdev_target->id, sdev->lun,
			    sdev->sdev_target->channel == 0 ? 'A' : 'B',
			    ROLE_INITIATOR);

	/*
	 * We don't currently trust the mid-layer to
	 * properly deal with queue full or busy.  So,
	 * when one occurs, we tell the mid-layer to
	 * unconditionally requeue the command to us
	 * so that we can retry it ourselves.  We also
	 * implement our own throttling mechanism so
	 * we don't clobber the device with too many
	 * commands.
	 */
	switch (ahd_get_scsi_status(scb)) {
	default:
		break;
	case SCSI_STATUS_CHECK_COND:
	case SCSI_STATUS_CMD_TERMINATED:
	{
		struct scsi_cmnd *cmd;

		/*
		 * Copy sense information to the OS's cmd
		 * structure if it is available.
		 */
		cmd = scb->io_ctx;
		if ((scb->flags & (SCB_SENSE|SCB_PKT_SENSE)) != 0) {
			struct scsi_status_iu_header *siu;
			u_int sense_size;
			u_int sense_offset;

			if (scb->flags & SCB_SENSE) {
				sense_size = MIN(sizeof(struct scsi_sense_data)
					       - ahd_get_sense_residual(scb),
						 sizeof(cmd->sense_buffer));
				sense_offset = 0;
			} else {
				/*
				 * Copy only the sense data into the provided
				 * buffer.
				 */
				siu = (struct scsi_status_iu_header *)
				    scb->sense_data;
				sense_size = MIN(scsi_4btoul(siu->sense_length),
						 sizeof(cmd->sense_buffer));
				sense_offset = SIU_SENSE_OFFSET(siu);
			}

			memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
			memcpy(cmd->sense_buffer,
			       ahd_get_sense_buf(ahd, scb)
			       + sense_offset, sense_size);
			cmd->result |= (DRIVER_SENSE << 24);
#ifdef AHD_DEBUG
			if (ahd_debug & AHD_SHOW_SENSE) {
				int i;

				printf("Copied %d bytes of sense data at %d:",
				       sense_size, sense_offset);
				for (i = 0; i < sense_size; i++) {
					if ((i & 0xF) == 0)
						printf("\n");
					printf("0x%x ", cmd->sense_buffer[i]);
				}
				printf("\n");
			}
#endif
		}
		break;
	}
	case SCSI_STATUS_QUEUE_FULL:
		/*
		 * By the time the core driver has returned this
		 * command, all other commands that were queued
		 * to us but not the device have been returned.
		 * This ensures that dev->active is equal to
		 * the number of commands actually queued to
		 * the device.
		 */
		dev->tag_success_count = 0;
		if (dev->active != 0) {
			/*
			 * Drop our opening count to the number
			 * of commands currently outstanding.
			 */
			dev->openings = 0;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_QFULL) != 0) {
				ahd_print_path(ahd, scb);
				printf("Dropping tag count to %d\n",
				       dev->active);
			}
#endif