aic79xx_osm.c
                goto no_cmd;
        }

        printf("%s: At time of recovery, card was %spaused\n",
               ahd_name(ahd), was_paused ? "" : "not ");
        ahd_dump_card_state(ahd);

        disconnected = TRUE;
        if (ahd_search_qinfifo(ahd, cmd->device->id,
                               cmd->device->channel + 'A',
                               cmd->device->lun,
                               SCB_GET_TAG(pending_scb),
                               ROLE_INITIATOR, CAM_REQ_ABORTED,
                               SEARCH_COMPLETE) > 0) {
                printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
                       ahd_name(ahd), cmd->device->channel,
                       cmd->device->id, cmd->device->lun);
                retval = SUCCESS;
                goto done;
        }

        saved_modes = ahd_save_modes(ahd);
        ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
        last_phase = ahd_inb(ahd, LASTPHASE);
        saved_scbptr = ahd_get_scbptr(ahd);
        active_scbptr = saved_scbptr;
        if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
                struct scb *bus_scb;

                bus_scb = ahd_lookup_scb(ahd, active_scbptr);
                if (bus_scb == pending_scb)
                        disconnected = FALSE;
        }

        /*
         * At this point, pending_scb is the scb associated with the
         * passed in command.  That command is currently active on the
         * bus or is in the disconnected state.
         */
        if (last_phase != P_BUSFREE
         && SCB_GET_TAG(pending_scb) == active_scbptr) {

                /*
                 * We're active on the bus, so assert ATN
                 * and hope that the target responds.
                 */
                pending_scb = ahd_lookup_scb(ahd, active_scbptr);
                pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
                ahd_outb(ahd, MSG_OUT, HOST_MSG);
                ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
                printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
                       ahd_name(ahd), cmd->device->channel,
                       cmd->device->id, cmd->device->lun);
                wait = TRUE;
        } else if (disconnected) {

                /*
                 * Actually re-queue this SCB in an attempt
                 * to select the device before it reconnects.
                 */
                pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
                ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
                pending_scb->hscb->cdb_len = 0;
                pending_scb->hscb->task_attribute = 0;
                pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;

                if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
                        /*
                         * Mark the SCB as having an outstanding
                         * task management function.  Should the command
                         * complete normally before the task management
                         * function can be sent, the host will be notified
                         * to abort our requeued SCB.
                         */
                        ahd_outb(ahd, SCB_TASK_MANAGEMENT,
                                 pending_scb->hscb->task_management);
                } else {
                        /*
                         * If non-packetized, set the MK_MESSAGE control
                         * bit indicating that we desire to send a message.
                         * We also set the disconnected flag since there is
                         * no guarantee that our SCB control byte matches
                         * the version on the card.  We don't want the
                         * sequencer to abort the command thinking an
                         * unsolicited reselection occurred.
                         */
                        pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;

                        /*
                         * The sequencer will never re-reference the
                         * in-core SCB.  To make sure we are notified
                         * during reselection, set the MK_MESSAGE flag in
                         * the card's copy of the SCB.
                         */
                        ahd_outb(ahd, SCB_CONTROL,
                                 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
                }

                /*
                 * Clear out any entries in the QINFIFO first
                 * so we are the next SCB for this target
                 * to run.
                 */
                ahd_search_qinfifo(ahd, cmd->device->id,
                                   cmd->device->channel + 'A',
                                   cmd->device->lun, SCB_LIST_NULL,
                                   ROLE_INITIATOR, CAM_REQUEUE_REQ,
                                   SEARCH_COMPLETE);
                ahd_qinfifo_requeue_tail(ahd, pending_scb);
                ahd_set_scbptr(ahd, saved_scbptr);
                ahd_print_path(ahd, pending_scb);
                printf("Device is disconnected, re-queuing SCB\n");
                wait = TRUE;
        } else {
                printf("%s:%d:%d:%d: Unable to deliver message\n",
                       ahd_name(ahd), cmd->device->channel,
                       cmd->device->id, cmd->device->lun);
                retval = FAILED;
                goto done;
        }

no_cmd:
        /*
         * Our assumption is that if we don't have the command, no
         * recovery action was required, so we return success.  Again,
         * the semantics of the mid-layer recovery engine are not
         * well defined, so this may change in time.
         */
        retval = SUCCESS;

done:
        if (paused)
                ahd_unpause(ahd);
        if (wait) {
                struct timer_list timer;
                int ret;

                pending_scb->platform_data->flags |= AHD_SCB_UP_EH_SEM;
                spin_unlock_irq(&ahd->platform_data->spin_lock);
                init_timer(&timer);
                timer.data = (u_long)pending_scb;
                timer.expires = jiffies + (5 * HZ);
                timer.function = ahd_linux_sem_timeout;
                add_timer(&timer);
                printf("Recovery code sleeping\n");
                down(&ahd->platform_data->eh_sem);
                printf("Recovery code awake\n");
                ret = del_timer_sync(&timer);
                if (ret == 0) {
                        printf("Timer Expired\n");
                        retval = FAILED;
                }
                spin_lock_irq(&ahd->platform_data->spin_lock);
        }
        ahd_schedule_runq(ahd);
        ahd_linux_run_complete_queue(ahd);
        ahd_midlayer_entrypoint_unlock(ahd, &s);
        return (retval);
}

static void
ahd_linux_dev_reset_complete(Scsi_Cmnd *cmd)
{
        free(cmd, M_DEVBUF);
}

/*
 * Attempt to send a target reset message to the device that timed out.
 */
static int
ahd_linux_dev_reset(Scsi_Cmnd *cmd)
{
        struct ahd_softc *ahd;
        struct scsi_cmnd *recovery_cmd;
        struct ahd_linux_device *dev;
        struct ahd_initiator_tinfo *tinfo;
        struct ahd_tmode_tstate *tstate;
        struct scb *scb;
        struct hardware_scb *hscb;
        u_long s;
        struct timer_list timer;
        int retval;

        ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
        recovery_cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
        if (!recovery_cmd)
                return (FAILED);
        memset(recovery_cmd, 0, sizeof(struct scsi_cmnd));
        recovery_cmd->device = cmd->device;
        recovery_cmd->scsi_done = ahd_linux_dev_reset_complete;
#if AHD_DEBUG
        if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
                printf("%s:%d:%d:%d: Device reset called for cmd %p\n",
                       ahd_name(ahd), cmd->device->channel, cmd->device->id,
                       cmd->device->lun, cmd);
#endif
        ahd_midlayer_entrypoint_lock(ahd, &s);

        dev = ahd_linux_get_device(ahd, cmd->device->channel, cmd->device->id,
                                   cmd->device->lun, /*alloc*/FALSE);
        if (dev == NULL) {
                ahd_midlayer_entrypoint_unlock(ahd, &s);
                kfree(recovery_cmd);
                return (FAILED);
        }
        if ((scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX)) == NULL) {
                ahd_midlayer_entrypoint_unlock(ahd, &s);
                kfree(recovery_cmd);
                return (FAILED);
        }
        tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
                                    cmd->device->id, &tstate);
        recovery_cmd->result = CAM_REQ_INPROG << 16;
        recovery_cmd->host_scribble = (char *)scb;
        scb->io_ctx = recovery_cmd;
        scb->platform_data->dev = dev;
        scb->sg_count = 0;
        ahd_set_residual(scb, 0);
        ahd_set_sense_residual(scb, 0);
        hscb = scb->hscb;
        hscb->control = 0;
        hscb->scsiid = BUILD_SCSIID(ahd, cmd);
        hscb->lun = cmd->device->lun;
        hscb->cdb_len = 0;
        hscb->task_management = SIU_TASKMGMT_LUN_RESET;
        scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE;
        if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
                scb->flags |= SCB_PACKETIZED;
        } else {
                hscb->control |= MK_MESSAGE;
        }
        dev->openings--;
        dev->active++;
        dev->commands_issued++;
        LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
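        /*
         * Queue the recovery SCB, then sleep on the host's eh_sem with the
         * adapter lock dropped.  The 5 * HZ timer guards against the reset
         * never completing; if del_timer_sync() reports that the timer has
         * already fired, the reset is treated as FAILED.
         */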
        ahd_queue_scb(ahd, scb);

        scb->platform_data->flags |= AHD_SCB_UP_EH_SEM;
        spin_unlock_irq(&ahd->platform_data->spin_lock);
        init_timer(&timer);
        timer.data = (u_long)scb;
        timer.expires = jiffies + (5 * HZ);
        timer.function = ahd_linux_sem_timeout;
        add_timer(&timer);
        printf("Recovery code sleeping\n");
        down(&ahd->platform_data->eh_sem);
        printf("Recovery code awake\n");
        retval = SUCCESS;
        if (del_timer_sync(&timer) == 0) {
                printf("Timer Expired\n");
                retval = FAILED;
        }
        spin_lock_irq(&ahd->platform_data->spin_lock);
        ahd_schedule_runq(ahd);
        ahd_linux_run_complete_queue(ahd);
        ahd_midlayer_entrypoint_unlock(ahd, &s);
        printf("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);
        return (retval);
}

/*
 * Reset the SCSI bus.
 */
static int
ahd_linux_bus_reset(Scsi_Cmnd *cmd)
{
        struct ahd_softc *ahd;
        u_long s;
        int found;

        ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
#ifdef AHD_DEBUG
        if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
                printf("%s: Bus reset called for cmd %p\n",
                       ahd_name(ahd), cmd);
#endif
        ahd_midlayer_entrypoint_lock(ahd, &s);
        found = ahd_reset_channel(ahd, cmd->device->channel + 'A',
                                  /*initiate reset*/TRUE);
        ahd_linux_run_complete_queue(ahd);
        ahd_midlayer_entrypoint_unlock(ahd, &s);

        if (bootverbose)
                printf("%s: SCSI bus reset delivered. "
                       "%d SCBs aborted.\n", ahd_name(ahd), found);

        return (SUCCESS);
}

Scsi_Host_Template aic79xx_driver_template = {
        .module                  = THIS_MODULE,
        .name                    = "aic79xx",
        .proc_info               = ahd_linux_proc_info,
        .info                    = ahd_linux_info,
        .queuecommand            = ahd_linux_queue,
        .eh_abort_handler        = ahd_linux_abort,
        .eh_device_reset_handler = ahd_linux_dev_reset,
        .eh_bus_reset_handler    = ahd_linux_bus_reset,
#if defined(__i386__)
        .bios_param              = ahd_linux_biosparam,
#endif
        .can_queue               = AHD_MAX_QUEUE,
        .this_id                 = -1,
        .cmd_per_lun             = 2,
        .use_clustering          = ENABLE_CLUSTERING,
        .slave_alloc             = ahd_linux_slave_alloc,
        .slave_configure         = ahd_linux_slave_configure,
        .slave_destroy           = ahd_linux_slave_destroy,
};

/**************************** Tasklet Handler *********************************/

/*
 * In 2.4.X and above, this routine is called from a tasklet,
 * so we must re-acquire our lock prior to executing this code.
 * In all prior kernels, ahd_schedule_runq() calls this routine
 * directly and ahd_schedule_runq() is called with our lock held.
 */
static void
ahd_runq_tasklet(unsigned long data)
{
        struct ahd_softc *ahd;
        struct ahd_linux_device *dev;
        u_long flags;

        ahd = (struct ahd_softc *)data;
        ahd_lock(ahd, &flags);
        while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
                TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
                dev->flags &= ~AHD_DEV_ON_RUN_LIST;
                ahd_linux_check_device_queue(ahd, dev);
                /* Yield to our interrupt handler */
                ahd_unlock(ahd, &flags);
                ahd_lock(ahd, &flags);
        }
        ahd_unlock(ahd, &flags);
}

/******************************** Bus DMA *************************************/
int
ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
                   bus_size_t alignment, bus_size_t boundary,
                   dma_addr_t lowaddr, dma_addr_t highaddr,
                   bus_dma_filter_t *filter, void *filterarg,
                   bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
{
        bus_dma_tag_t dmat;

        dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
        if (dmat == NULL)
                return (ENOMEM);

        /*
         * Linux is very simplistic about DMA memory.  For now don't
         * maintain all specification information.  Once Linux supplies
         * better facilities for doing these operations, or the
         * needs of this particular driver change, we might need to do
         * more here.
         */
        dmat->alignment = alignment;
        dmat->boundary = boundary;
        dmat->maxsize = maxsize;
        *ret_tag = dmat;
        return (0);
}

void
ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat)
{
        free(dmat, M_DEVBUF);
}

int
ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void **vaddr,
                 int flags, bus_dmamap_t *mapp)
{
        bus_dmamap_t map;

        map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
        if (map == NULL)
                return (ENOMEM);
        /*
         * Although we can dma data above 4GB, our
         * "consistent" memory is below 4GB for
         * space efficiency reasons (only need a 4byte
         * address).  For this reason, we have to reset
         * our dma mask when doing allocations.
         */
        if (ahd->dev_softc != NULL)
                if (pci_set_dma_mask(ahd->dev_softc, 0xFFFFFFFF)) {
                        printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
                        kfree(map);
                        return (ENODEV);
                }
        *vaddr = pci_alloc_consistent(ahd->dev_softc,
                                      dmat->maxsize, &map->bus_addr);
        if (ahd->dev_softc != NULL)
                if (pci_set_dma_mask(ahd->dev_softc,
                                     ahd->platform_data->hw_dma_mask)) {
                        printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
                        kfree(map);
                        return (ENODEV);
                }
        if (*vaddr == NULL)
                return (ENOMEM);
        *mapp = map;
        return (0);
}

void
ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
                void *vaddr, bus_dmamap_t map)
{
        pci_free_consistent(ahd->dev_softc, dmat->maxsize,
                            vaddr, map->bus_addr);
}

int
ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
                void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
                void *cb_arg, int flags)
{
        /*
         * Assume for now that this will only be used during
         * initialization and not for per-transaction buffer mapping.
         */
        bus_dma_segment_t stack_sg;

        stack_sg.ds_addr = map->bus_addr;
        stack_sg.ds_len = dmat->maxsize;
        cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
        return (0);
}

void
ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
{
        /*
         * The map may be NULL in our < 2.3.X implementation.
         */
        if (map != NULL)
                free(map, M_DEVBUF);
}
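/*
 * Illustrative usage sketch -- not part of the original driver.  It shows,
 * under stated assumptions, how the bus-DMA shim above is typically used
 * during initialization: create a tag, allocate consistent memory, then
 * load the map and pick up the bus address in the callback.  The names
 * example_mem, example_cb and example_alloc are hypothetical; the callback
 * follows the cb(cb_arg, segs, nseg, error) convention ahd_dmamap_load()
 * uses above.  Wrapped in "#if 0" so it is never compiled.
 */
#if 0
struct example_mem {
        void         *vaddr;    /* kernel virtual address of the block */
        dma_addr_t    busaddr;  /* bus address reported by the callback */
        bus_dma_tag_t tag;
        bus_dmamap_t  map;
};

static void
example_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct example_mem *mem;

        mem = (struct example_mem *)arg;
        if (error == 0 && nseg == 1)
                mem->busaddr = segs[0].ds_addr;
}

static int
example_alloc(struct ahd_softc *ahd, struct example_mem *mem, bus_size_t size)
{
        /* The parent tag and filter arguments are ignored by the shim above. */
        if (ahd_dma_tag_create(ahd, /*parent*/NULL, /*alignment*/1,
                               /*boundary*/0, /*lowaddr*/0xFFFFFFFF,
                               /*highaddr*/0, /*filter*/NULL, /*filterarg*/NULL,
                               size, /*nsegments*/1, /*maxsegsz*/size,
                               /*flags*/0, &mem->tag) != 0)
                return (ENOMEM);

        /* Allocate "consistent" memory below 4GB and record its mapping. */
        if (ahd_dmamem_alloc(ahd, mem->tag, &mem->vaddr,
                             /*flags*/0, &mem->map) != 0) {
                ahd_dma_tag_destroy(ahd, mem->tag);
                return (ENOMEM);
        }

        /* The shim reports a single segment covering the whole allocation. */
        ahd_dmamap_load(ahd, mem->tag, mem->map, mem->vaddr, size,
                        example_cb, mem, /*flags*/0);
        return (0);
}
#endif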