/* aic7xxx_osm.c — Adaptec AIC7xxx Linux host adapter driver (OS glue) */
* write wild stuff into our code segment */static char dummy_buffer[60] = "Please don't trounce on me insmod!!\n";MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>");MODULE_DESCRIPTION("Adaptec Aic77XX/78XX SCSI Host Bus Adapter driver");#ifdef MODULE_LICENSEMODULE_LICENSE("Dual BSD/GPL");#endifMODULE_PARM(aic7xxx, "s");MODULE_PARM_DESC(aic7xxx,"period delimited, options string.\n"" verbose Enable verbose/diagnostic logging\n"" allow_memio Allow device registers to be memory mapped\n"" debug Bitmask of debug values to enable\n"" no_probe Toggle EISA/VLB controller probing\n"" probe_eisa_vl Toggle EISA/VLB controller probing\n"" no_reset Supress initial bus resets\n"" extended Enable extended geometry on all controllers\n"" periodic_otag Send an ordered tagged transaction\n"" periodically to prevent tag starvation.\n"" This may be required by some older disk\n"" drives or RAID arrays.\n"" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"" tag_info:<tag_str> Set per-target tag depth\n"" global_tag_depth:<int> Global tag depth for every target\n"" on every bus\n"" dv:<dv_settings> Set per-controller Domain Validation Setting.\n"" seltime:<int> Selection Timeout\n"" (0/256ms,1/128ms,2/64ms,3/32ms)\n""\n"" Sample /etc/modprobe.conf line:\n"" Toggle EISA/VLB probing\n"" Set tag depth on Controller 1/Target 1 to 10 tags\n"" Shorten the selection timeout to 128ms\n""\n"" options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n");#endifstatic void ahc_linux_handle_scsi_status(struct ahc_softc *, struct ahc_linux_device *, struct scb *);static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd);static void ahc_linux_filter_inquiry(struct ahc_softc*, struct ahc_devinfo*);static void ahc_linux_sem_timeout(u_long arg);static void ahc_linux_freeze_simq(struct ahc_softc *ahc);static void ahc_linux_release_simq(u_long arg);static void ahc_linux_dev_timed_unfreeze(u_long arg);static int 
ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag);static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);static void ahc_linux_size_nseg(void);static void ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc);static void ahc_linux_start_dv(struct ahc_softc *ahc);static void ahc_linux_dv_timeout(struct scsi_cmnd *cmd);static int ahc_linux_dv_thread(void *data);static void ahc_linux_kill_dv_thread(struct ahc_softc *ahc);static void ahc_linux_dv_target(struct ahc_softc *ahc, u_int target);static void ahc_linux_dv_transition(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo, struct ahc_linux_target *targ);static void ahc_linux_dv_fill_cmd(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo);static void ahc_linux_dv_inq(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo, struct ahc_linux_target *targ, u_int request_length);static void ahc_linux_dv_tur(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo);static void ahc_linux_dv_rebd(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo, struct ahc_linux_target *targ);static void ahc_linux_dv_web(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo, struct ahc_linux_target *targ);static void ahc_linux_dv_reb(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo, struct ahc_linux_target *targ);static void ahc_linux_dv_su(struct ahc_softc *ahc, struct scsi_cmnd *cmd, struct ahc_devinfo *devinfo, struct ahc_linux_target *targ);static int ahc_linux_fallback(struct ahc_softc *ahc, struct ahc_devinfo *devinfo);static void ahc_linux_dv_complete(Scsi_Cmnd *cmd);static void ahc_linux_generate_dv_pattern(struct ahc_linux_target *targ);static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo);static u_int ahc_linux_user_dv_setting(struct ahc_softc *ahc);static void ahc_linux_device_queue_depth(struct ahc_softc *ahc, struct 
ahc_linux_device *dev);static struct ahc_linux_target* ahc_linux_alloc_target(struct ahc_softc*, u_int, u_int);static void ahc_linux_free_target(struct ahc_softc*, struct ahc_linux_target*);static struct ahc_linux_device* ahc_linux_alloc_device(struct ahc_softc*, struct ahc_linux_target*, u_int);static void ahc_linux_free_device(struct ahc_softc*, struct ahc_linux_device*);static void ahc_linux_run_device_queue(struct ahc_softc*, struct ahc_linux_device*);static void ahc_linux_setup_tag_info_global(char *p);static aic_option_callback_t ahc_linux_setup_tag_info;static aic_option_callback_t ahc_linux_setup_dv;static int aic7xxx_setup(char *s);static int ahc_linux_next_unit(void);static void ahc_runq_tasklet(unsigned long data);static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc);/********************************* Inlines ************************************/static __inline void ahc_schedule_runq(struct ahc_softc *ahc);static __inline struct ahc_linux_device* ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target, u_int lun, int alloc);static __inline void ahc_schedule_completeq(struct ahc_softc *ahc);static __inline void ahc_linux_check_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev);static __inline struct ahc_linux_device * ahc_linux_next_device_to_run(struct ahc_softc *ahc);static __inline void ahc_linux_run_device_queues(struct ahc_softc *ahc);static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, struct ahc_dma_seg *sg, bus_addr_t addr, bus_size_t len);static __inline voidahc_schedule_completeq(struct ahc_softc *ahc){ if ((ahc->platform_data->flags & AHC_RUN_CMPLT_Q_TIMER) == 0) { ahc->platform_data->flags |= AHC_RUN_CMPLT_Q_TIMER; ahc->platform_data->completeq_timer.expires = jiffies; add_timer(&ahc->platform_data->completeq_timer); }}/* * Must be called with our lock held. 
*/static __inline voidahc_schedule_runq(struct ahc_softc *ahc){#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) tasklet_schedule(&ahc->platform_data->runq_tasklet);#else /* * Tasklets are not available, so run inline. */ ahc_runq_tasklet((unsigned long)ahc);#endif}static __inline struct ahc_linux_device*ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target, u_int lun, int alloc){ struct ahc_linux_target *targ; struct ahc_linux_device *dev; u_int target_offset; target_offset = target; if (channel != 0) target_offset += 8; targ = ahc->platform_data->targets[target_offset]; if (targ == NULL) { if (alloc != 0) { targ = ahc_linux_alloc_target(ahc, channel, target); if (targ == NULL) return (NULL); } else return (NULL); } dev = targ->devices[lun]; if (dev == NULL && alloc != 0) dev = ahc_linux_alloc_device(ahc, targ, lun); return (dev);}#define AHC_LINUX_MAX_RETURNED_ERRORS 4static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc){ struct ahc_cmd *acmd; u_long done_flags; int with_errors; with_errors = 0; ahc_done_lock(ahc, &done_flags); while ((acmd = TAILQ_FIRST(&ahc->platform_data->completeq)) != NULL) { Scsi_Cmnd *cmd; if (with_errors > AHC_LINUX_MAX_RETURNED_ERRORS) { /* * Linux uses stack recursion to requeue * commands that need to be retried. Avoid * blowing out the stack by "spoon feeding" * commands that completed with error back * the operating system in case they are going * to be retried. 
"ick" */ ahc_schedule_completeq(ahc); break; } TAILQ_REMOVE(&ahc->platform_data->completeq, acmd, acmd_links.tqe); cmd = &acmd_scsi_cmd(acmd); cmd->host_scribble = NULL; if (ahc_cmd_get_transaction_status(cmd) != DID_OK || (cmd->result & 0xFF) != SCSI_STATUS_OK) with_errors++; cmd->scsi_done(cmd); } ahc_done_unlock(ahc, &done_flags); return (acmd);}static __inline voidahc_linux_check_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev){ if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) != 0 && dev->active == 0) { dev->flags &= ~AHC_DEV_FREEZE_TIL_EMPTY; dev->qfrozen--; } if (TAILQ_FIRST(&dev->busyq) == NULL || dev->openings == 0 || dev->qfrozen != 0) return; ahc_linux_run_device_queue(ahc, dev);}static __inline struct ahc_linux_device *ahc_linux_next_device_to_run(struct ahc_softc *ahc){ if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0 || (ahc->platform_data->qfrozen != 0 && AHC_DV_SIMQ_FROZEN(ahc) == 0)) return (NULL); return (TAILQ_FIRST(&ahc->platform_data->device_runq));}static __inline voidahc_linux_run_device_queues(struct ahc_softc *ahc){ struct ahc_linux_device *dev; while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) { TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links); dev->flags &= ~AHC_DEV_ON_RUN_LIST; ahc_linux_check_device_queue(ahc, dev); }}static __inline voidahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb){ Scsi_Cmnd *cmd; cmd = scb->io_ctx; ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE); if (cmd->use_sg != 0) { struct scatterlist *sg; sg = (struct scatterlist *)cmd->request_buffer; pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg, scsi_to_pci_dma_dir(cmd->sc_data_direction)); } else if (cmd->request_bufflen != 0) { pci_unmap_single(ahc->dev_softc, scb->platform_data->buf_busaddr, cmd->request_bufflen, scsi_to_pci_dma_dir(cmd->sc_data_direction)); }}static __inline intahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, struct ahc_dma_seg *sg, bus_addr_t addr, bus_size_t len){ int consumed; if ((scb->sg_count + 1) > 
AHC_NSEG) panic("Too few segs for dma mapping. " "Increase AHC_NSEG\n"); consumed = 1; sg->addr = ahc_htole32(addr & 0xFFFFFFFF); scb->platform_data->xfer_len += len; if (sizeof(bus_addr_t) > 4 && (ahc->flags & AHC_39BIT_ADDRESSING) != 0) len |= (addr >> 8) & AHC_SG_HIGH_ADDR_MASK; sg->len = ahc_htole32(len); return (consumed);}/************************ Host template entry points *************************/static int ahc_linux_detect(Scsi_Host_Template *);static int ahc_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));static const char *ahc_linux_info(struct Scsi_Host *);#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)static int ahc_linux_slave_alloc(Scsi_Device *);static int ahc_linux_slave_configure(Scsi_Device *);static void ahc_linux_slave_destroy(Scsi_Device *);#if defined(__i386__)static int ahc_linux_biosparam(struct scsi_device*, struct block_device*, sector_t, int[]);#endif#elsestatic int ahc_linux_release(struct Scsi_Host *);static void ahc_linux_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs);#if defined(__i386__)static int ahc_linux_biosparam(Disk *, kdev_t, int[]);#endif#endifstatic int ahc_linux_bus_reset(Scsi_Cmnd *);static int ahc_linux_dev_reset(Scsi_Cmnd *);static int ahc_linux_abort(Scsi_Cmnd *);/* * Calculate a safe value for AHC_NSEG (as expressed through ahc_linux_nseg). * * In pre-2.5.X... * The midlayer allocates an S/G array dynamically when a command is issued * using SCSI malloc. This array, which is in an OS dependent format that * must later be copied to our private S/G list, is sized to house just the * number of segments needed for the current transfer. Since the code that * sizes the SCSI malloc pool does not take into consideration fragmentation * of the pool, executing transactions numbering just a fraction of our * concurrent transaction limit with list lengths aproaching AHC_NSEG will * quickly depleat the SCSI malloc pool of usable space. 
Unfortunately, the * mid-layer does not properly handle this scsi malloc failures for the S/G * array and the result can be a lockup of the I/O subsystem. We try to size * our S/G list so that it satisfies our drivers allocation requirements in * addition to avoiding fragmentation of the SCSI malloc pool. */static voidahc_linux_size_nseg(void){#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) u_int cur_size; u_int best_size; /* * The SCSI allocator rounds to the nearest 512 bytes * an cannot allocate across a page boundary. Our algorithm * is to start at 1K of scsi malloc space per-command and * loop through all factors of the PAGE_SIZE and pick the best. */ best_size = 0; for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) { u_int nseg; nseg = cur_size / sizeof(struct scatterlist); if (nseg < AHC_LINUX_MIN_NSEG) continue; if (best_size == 0) { best_size = cur_size; ahc_linux_nseg = nseg; } else { u_int best_rem; u_int cur_rem; /* * Compare the traits of the current "best_size" * with the current size to determine if the * current size is a better size. */ best_rem = best_size % sizeof(struct scatterlist); cur_rem = cur_size % sizeof(struct scatterlist); if (cur_rem < best_rem) { best_size = cur_size; ahc_linux_nseg = nseg; } } }#endif}/* * Try to detect an Adaptec 7XXX controller. */static intahc_linux_detect(Scsi_Host_Template *template){ struct ahc_softc *ahc; int found = 0;#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) /* * It is a bug that the upper layer takes * this lock just prior to calling us. */ spin_unlock_irq(&io_request_lock);#endif /* * Sanity checking of Linux SCSI data structures so * that some of our hacks^H^H^H^H^Hassumptions aren't * violated. 
*/ if (offsetof(struct ahc_cmd_internal, end) > offsetof(struct scsi_cmnd, host_scribble)) { printf("ahc_linux_detect: SCSI data structures changed.\n"); printf("ahc_linux_detect: Unable to attach\n"); return (0); } ahc_linux_size_nseg();#ifdef MODULE /* * If we've been passed any parameters, process them now. */ if (aic7xxx) aic7xxx_setup(aic7xxx); if (dummy_buffer[0] != 'P') printk(KERN_WARNING"aic7xxx: Please read the file /usr/src/linux/drivers/scsi/README.aic7xxx\n""aic7xxx: to see the proper way to specify options to the aic7xxx module\n""aic7xxx: Specifically, don't use any commas when passing arguments to\n""aic7xxx: insmod or else it might trash certain memory areas.\n");#endif#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0) template->proc_name = "aic7xxx";#else template->proc_dir = &proc_scsi_aic7xxx;#endif
/* (web-viewer navigation chrome removed — not part of the original source file) */