📄 aic7xxx_osm.h
字号:
* Per device timer. */
	struct timer_list	timer;
	/*
	 * The high limit for the tags variable.
	 */
	u_int			maxtags;
	/*
	 * The computed number of tags outstanding
	 * at the time of the last QUEUE FULL event.
	 */
	u_int			tags_on_last_queuefull;
	/*
	 * How many times we have seen a queue full
	 * with the same number of tags.  This is used
	 * to stop our adaptive queue depth algorithm
	 * on devices with a fixed number of tags.
	 */
	u_int			last_queuefull_same_count;
#define AHC_LOCK_TAGS_COUNT 50
	/*
	 * How many transactions have been queued
	 * without the device going idle.  We use
	 * this statistic to determine when to issue
	 * an ordered tag to prevent transaction
	 * starvation.  This statistic is only updated
	 * if the AHC_DEV_PERIODIC_OTAG flag is set
	 * on this device.
	 */
	u_int			commands_since_idle_or_otag;
#define AHC_OTAG_THRESH 500

	int			lun;
	Scsi_Device	       *scsi_device;
	struct ahc_linux_target *target;
};

/* Per-target flags; stored in ahc_linux_target.flags. */
typedef enum {
	AHC_DV_REQUIRED	 = 0x01,
	AHC_INQ_VALID	 = 0x02,
	AHC_BASIC_DV	 = 0x04,
	AHC_ENHANCED_DV	 = 0x08
} ahc_linux_targ_flags;

/* DV (Domain Validation) state machine states; see dv_state below. */
typedef enum {
	AHC_DV_STATE_EXIT = 0,
	AHC_DV_STATE_INQ_SHORT_ASYNC,
	AHC_DV_STATE_INQ_ASYNC,
	AHC_DV_STATE_INQ_ASYNC_VERIFY,
	AHC_DV_STATE_TUR,
	AHC_DV_STATE_REBD,
	AHC_DV_STATE_INQ_VERIFY,
	AHC_DV_STATE_WEB,
	AHC_DV_STATE_REB,
	AHC_DV_STATE_SU,
	AHC_DV_STATE_BUSY
} ahc_dv_state;

/* Per-target state, including per-LUN device pointers and DV bookkeeping. */
struct ahc_linux_target {
	struct ahc_linux_device	 *devices[AHC_NUM_LUNS];
	int			  channel;
	int			  target;
	int			  refcount;
	struct ahc_transinfo	  last_tinfo;
	struct ahc_softc	 *ahc;
	ahc_linux_targ_flags	  flags;
	struct scsi_inquiry_data *inq_data;
	/*
	 * The next "fallback" period to use for narrow/wide transfers.
	 */
	uint8_t			  dv_next_narrow_period;
	uint8_t			  dv_next_wide_period;
	uint8_t			  dv_max_width;
	uint8_t			  dv_max_ppr_options;
	uint8_t			  dv_last_ppr_options;
	u_int			  dv_echo_size;
	ahc_dv_state		  dv_state;
	u_int			  dv_state_retry;
	char			 *dv_buffer;
	char			 *dv_buffer1;
};

/********************* Definitions Required by the Core ***********************/
/*
 * Number of SG segments we require.
So long as the S/G segments for
 * a particular transaction are allocated in a physically contiguous
 * manner and are allocated below 4GB, the number of S/G segments is
 * unrestricted.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
 * We dynamically adjust the number of segments in pre-2.5 kernels to
 * avoid fragmentation issues in the SCSI mid-layer's private memory
 * allocator.  See aic7xxx_osm.c ahc_linux_size_nseg() for details.
 */
extern u_int ahc_linux_nseg;
#define AHC_NSEG ahc_linux_nseg
#define AHC_LINUX_MIN_NSEG 64
#else
#define AHC_NSEG 128
#endif

/*
 * Per-SCB OSM storage.
 */
typedef enum {
	AHC_UP_EH_SEMAPHORE = 0x1
} ahc_linux_scb_flags;

struct scb_platform_data {
	struct ahc_linux_device	*dev;
	bus_addr_t		 buf_busaddr;
	uint32_t		 xfer_len;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
	uint32_t		 resid;		/* Transfer residual */
#endif
	uint32_t		 sense_resid;	/* Auto-Sense residual */
	ahc_linux_scb_flags	 flags;
};

/*
 * Define a structure used for each host adapter.  All members are
 * aligned on a boundary >= the size of the member to honor the
 * alignment restrictions of the various platforms supported by
 * this driver.
 */
/* Host-adapter state flags; stored in ahc_platform_data.flags. */
typedef enum {
	AHC_DV_WAIT_SIMQ_EMPTY	 = 0x01,
	AHC_DV_WAIT_SIMQ_RELEASE = 0x02,
	AHC_DV_ACTIVE		 = 0x04,
	AHC_DV_SHUTDOWN		 = 0x08,
	AHC_RUN_CMPLT_Q_TIMER	 = 0x10
} ahc_linux_softc_flags;

TAILQ_HEAD(ahc_completeq, ahc_cmd);

struct ahc_platform_data {
	/*
	 * Fields accessed from interrupt context.
*/
	struct ahc_linux_target *targets[AHC_NUM_TARGETS];
	TAILQ_HEAD(, ahc_linux_device) device_runq;
	struct ahc_completeq	 completeq;
	spinlock_t		 spin_lock;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
	struct tasklet_struct	 runq_tasklet;
#endif
	u_int			 qfrozen;
	pid_t			 dv_pid;
	struct timer_list	 completeq_timer;
	struct timer_list	 reset_timer;
	struct semaphore	 eh_sem;
	struct semaphore	 dv_sem;
	struct semaphore	 dv_cmd_sem;	/* XXX This needs to be in
						 * the target struct */
	struct scsi_device	*dv_scsi_dev;
	struct Scsi_Host	*host;		/* pointer to scsi host */
#define AHC_LINUX_NOIRQ	((uint32_t)~0)
	uint32_t		 irq;		/* IRQ for this adapter */
	uint32_t		 bios_address;
	uint32_t		 mem_busaddr;	/* Mem Base Addr */
	bus_addr_t		 hw_dma_mask;
	ahc_linux_softc_flags	 flags;
};

/************************** OS Utility Wrappers *******************************/
/*
 * Map BSD-style utility names used by the shared aic7xxx core onto
 * their Linux equivalents.  The type argument of malloc/free is
 * discarded; only the size and the GFP flags matter on Linux.
 */
#define printf	printk
#define M_NOWAIT GFP_ATOMIC
#define M_WAITOK 0
#define malloc(size, type, flags) kmalloc(size, flags)
#define free(ptr, type) kfree(ptr)

static __inline void ahc_delay(long);
static __inline void
ahc_delay(long usec)
{
	/*
	 * udelay on Linux can have problems for
	 * multi-millisecond waits.  Wait at most
	 * 1024us per call.
*/ while (usec > 0) { udelay(usec % 1024); usec -= 1024; }}/***************************** Low Level I/O **********************************/static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port);static __inline void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);static __inline void ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *, int count);static __inline void ahc_insb(struct ahc_softc * ahc, long port, uint8_t *, int count);static __inline uint8_tahc_inb(struct ahc_softc * ahc, long port){ uint8_t x; if (ahc->tag == BUS_SPACE_MEMIO) { x = readb(ahc->bsh.maddr + port); } else { x = inb(ahc->bsh.ioport + port); } mb(); return (x);}static __inline voidahc_outb(struct ahc_softc * ahc, long port, uint8_t val){ if (ahc->tag == BUS_SPACE_MEMIO) { writeb(val, ahc->bsh.maddr + port); } else { outb(val, ahc->bsh.ioport + port); } mb();}static __inline voidahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count){ int i; /* * There is probably a more efficient way to do this on Linux * but we don't use this for anything speed critical and this * should work. */ for (i = 0; i < count; i++) ahc_outb(ahc, port, *array++);}static __inline voidahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count){ int i; /* * There is probably a more efficient way to do this on Linux * but we don't use this for anything speed critical and this * should work. 
*/
	for (i = 0; i < count; i++)
		*array++ = ahc_inb(ahc, port);
}

/**************************** Initialization **********************************/
int		ahc_linux_register_host(struct ahc_softc *,
					Scsi_Host_Template *);
uint64_t	ahc_linux_get_memsize(void);

/*************************** Pretty Printing **********************************/
/* Cursor state for formatting text into a bounded /proc-style buffer. */
struct info_str {
	char *buffer;
	int length;
	off_t offset;
	int pos;
};

void	ahc_format_transinfo(struct info_str *info,
			     struct ahc_transinfo *tinfo);

/******************************** Locking *************************************/
/* Lock protecting internal data structures */
static __inline void ahc_lockinit(struct ahc_softc *);
static __inline void ahc_lock(struct ahc_softc *, unsigned long *flags);
static __inline void ahc_unlock(struct ahc_softc *, unsigned long *flags);

/* Lock acquisition and release of the above lock in midlayer entry points. */
static __inline void ahc_midlayer_entrypoint_lock(struct ahc_softc *,
						  unsigned long *flags);
static __inline void ahc_midlayer_entrypoint_unlock(struct ahc_softc *,
						    unsigned long *flags);

/* Lock held during command completion to the upper layer */
static __inline void ahc_done_lockinit(struct ahc_softc *);
static __inline void ahc_done_lock(struct ahc_softc *, unsigned long *flags);
static __inline void ahc_done_unlock(struct ahc_softc *, unsigned long *flags);

/* Lock held during ahc_list manipulation and ahc softc frees */
extern spinlock_t ahc_list_spinlock;
static __inline void ahc_list_lockinit(void);
static __inline void ahc_list_lock(unsigned long *flags);
static __inline void ahc_list_unlock(unsigned long *flags);

/* Initialize this adapter's per-softc spin lock. */
static __inline void
ahc_lockinit(struct ahc_softc *ahc)
{
	spin_lock_init(&ahc->platform_data->spin_lock);
}

/* Acquire the per-softc lock, saving interrupt state in *flags. */
static __inline void
ahc_lock(struct ahc_softc *ahc, unsigned long *flags)
{
	spin_lock_irqsave(&ahc->platform_data->spin_lock, *flags);
}

/* Release the per-softc lock, restoring interrupt state from *flags. */
static __inline void
ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
	spin_unlock_irqrestore(&ahc->platform_data->spin_lock, *flags);
}

static __inline
void
ahc_midlayer_entrypoint_lock(struct ahc_softc *ahc, unsigned long *flags)
{
	/*
	 * In 2.5.X and some 2.4.X versions, the midlayer takes our
	 * lock just before calling us, so we avoid locking again.
	 * For other kernel versions, the io_request_lock is taken
	 * just before our entry point is called.  In this case, we
	 * trade the io_request_lock for our per-softc lock.
	 */
#if AHC_SCSI_HAS_HOST_LOCK == 0
	spin_unlock(&io_request_lock);
	spin_lock(&ahc->platform_data->spin_lock);
#endif
}

/* Inverse of ahc_midlayer_entrypoint_lock: restore the midlayer's lock. */
static __inline void
ahc_midlayer_entrypoint_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
#if AHC_SCSI_HAS_HOST_LOCK == 0
	spin_unlock(&ahc->platform_data->spin_lock);
	spin_lock(&io_request_lock);
#endif
}

static __inline void
ahc_done_lockinit(struct ahc_softc *ahc)
{
	/*
	 * In 2.5.X, our own lock is held during completions.
	 * In previous versions, the io_request_lock is used.
	 * In either case, we can't initialize this lock again.
	 */
}

/* Take the completion lock (io_request_lock on pre-host-lock kernels). */
static __inline void
ahc_done_lock(struct ahc_softc *ahc, unsigned long *flags)
{
#if AHC_SCSI_HAS_HOST_LOCK == 0
	spin_lock_irqsave(&io_request_lock, *flags);
#endif
}

/* Release the completion lock taken by ahc_done_lock. */
static __inline void
ahc_done_unlock(struct ahc_softc *ahc, unsigned long *flags)
{
#if AHC_SCSI_HAS_HOST_LOCK == 0
	spin_unlock_irqrestore(&io_request_lock, *flags);
#endif
}

/* Initialize the global lock guarding the ahc softc list. */
static __inline void
ahc_list_lockinit(void)
{
	spin_lock_init(&ahc_list_spinlock);
}

/* Acquire the global ahc list lock, saving interrupt state in *flags. */
static __inline void
ahc_list_lock(unsigned long *flags)
{
	spin_lock_irqsave(&ahc_list_spinlock, *flags);
}

/* Release the global ahc list lock, restoring state from *flags. */
static __inline void
ahc_list_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&ahc_list_spinlock, *flags);
}

/******************************* PCI Definitions ******************************/
/*
 * PCIM_xxx: mask to locate subfield in register
 * PCIR_xxx: config register offset
 * PCIC_xxx: device class
 * PCIS_xxx: device subclass
 * PCIP_xxx: device programming interface
 * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
 * PCID_xxx: device ID
 */
#define PCIR_DEVVENDOR		0x00
#define PCIR_VENDOR		0x00
#define PCIR_DEVICE		0x02
#define PCIR_COMMAND		0x04
#define PCIM_CMD_PORTEN		0x0001
#define PCIM_CMD_MEMEN		0x0002
#define PCIM_CMD_BUSMASTEREN	0x0004
#define PCIM_CMD_MWRICEN	0x0010
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -