aic79xx_osm.h
	 * at the time of the last QUEUE FULL event.
	 */
	u_int			tags_on_last_queuefull;

	/*
	 * How many times we have seen a queue full
	 * with the same number of tags.  This is used
	 * to stop our adaptive queue depth algorithm
	 * on devices with a fixed number of tags.
	 */
	u_int			last_queuefull_same_count;
#define AHD_LOCK_TAGS_COUNT 50

	/*
	 * How many transactions have been queued
	 * without the device going idle.  We use
	 * this statistic to determine when to issue
	 * an ordered tag to prevent transaction
	 * starvation.  This statistic is only updated
	 * if the AHD_DEV_PERIODIC_OTAG flag is set
	 * on this device.
	 */
	u_int			commands_since_idle_or_otag;
#define AHD_OTAG_THRESH	500

	int			lun;
	Scsi_Device	       *scsi_device;
	struct			ahd_linux_target *target;
};
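/*
 * Illustrative sketch, not part of the original header: one way the
 * QUEUE FULL bookkeeping above can drive the adaptive queue depth
 * algorithm described in the comments.  The helper name and its
 * active_cmds parameter are assumptions for illustration only; the
 * real policy lives in aic79xx_osm.c.
 */
#if 0	/* example only */
static u_int
ahd_example_queuefull_update(struct ahd_linux_device *dev, u_int active_cmds)
{
	if (active_cmds == dev->tags_on_last_queuefull) {
		/* QUEUE FULL seen again at the same tag count. */
		dev->last_queuefull_same_count++;
	} else {
		dev->tags_on_last_queuefull = active_cmds;
		dev->last_queuefull_same_count = 1;
	}

	/*
	 * After AHD_LOCK_TAGS_COUNT identical events, assume the device
	 * has a fixed number of tags and stop adapting the depth.
	 */
	if (dev->last_queuefull_same_count >= AHD_LOCK_TAGS_COUNT)
		return (active_cmds);	/* lock the queue depth here */
	return (0);			/* keep adapting */
}
#endif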
typedef enum {
	AHD_DV_REQUIRED		 = 0x01,
	AHD_INQ_VALID		 = 0x02,
	AHD_BASIC_DV		 = 0x04,
	AHD_ENHANCED_DV		 = 0x08
} ahd_linux_targ_flags;

/* DV States */
typedef enum {
	AHD_DV_STATE_EXIT = 0,
	AHD_DV_STATE_INQ_SHORT_ASYNC,
	AHD_DV_STATE_INQ_ASYNC,
	AHD_DV_STATE_INQ_ASYNC_VERIFY,
	AHD_DV_STATE_TUR,
	AHD_DV_STATE_REBD,
	AHD_DV_STATE_INQ_VERIFY,
	AHD_DV_STATE_WEB,
	AHD_DV_STATE_REB,
	AHD_DV_STATE_SU,
	AHD_DV_STATE_BUSY
} ahd_dv_state;

struct ahd_linux_target {
	struct ahd_linux_device	 *devices[AHD_NUM_LUNS];
	int			  channel;
	int			  target;
	int			  refcount;
	struct ahd_transinfo	  last_tinfo;
	struct ahd_softc	 *ahd;
	ahd_linux_targ_flags	  flags;
	struct scsi_inquiry_data *inq_data;

	/*
	 * The next "fallback" period to use for narrow/wide transfers.
	 */
	uint8_t			  dv_next_narrow_period;
	uint8_t			  dv_next_wide_period;
	uint8_t			  dv_max_width;
	uint8_t			  dv_max_ppr_options;
	uint8_t			  dv_last_ppr_options;
	u_int			  dv_echo_size;
	ahd_dv_state		  dv_state;
	u_int			  dv_state_retry;
	uint8_t			 *dv_buffer;
	uint8_t			 *dv_buffer1;

	/*
	 * Cumulative counter of errors.
	 */
	u_long			errors_detected;
	u_long			cmds_since_error;
};

/********************* Definitions Required by the Core ***********************/
/*
 * Number of SG segments we require.  So long as the S/G segments for
 * a particular transaction are allocated in a physically contiguous
 * manner and are allocated below 4GB, the number of S/G segments is
 * unrestricted.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
 * We dynamically adjust the number of segments in pre-2.5 kernels to
 * avoid fragmentation issues in the SCSI mid-layer's private memory
 * allocator.  See aic79xx_osm.c ahd_linux_size_nseg() for details.
 */
extern u_int ahd_linux_nseg;
#define	AHD_NSEG ahd_linux_nseg
#define	AHD_LINUX_MIN_NSEG 64
#else
#define	AHD_NSEG 128
#endif

/*
 * Per-SCB OSM storage.
 */
typedef enum {
	AHD_SCB_UP_EH_SEM = 0x1
} ahd_linux_scb_flags;

struct scb_platform_data {
	struct ahd_linux_device	*dev;
	dma_addr_t		 buf_busaddr;
	uint32_t		 xfer_len;
	uint32_t		 sense_resid;	/* Auto-Sense residual */
	ahd_linux_scb_flags	 flags;
};

/*
 * Define a structure used for each host adapter.  All members are
 * aligned on a boundary >= the size of the member to honor the
 * alignment restrictions of the various platforms supported by
 * this driver.
 */
typedef enum {
	AHD_DV_WAIT_SIMQ_EMPTY	 = 0x01,
	AHD_DV_WAIT_SIMQ_RELEASE = 0x02,
	AHD_DV_ACTIVE		 = 0x04,
	AHD_DV_SHUTDOWN		 = 0x08,
	AHD_RUN_CMPLT_Q_TIMER	 = 0x10
} ahd_linux_softc_flags;

TAILQ_HEAD(ahd_completeq, ahd_cmd);

struct ahd_platform_data {
	/*
	 * Fields accessed from interrupt context.
	 */
	struct ahd_linux_target *targets[AHD_NUM_TARGETS];
	TAILQ_HEAD(, ahd_linux_device) device_runq;
	struct ahd_completeq	 completeq;

	spinlock_t		 spin_lock;
	struct tasklet_struct	 runq_tasklet;
	u_int			 qfrozen;
	pid_t			 dv_pid;
	struct timer_list	 completeq_timer;
	struct timer_list	 reset_timer;
	struct timer_list	 stats_timer;
	struct semaphore	 eh_sem;
	struct semaphore	 dv_sem;
	struct semaphore	 dv_cmd_sem;	/* XXX This needs to be in
						 * the target struct
						 */
	struct scsi_device	*dv_scsi_dev;
	struct Scsi_Host        *host;		/* pointer to scsi host */
#define AHD_LINUX_NOIRQ	((uint32_t)~0)
	uint32_t		 irq;		/* IRQ for this adapter */
	uint32_t		 bios_address;
	uint32_t		 mem_busaddr;	/* Mem Base Addr */
	uint64_t		 hw_dma_mask;
	ahd_linux_softc_flags	 flags;
};

/************************** OS Utility Wrappers *******************************/
#define printf printk
#define M_NOWAIT GFP_ATOMIC
#define M_WAITOK 0
#define malloc(size, type, flags) kmalloc(size, flags)
#define free(ptr, type) kfree(ptr)

static __inline void ahd_delay(long);
static __inline void
ahd_delay(long usec)
{
	/*
	 * udelay on Linux can have problems for
	 * multi-millisecond waits.  Wait at most
	 * 1024us per call.
	 */
	while (usec > 0) {
		udelay(usec % 1024);
		usec -= 1024;
	}
}

/***************************** Low Level I/O **********************************/
static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port);
static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port);
static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
static __inline void ahd_outw_atomic(struct ahd_softc * ahd,
				     long port, uint16_t val);
static __inline void ahd_outsb(struct ahd_softc * ahd, long port,
			       uint8_t *, int count);
static __inline void ahd_insb(struct ahd_softc * ahd, long port,
			       uint8_t *, int count);

static __inline uint8_t
ahd_inb(struct ahd_softc * ahd, long port)
{
	uint8_t x;

	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
		x = readb(ahd->bshs[0].maddr + port);
	} else {
		x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
	}
	mb();
	return (x);
}

static __inline uint16_t
ahd_inw_atomic(struct ahd_softc * ahd, long port)
{
	uint16_t x;

	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
		x = readw(ahd->bshs[0].maddr + port);
	} else {
		x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
	}
	mb();
	return (x);
}

static __inline void
ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
{
	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
		writeb(val, ahd->bshs[0].maddr + port);
	} else {
		outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
	}
	mb();
}

static __inline void
ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
{
	if (ahd->tags[0] == BUS_SPACE_MEMIO) {
		writew(val, ahd->bshs[0].maddr + port);
	} else {
		outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
	}
	mb();
}
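/*
 * Illustrative sketch, not part of the original header: how the split
 * "port" encoding used by the accessors above is interpreted.  In
 * memory-mapped mode every register offset is simply added to
 * bshs[0].maddr; in port-I/O mode the bits above the low byte select a
 * bus-space handle (e.g. a hypothetical offset 0x1A3 would use bshs[1]
 * at I/O offset 0xA3).  The helper name is an assumption for
 * illustration only.
 */
#if 0	/* example only */
static __inline unsigned long
ahd_example_pio_address(struct ahd_softc *ahd, long port)
{
	/* Same expression the inb/outb paths above use. */
	return (ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
}
#endif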
static __inline void
ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
{
	int i;

	/*
	 * There is probably a more efficient way to do this on Linux
	 * but we don't use this for anything speed critical and this
	 * should work.
	 */
	for (i = 0; i < count; i++)
		ahd_outb(ahd, port, *array++);
}

static __inline void
ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
{
	int i;

	/*
	 * There is probably a more efficient way to do this on Linux
	 * but we don't use this for anything speed critical and this
	 * should work.
	 */
	for (i = 0; i < count; i++)
		*array++ = ahd_inb(ahd, port);
}

/**************************** Initialization **********************************/
int		ahd_linux_register_host(struct ahd_softc *,
					Scsi_Host_Template *);

uint64_t	ahd_linux_get_memsize(void);

/*************************** Pretty Printing **********************************/
struct info_str {
	char *buffer;
	int length;
	off_t offset;
	int pos;
};

void	ahd_format_transinfo(struct info_str *info,
			     struct ahd_transinfo *tinfo);

/******************************** Locking *************************************/
/* Lock protecting internal data structures */
static __inline void ahd_lockinit(struct ahd_softc *);
static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags);
static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags);

/* Lock acquisition and release of the above lock in midlayer entry points. */
static __inline void ahd_midlayer_entrypoint_lock(struct ahd_softc *,
						  unsigned long *flags);
static __inline void ahd_midlayer_entrypoint_unlock(struct ahd_softc *,
						    unsigned long *flags);

/* Lock held during command completion to the upper layer */
static __inline void ahd_done_lockinit(struct ahd_softc *);
static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags);
static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags);

/* Lock held during ahd_list manipulation and ahd softc frees */
extern spinlock_t ahd_list_spinlock;
static __inline void ahd_list_lockinit(void);
static __inline void ahd_list_lock(unsigned long *flags);
static __inline void ahd_list_unlock(unsigned long *flags);

static __inline void
ahd_lockinit(struct ahd_softc *ahd)
{
	spin_lock_init(&ahd->platform_data->spin_lock);
}

static __inline void
ahd_lock(struct ahd_softc *ahd, unsigned long *flags)
{
	spin_lock_irqsave(&ahd->platform_data->spin_lock, *flags);
}

static __inline void
ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
{
	spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags);
}

static __inline void
ahd_midlayer_entrypoint_lock(struct ahd_softc *ahd, unsigned long *flags)
{
	/*
	 * In 2.5.X and some 2.4.X versions, the midlayer takes our
	 * lock just before calling us, so we avoid locking again.
	 * For other kernel versions, the io_request_lock is taken
	 * just before our entry point is called.  In this case, we
	 * trade the io_request_lock for our per-softc lock.
	 */
#if AHD_SCSI_HAS_HOST_LOCK == 0
	spin_unlock(&io_request_lock);
	spin_lock(&ahd->platform_data->spin_lock);
#endif
}

static __inline void
ahd_midlayer_entrypoint_unlock(struct ahd_softc *ahd, unsigned long *flags)
{
#if AHD_SCSI_HAS_HOST_LOCK == 0
	spin_unlock(&ahd->platform_data->spin_lock);
	spin_lock(&io_request_lock);
#endif
}

static __inline void
ahd_done_lockinit(struct ahd_softc *ahd)
{
	/*
	 * In 2.5.X, our own lock is held during completions.
	 * In previous versions, the io_request_lock is used.
	 * In either case, we can't initialize this lock again.
	 */
}

static __inline void
ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags)
{
#if AHD_SCSI_HAS_HOST_LOCK == 0
	spin_lock(&io_request_lock);
#endif
}

static __inline void
ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags)
{
#if AHD_SCSI_HAS_HOST_LOCK == 0
	spin_unlock(&io_request_lock);
#endif
}
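/*
 * Illustrative sketch, not part of the original header: how a midlayer
 * entry point is expected to use the wrappers above.  When the SCSI
 * midlayer already holds the host lock (AHD_SCSI_HAS_HOST_LOCK != 0)
 * the calls compile away; on older kernels they trade the global
 * io_request_lock for the per-softc lock, as the comments above
 * describe.  The function name is an assumption for illustration only;
 * the real entry points live in aic79xx_osm.c.
 */
#if 0	/* example only */
static int
ahd_example_entry_point(struct ahd_softc *ahd)
{
	unsigned long flags;

	ahd_midlayer_entrypoint_lock(ahd, &flags);
	/* ... work that touches per-softc driver state goes here ... */
	ahd_midlayer_entrypoint_unlock(ahd, &flags);
	return (0);
}
#endif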
