
blkdev.h

Axis 221 camera embedded programming interface
Page 1 of 2
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>

#include <asm/scatterlist.h>

#ifdef CONFIG_LBD
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
# define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)
#endif

#ifdef CONFIG_BLOCK

struct scsi_ioctl_command;

struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	unsigned long state;
	atomic_t nr_queued; /* queued reads & sync writes */
	atomic_t nr_dispatched; /* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};

struct cfq_queue;
struct cfq_io_context {
	struct rb_node rb_node;
	void *key;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	unsigned long last_end_request;
	sector_t last_request_pos;
	unsigned long last_queue;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	u64 seek_total;
	sector_t seek_mean;

	struct list_head queue_list;

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */
};

/*
 * This is the per-process I/O subsystem state.  It is refcounted and
 * kmalloc'ed. Currently all fields are modified in process io context
 * (apart from the atomic refcount), so require no locking.
 */
struct io_context {
	atomic_t refcount;
	struct task_struct *task;

	unsigned int ioprio_changed;

	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests;     /* Number of requests left in the batch */

	struct as_io_context *aic;
	struct rb_root cic_root;
};

void put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_FLUSH,			/* flush request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_CMD,
	REQ_TYPE_ATA_TASK,
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb.
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 *
 */
enum {
	/*
	 * just examples for now
	 */
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
};

/*
 * request type modified bits. first three bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline
 */
struct request {
	struct list_head queuelist;
	struct list_head donelist;

	request_queue_t *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	/* Number of scatter-gather addr+len pairs after
	 * physical and DMA remapping hardware coalescing is performed.
	 * This is the number of scatter-gather entries the driver
	 * will actually have to deal with after DMA mapping is done.
	 */
	unsigned short nr_hw_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned int cmd_len;
	unsigned char cmd[BLK_MAX_CDB];

	unsigned int data_len;
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (request_queue_t *q);
typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
typedef int (prep_rq_fn) (request_queue_t *, struct request *);
typedef void (unplug_fn) (request_queue_t *);

struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
typedef void (softirq_done_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	struct list_head busy_list;	/* fifo list of busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	elevator_t		*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	issue_flush_fn		*issue_flush_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long		bounce_pfn;
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		max_sectors;
	unsigned int		max_hw_sectors;
	unsigned short		max_phys_segments;
	unsigned short		max_hw_segments;
	unsigned short		hardsect_size;
	unsigned int		max_segment_size;

	unsigned long		seg_boundary_mask;
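The listing on this page stops partway through struct request_queue; the remainder is on page 2. Two points in the excerpt are worth a worked example. First, the sector_div() macro near the top: with CONFIG_LBD it maps to do_div() so that dividing a 64-bit sector_t still works on 32-bit machines, where a plain `/` on a 64-bit value would pull in compiler helpers the kernel does not provide. In both branches the macro divides its first argument in place and evaluates to the remainder. The helper below is a hypothetical sketch of that contract; zone_of and sectors_per_zone are illustrative names, not part of the header.

#include <linux/blkdev.h>

/*
 * Hypothetical helper: split an absolute sector number into a zone
 * index and an offset within the zone. sector_div() updates 'sector'
 * to the quotient and returns the remainder, safely for 64-bit
 * sector_t on 32-bit CONFIG_LBD kernels.
 */
static unsigned int zone_of(sector_t sector, sector_t *zone,
			    unsigned int sectors_per_zone)
{
	unsigned int offset;

	offset = sector_div(sector, sectors_per_zone);	/* sector /= spz */
	*zone = sector;		/* quotient: which zone */
	return offset;		/* remainder: offset inside the zone */
}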

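Second, most of these declarations meet in a driver's request function: request_fn_proc is the callback type installed via blk_init_queue(), struct request carries the traversal state (sector, current_nr_sectors, buffer), and hardsect_size in struct request_queue is set through blk_queue_hardsect_size(). The fragment below is a minimal sketch of that 2.6-era pattern for a hypothetical memory-backed device; all sbd_* names are invented, and blk_fs_request()/rq_data_dir() are convenience macros defined in the part of this header past the page break.

#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/string.h>

/* Hypothetical backing store and geometry -- illustrative only. */
static u8 *sbd_data;			/* sbd_nsectors * 512 bytes */
static unsigned long sbd_nsectors;
static spinlock_t sbd_lock;

/*
 * request_fn_proc implementation: drain the queue with elv_next_request(),
 * service only filesystem requests, and complete each request segment by
 * segment with end_request(), which re-queues partially done requests.
 */
static void sbd_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			end_request(rq, 0);	/* PC/PM requests unsupported */
			continue;
		}
		if (rq->sector + rq->current_nr_sectors > sbd_nsectors) {
			end_request(rq, 0);	/* past end of device */
			continue;
		}
		if (rq_data_dir(rq) == WRITE)
			memcpy(sbd_data + rq->sector * 512, rq->buffer,
			       rq->current_nr_sectors * 512);
		else
			memcpy(rq->buffer, sbd_data + rq->sector * 512,
			       rq->current_nr_sectors * 512);
		end_request(rq, 1);	/* 1 == success */
	}
}

static request_queue_t *sbd_alloc_queue(void)
{
	request_queue_t *q;

	spin_lock_init(&sbd_lock);
	q = blk_init_queue(sbd_request_fn, &sbd_lock);
	if (q)
		blk_queue_hardsect_size(q, 512);	/* sets q->hardsect_size */
	return q;
}

Note the lock passed to blk_init_queue(): the block layer stores it in q->queue_lock and holds it when calling sbd_request_fn(), which is why the handler itself takes no lock.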