
📄 blkdev.h

📁 Axis 221 camera embedded programming interface
📖 Page 1 of 2
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;

	unsigned int		nr_sorted;
	unsigned int		in_flight;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;
	unsigned int		bi_size;

	struct mutex		sysfs_lock;
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_READFULL	3	/* write queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* read queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
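The request-type predicates above are what a block driver typically consults before deciding how to service a request. As a rough illustration only (the helper name rq_kind is invented and not part of blkdev.h), a classifier could look like this:

#include <linux/blkdev.h>

/* Editor's sketch, not part of blkdev.h: classify a request using the
 * predicate macros defined above. */
static inline const char *rq_kind(struct request *rq)
{
	if (blk_fs_request(rq))
		return blk_barrier_rq(rq) ? "fs (barrier)" : "fs";
	if (blk_pc_request(rq))
		return "block-pc";
	if (blk_pm_request(rq))
		return "power management";
	return "other";
}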
#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync, if it's a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

#ifdef CONFIG_MMU
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */

#define rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
		struct gendisk *, struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}
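rq_for_each_bio() above walks every bio still attached to a request. A minimal sketch of how a driver might use it (count_rq_segments is an invented name and not part of this header):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Editor's sketch, not part of blkdev.h: count the bio_vec segments
 * attached to a request by iterating with rq_for_each_bio(). */
static unsigned int count_rq_segments(struct request *rq)
{
	struct bio *bio;
	unsigned int segs = 0;

	rq_for_each_bio(bio, rq)
		segs += bio->bi_vcnt;

	return segs;
}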
/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(request_queue_t *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(request_queue_t *q);
extern void blk_run_queue(request_queue_t *);
extern void blk_start_queueing(request_queue_t *);
extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
			       struct sg_iovec *, int, unsigned int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * end_request() and friends. Must be called with the request queue spinlock
 * acquired. All functions called within end_request() _must_be_ atomic.
 *
 * Several drivers define their own end_request and call
 * end_that_request_first() and end_that_request_last()
 * for parts of the original function. This prevents
 * code duplication in drivers.
 */
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *req, int uptodate);
extern void blk_complete_request(struct request *);
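The comment above describes the completion helpers that drivers combine into their own end_request(). A hedged sketch of that pattern, assuming the queue lock is held as the comment requires (mydev_end_whole_request is an invented name, not part of this header):

#include <linux/blkdev.h>

/* Editor's sketch, not part of blkdev.h: complete an entire request in one
 * step.  end_that_request_first() returns 0 once all sectors have been
 * accounted for, after which the request is dequeued and finished. */
static void mydev_end_whole_request(struct request *rq, int uptodate)
{
	if (!end_that_request_first(rq, uptodate, rq->hard_nr_sectors)) {
		blkdev_dequeue_request(rq);
		end_that_request_last(rq, uptodate);
	}
}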
/*
 * end_that_request_first/chunk() takes an uptodate argument. we account
 * any value <= 0 as an io error. 0 means -EIO for compatibility reasons,
 * any other < 0 value is the direct error type. An uptodate value of
 * 1 indicates successful io completion
 */
#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))

static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}

/*
 * Access functions for manipulating queue properties
 */
extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
extern int blk_do_ordered(request_queue_t *, struct request **);
extern unsigned blk_ordered_cur_seq(request_queue_t *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);

extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);

int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(gfp_t);
request_queue_t *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(request_queue_t *);

/*
 * tag stuff
 */
#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(request_queue_t *, struct request *);
extern struct request *blk_queue_find_tag(request_queue_t *, int);
extern void blk_queue_end_tag(request_queue_t *, struct request *);
extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(request_queue_t *);
extern int blk_queue_resize_tags(request_queue_t *, int);
extern void blk_queue_invalidate_tags(request_queue_t *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);
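The queue property setters declared above are normally called once, right after blk_init_queue(), in a driver's probe or init path. A hedged sketch under that assumption (mydev_setup_queue, mydev_lock and the limit values are illustrative, not taken from this header):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(mydev_lock);

/* Editor's sketch, not part of blkdev.h: allocate a request queue and
 * describe the device's transfer limits to the block layer. */
static request_queue_t *mydev_setup_queue(request_fn_proc *request_fn)
{
	request_queue_t *q = blk_init_queue(request_fn, &mydev_lock);

	if (!q)
		return NULL;

	blk_queue_max_sectors(q, 255);		/* per-request sector limit */
	blk_queue_max_phys_segments(q, 128);	/* scatter/gather limit */
	blk_queue_hardsect_size(q, 512);	/* hardware sector size */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

	return q;
}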
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(request_queue_t *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(request_queue_t *q)
{
	int retval = 511;

	if (q && q->dma_alignment)
		retval = q->dma_alignment;

	return retval;
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush(void);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long blk_congestion_wait(int rw, long timeout)
{
	return io_schedule_timeout(timeout);
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}

static inline void exit_io_context(void)
{
}

#endif /* CONFIG_BLOCK */

#endif
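For reference, a minimal sketch of how the sector-size helpers above are typically combined; mydev_block_shift is an invented name and not part of blkdev.h:

#include <linux/blkdev.h>

/* Editor's sketch, not part of blkdev.h: derive the log2 shift of a block
 * device's hardware sector size (512 bytes gives a shift of 9). */
static unsigned int mydev_block_shift(struct block_device *bdev)
{
	return blksize_bits(bdev_hardsect_size(bdev));
}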
