blkdev.h
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
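/*
 * Illustrative sketch (not part of blkdev.h): how a driver with a volatile
 * write cache might advertise one of the QUEUE_ORDERED_* modes above via
 * blk_queue_ordered(), which is declared further down in this header.  The
 * names my_prepare_flush() and my_driver_init_barriers() are hypothetical,
 * and the choice of mode depends on what the hardware supports.
 */
#if 0	/* example only, never compiled */
static void my_prepare_flush(struct request_queue *q, struct request *rq)
{
	/* turn rq into a device-specific cache-flush command here */
}

static int my_driver_init_barriers(struct request_queue *q)
{
	/*
	 * Drain the queue around each barrier and issue pre/post flushes;
	 * hardware that honours FUA writes could use
	 * QUEUE_ORDERED_DRAIN_FUA instead.
	 */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				 my_prepare_flush);
}
#endif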
#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync, if it's a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_plug_device(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
			  struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
			 struct gendisk *, struct scsi_ioctl_command __user *);
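/*
 * Illustrative sketch (not part of blkdev.h): walking every segment of a
 * request with the req_iterator helpers defined above.  The function name
 * and the per-segment bookkeeping are hypothetical.
 */
#if 0	/* example only, never compiled */
static unsigned int my_count_request_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec->bv_len;	/* one page/offset/length chunk */

	return bytes;	/* direction is available via rq_data_dir(rq) */
}
#endif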
/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
			     struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct sg_iovec *, int, unsigned int);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern int blk_verify_command(unsigned char *, int);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * end_request() and friends. Must be called with the request queue spinlock
 * acquired. All functions called within end_request() _must_be_ atomic.
 *
 * Several drivers define their own end_request and call
 * end_that_request_first() and end_that_request_last()
 * for parts of the original function. This prevents
 * code duplication in drivers.
 */
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *, int);
extern void end_queued_request(struct request *, int);
extern void end_dequeued_request(struct request *, int);
extern void blk_complete_request(struct request *);
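/*
 * Illustrative sketch (not part of blkdev.h): a minimal request_fn built on
 * end_request() from the completion helpers above.  It relies on the usual
 * contract that the request_fn runs with the queue lock held, and on
 * elv_next_request(), which is declared earlier in this header.  The name
 * my_request_fn() and the data-transfer step are hypothetical.
 */
#if 0	/* example only, never compiled */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			end_request(rq, 0);	/* 0 -> fail with -EIO */
			continue;
		}
		/* ... move rq's data to or from the hardware here ... */
		end_request(rq, 1);		/* 1 -> successful completion */
	}
}
#endif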
/*
 * end_that_request_first/chunk() takes an uptodate argument. we account
 * any value <= 0 as an io error. 0 means -EIO for compatibility reasons,
 * any other < 0 value is the direct error type. An uptodate value of
 * 1 indicates successful io completion
 */
#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))

static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
						 spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern void __generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern int blkdev_issue_flush(struct block_device *, sector_t *);
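/*
 * Illustrative sketch (not part of blkdev.h): typical queue setup in a
 * driver's init path using the accessors declared above.  The names
 * my_create_queue() and my_request_fn() (see the earlier sketch) and the
 * particular limits chosen here are hypothetical.
 */
#if 0	/* example only, never compiled */
static struct request_queue *my_create_queue(spinlock_t *lock)
{
	struct request_queue *q;

	q = blk_init_queue(my_request_fn, lock);
	if (!q)
		return NULL;

	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);	/* bounce highmem pages */
	blk_queue_hardsect_size(q, 512);		/* 512-byte hardware sectors */
	blk_queue_max_sectors(q, 255);			/* per-request transfer cap */
	blk_queue_max_phys_segments(q, 128);		/* scatter-gather entries */
	return q;
}
#endif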
#define MAX_PHYS_SEGMENTS	128
#define MAX_HW_SEGMENTS		128
#define SAFE_MAX_SECTORS	255
#define BLK_DEF_MAX_SECTORS	1024

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	int retval = 511;

	if (q && q->dma_alignment)
		retval = q->dma_alignment;

	return retval;
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

static inline void exit_io_context(void)
{
}

#endif /* CONFIG_BLOCK */

#endif
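/*
 * Illustrative notes (not part of blkdev.h): blksize_bits() above is just
 * log2 of the block size, e.g. blksize_bits(512) == 9, blksize_bits(1024)
 * == 10 and blksize_bits(4096) == 12.  The MODULE_ALIAS_BLOCKDEV* macros
 * are placed in driver code so the module autoloads for a given major; the
 * major number used below is only an example.
 */
#if 0	/* example only, never compiled */
MODULE_ALIAS_BLOCKDEV_MAJOR(43);	/* autoload on "block-major-43-*" */
#endif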