📄 blkdev.h
	unsigned short		max_hw_sectors;
	unsigned short		max_phys_segments;
	unsigned short		max_hw_segments;
	unsigned short		hardsect_size;
	unsigned int		max_segment_size;

	unsigned long		seg_boundary_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;

	atomic_t		refcnt;

	unsigned int		in_flight;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;

	struct list_head	drain_list;

	/*
	 * reserved for flush operations
	 */
	struct request		*flush_rq;
	unsigned char		ordered;
};

enum {
	QUEUE_ORDERED_NONE,
	QUEUE_ORDERED_TAG,
	QUEUE_ORDERED_FLUSH,
};

#define RQ_INACTIVE		(-1)
#define RQ_ACTIVE		1
#define RQ_SCSI_BUSY		0xffff
#define RQ_SCSI_DONE		0xfffe
#define RQ_SCSI_DISCONNECTING	0xffe0

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_DRAIN	8	/* draining queue for sched switch */
#define QUEUE_FLAG_FLUSH	9	/* doing barrier flush sequence */

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q)	test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags)

#define blk_fs_request(rq)	((rq)->flags & REQ_CMD)
#define blk_pc_request(rq)	((rq)->flags & REQ_BLOCK_PC)
#define blk_noretry_request(rq)	((rq)->flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->flags & REQ_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->flags & REQ_PM_RESUME)
#define blk_pm_request(rq)	\
	((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))

#define blk_barrier_rq(rq)		((rq)->flags & REQ_HARDBARRIER)
#define blk_barrier_preflush(rq)	((rq)->flags & REQ_BAR_PREFLUSH)
#define blk_barrier_postflush(rq)	((rq)->flags & REQ_BAR_POSTFLUSH)

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->flags & 1)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}
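/*
 * Example (illustrative, not part of blkdev.h): a minimal request_fn
 * sketch showing how the request-type and direction helpers above are
 * typically used by a driver.  my_dev_transfer() is a hypothetical
 * helper that moves the data for one request; elv_next_request() is
 * declared in <linux/elevator.h>.
 */
static void my_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			/* not a filesystem request: fail it */
			end_request(rq, 0);
			continue;
		}
		/* rq_data_dir() is 0 for READ, non-zero for WRITE */
		my_dev_transfer(rq, rq_data_dir(rq));	/* hypothetical */
		end_request(rq, 1);			/* report success */
	}
}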
/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * noop, requests are automagically marked as active/inactive by I/O
 * scheduler -- see elv_next_request
 */
#define blk_queue_headactive(q, head_active)

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

#ifdef CONFIG_MMU
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */

#define rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

struct sec_size {
	unsigned block_size;
	unsigned block_size_bits;
};

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void blk_end_sync_rq(struct request *rq);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, int);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(request_queue_t *q);
extern void blk_run_queue(request_queue_t *);
extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
extern int blk_rq_unmap_user(struct bio *, unsigned int);
extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, unsigned int);
extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *, int);

static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
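/*
 * Example (illustrative, not part of blkdev.h): using rq_for_each_bio()
 * from above to add up the payload of a request.  bi_size is the byte
 * count of each bio, from struct bio in <linux/bio.h>; the helper name
 * is made up.
 */
static unsigned int my_rq_byte_count(struct request *rq)
{
	struct bio *bio;
	unsigned int bytes = 0;

	rq_for_each_bio(bio, rq)
		bytes += bio->bi_size;

	return bytes;
}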
/*
 * end_request() and friends. Must be called with the request queue spinlock
 * acquired. All functions called within end_request() _must_ be atomic.
 *
 * Several drivers define their own end_request and call
 * end_that_request_first() and end_that_request_last()
 * for parts of the original function. This prevents
 * code duplication in drivers.
 */
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *);
extern void end_request(struct request *req, int uptodate);

/*
 * end_that_request_first/chunk() take an uptodate argument. We account
 * any value <= 0 as an I/O error. 0 means -EIO for compatibility reasons,
 * any other value < 0 is the direct error type. An uptodate value of
 * 1 indicates successful I/O completion.
 */
#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))

static inline void blkdev_dequeue_request(struct request *req)
{
	BUG_ON(list_empty(&req->queuelist));

	list_del_init(&req->queuelist);

	if (req->rl)
		elv_remove_request(req->q, req);
}

/*
 * Access functions for manipulating queue properties
 */
extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *, unsigned short);
extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern void blk_queue_ordered(request_queue_t *, int);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
extern struct request *blk_start_pre_flush(request_queue_t *, struct request *);
extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);
extern void blk_wait_queue_drained(request_queue_t *, int);
extern void blk_finish_queue_drain(request_queue_t *);

int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(int gfp_mask);
request_queue_t *blk_alloc_queue_node(int, int);
#define blk_put_queue(q) blk_cleanup_queue((q))
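/*
 * Example (illustrative, not part of blkdev.h): a driver-private completion
 * helper built from the primitives above, along the lines described in the
 * end_request() comment.  It must run with the queue lock held; "nsectors"
 * is the number of sectors that just completed.  When
 * end_that_request_first() returns 0 the request is fully done, so it is
 * dequeued and finished with end_that_request_last().
 */
static void my_end_request(struct request *rq, int uptodate, int nsectors)
{
	if (!end_that_request_first(rq, uptodate, nsectors)) {
		blkdev_dequeue_request(rq);
		end_that_request_last(rq);
	}
}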
/*
 * tag stuff
 */
#define blk_queue_tag_depth(q)	((q)->queue_tags->busy)
#define blk_queue_tag_queue(q)	((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq)	((rq)->flags & REQ_QUEUED)
extern int blk_queue_start_tag(request_queue_t *, struct request *);
extern struct request *blk_queue_find_tag(request_queue_t *, int);
extern void blk_queue_end_tag(request_queue_t *, struct request *);
extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(request_queue_t *);
extern int blk_queue_resize_tags(request_queue_t *, int);
extern void blk_queue_invalidate_tags(request_queue_t *);
extern long blk_congestion_wait(int rw, long timeout);

extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);

#define MAX_PHYS_SEGMENTS	128
#define MAX_HW_SEGMENTS		128
#define MAX_SECTORS		255

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(request_queue_t *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(request_queue_t *q)
{
	int retval = 511;

	if (q && q->dma_alignment)
		retval = q->dma_alignment;

	return retval;
}

static inline int bdev_dma_aligment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

#define blk_finished_io(nsects)	do { } while (0)
#define blk_started_io(nsects)	do { } while (0)

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush(void);

#ifdef CONFIG_LBD
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
# define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#endif
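/*
 * Example (illustrative, not part of blkdev.h): reading one 512-byte sector
 * the way the partition parsers do, plus a sector_div() call that also works
 * on 32-bit builds without native 64-bit division.  The function name and
 * the "2048 sectors per chunk" layout are hypothetical.
 */
static int my_peek_sector(struct block_device *bdev, sector_t nr)
{
	Sector sect;
	unsigned char *data;
	sector_t chunk = nr;
	unsigned int offset;

	/* chunk becomes nr / 2048, offset is the remainder */
	offset = sector_div(chunk, 2048);

	data = read_dev_sector(bdev, nr, &sect);
	if (!data)
		return -EIO;

	printk(KERN_DEBUG "sector %llu: first byte %02x (chunk %llu, offset %u)\n",
	       (unsigned long long)nr, data[0],
	       (unsigned long long)chunk, offset);

	put_dev_sector(sect);	/* drops the page reference taken by read_dev_sector() */
	return 0;
}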