/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July 2000
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
#include <linux/highmem.h>
#include <linux/raid/md.h>

#include <linux/module.h>

/*
 * MAC Floppy IWM hooks
 */
#ifdef CONFIG_MAC_FLOPPY_IWM
extern int mac_floppy_init(void);
#endif

extern int lvm_init(void);

/*
 * For the allocated request tables
 */
static kmem_cache_t *request_cachep;

/*
 * The "disk" task queue is used to start the actual requests
 * after a plug
 */
DECLARE_TASK_QUEUE(tq_disk);

/*
 * Protect the request list against multiple users..
 *
 * With this spinlock the Linux block IO subsystem is 100% SMP threaded
 * from the IRQ event side, and almost 100% SMP threaded from the syscall
 * side (we still have to protect against block device array operations,
 * and the do_request() side is casually still unsafe. The kernel lock
 * protects this part currently.).
 *
 * there is a fair chance that things will work just OK if these functions
 * are called with no global kernel lock held ...
 */
spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;

/* This specifies how many sectors to read ahead on the disk. */
int read_ahead[MAX_BLKDEV];

/* blk_dev_struct is:
 *	*request_fn
 *	*current_request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */

/*
 * blk_size contains the size of all block-devices in units of 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV];

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV];

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *		then 512 bytes is assumed.
 * else
 *		sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some scsi devices and read by the msdos fs driver.
 * Other uses may appear later.
 */
int * hardsect_size[MAX_BLKDEV];

/*
 * The following tunes the read-ahead algorithm in mm/filemap.c
 */
int * max_readahead[MAX_BLKDEV];

/*
 * Max number of sectors per request
 */
int * max_sectors[MAX_BLKDEV];

static inline int get_max_sectors(kdev_t dev)
{
        if (!max_sectors[MAJOR(dev)])
                return MAX_SECTORS;
        return max_sectors[MAJOR(dev)][MINOR(dev)];
}

static inline request_queue_t *__blk_get_queue(kdev_t dev)
{
        struct blk_dev_struct *bdev = blk_dev + MAJOR(dev);

        if (bdev->queue)
                return bdev->queue(dev);
        else
                return &blk_dev[MAJOR(dev)].request_queue;
}
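/*
 * Illustrative sketch, not part of the original file: how a caller might
 * resolve the per-device soft block size and device size using the tables
 * above, honouring the documented fallbacks (1024 bytes when
 * blksize_size[MAJOR] is NULL, no size checking when blk_size[MAJOR] is
 * NULL).  The helper names are hypothetical and the block is compiled out.
 */
#if 0
static int example_blksize(kdev_t dev)
{
        if (blksize_size[MAJOR(dev)])
                return blksize_size[MAJOR(dev)][MINOR(dev)];
        return 1024;            /* default soft block size */
}

static long example_size_kb(kdev_t dev)
{
        if (blk_size[MAJOR(dev)])
                return blk_size[MAJOR(dev)][MINOR(dev)];  /* size in 1 KB units */
        return -1;              /* unknown: no minor size checking is done */
}
#endif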
/*
 * NOTE: the device-specific queue() functions
 * have to be atomic!
 */
request_queue_t *blk_get_queue(kdev_t dev)
{
        request_queue_t *ret;
        unsigned long flags;

        spin_lock_irqsave(&io_request_lock, flags);
        ret = __blk_get_queue(dev);
        spin_unlock_irqrestore(&io_request_lock, flags);

        return ret;
}

static int __blk_cleanup_queue(struct list_head *head)
{
        struct list_head *entry;
        struct request *rq;
        int i = 0;

        if (list_empty(head))
                return 0;

        entry = head->next;
        do {
                rq = list_entry(entry, struct request, table);
                entry = entry->next;
                list_del(&rq->table);
                kmem_cache_free(request_cachep, rq);
                i++;
        } while (!list_empty(head));

        return i;
}

/**
 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
 * @q:    the request queue to be released
 *
 * Description:
 *     blk_cleanup_queue is the pair to blk_init_queue().  It should
 *     be called when a request queue is being released; typically
 *     when a block device is being de-registered.  Currently, its
 *     primary task is to free all the &struct request structures that
 *     were allocated to the queue.
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
void blk_cleanup_queue(request_queue_t * q)
{
        int count = QUEUE_NR_REQUESTS;

        count -= __blk_cleanup_queue(&q->request_freelist[READ]);
        count -= __blk_cleanup_queue(&q->request_freelist[WRITE]);

        if (count)
                printk("blk_cleanup_queue: leaked requests (%d)\n", count);

        memset(q, 0, sizeof(*q));
}

/**
 * blk_queue_headactive - indicate whether head of request queue may be active
 * @q:       The queue which this applies to.
 * @active:  A flag indicating whether the head of the queue is active.
 *
 * Description:
 *    The driver for a block device may choose to leave the currently active
 *    request on the request queue, removing it only when it has completed.
 *    The queue handling routines assume this by default for safety reasons
 *    and will not involve the head of the request queue in any merging or
 *    reordering of requests when the queue is unplugged (and thus may be
 *    working on this particular request).
 *
 *    If a driver removes requests from the queue before processing them, then
 *    it may indicate that it does so, thereby allowing the head of the queue
 *    to be involved in merging and reordering.  This is done by calling
 *    blk_queue_headactive() with an @active flag of %0.
 *
 *    If a driver processes several requests at once, it must remove them (or
 *    at least all but one of them) from the request queue.
 *
 *    When a queue is plugged (see blk_queue_pluggable()) the head will be
 *    assumed to be inactive.
 **/
void blk_queue_headactive(request_queue_t * q, int active)
{
        q->head_active = active;
}
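/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * blk_init_queue()/blk_cleanup_queue() in a driver's init and exit paths,
 * together with blk_queue_headactive(q, 0) for a driver that dequeues
 * requests before starting them.  EXAMPLE_MAJOR and example_request_fn are
 * hypothetical; the block is compiled out.
 */
#if 0
static void example_request_fn(request_queue_t *q);    /* driver strategy routine */

static int __init example_driver_init(void)
{
        request_queue_t *q = BLK_DEFAULT_QUEUE(EXAMPLE_MAJOR);

        blk_init_queue(q, example_request_fn);
        /* requests are removed from the queue before being processed */
        blk_queue_headactive(q, 0);
        return 0;
}

static void __exit example_driver_exit(void)
{
        blk_cleanup_queue(BLK_DEFAULT_QUEUE(EXAMPLE_MAJOR));
}
#endif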
/**
 * blk_queue_pluggable - define a plugging function for a request queue
 * @q:    the request queue to which the function will apply
 * @plug: the function to be called to plug a queue
 *
 * Description:
 *   A request queue will be "plugged" if a request is added to it
 *   while it is empty.  This allows a number of requests to be added
 *   before any are processed, thus providing an opportunity for these
 *   requests to be merged or re-ordered.
 *   The default plugging function (generic_plug_device()) sets the
 *   "plugged" flag for the queue and adds a task to the $tq_disk task
 *   queue to unplug the queue and call the request function at a
 *   later time.
 *
 *   A device driver may provide an alternate plugging function by
 *   passing it to blk_queue_pluggable().  This function should set
 *   the "plugged" flag if it wants calls to the request function to be
 *   blocked, and should place a task on $tq_disk which will unplug
 *   the queue.  Alternately it can simply do nothing and thereby
 *   disable plugging of the device.
 **/
void blk_queue_pluggable (request_queue_t * q, plug_device_fn *plug)
{
        q->plug_device_fn = plug;
}

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:   the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct buffer_heads to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory", either by calling bh_kmap() to get
 *    a kernel mapping, or by calling create_bounce() to create a
 *    buffer in normal memory.
 **/
void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
{
        q->make_request_fn = mfn;
}

static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments)
{
        if (req->nr_segments < max_segments) {
                req->nr_segments++;
                q->elevator.nr_segments++;
                return 1;
        }
        return 0;
}

static int ll_back_merge_fn(request_queue_t *q, struct request *req,
                            struct buffer_head *bh, int max_segments)
{
        if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
                return 1;
        return ll_new_segment(q, req, max_segments);
}

static int ll_front_merge_fn(request_queue_t *q, struct request *req,
                             struct buffer_head *bh, int max_segments)
{
        if (bh->b_data + bh->b_size == req->bh->b_data)
                return 1;
        return ll_new_segment(q, req, max_segments);
}

static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
                                struct request *next, int max_segments)
{
        int total_segments = req->nr_segments + next->nr_segments;
        int same_segment;

        same_segment = 0;
        if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) {
                total_segments--;
                same_segment = 1;
        }

        if (total_segments > max_segments)
                return 0;

        q->elevator.nr_segments -= same_segment;
        req->nr_segments = total_segments;
        return 1;
}

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue.
 * (and with the request spinlock acquired)
 */
static void generic_plug_device(request_queue_t *q, kdev_t dev)
{
        /*
         * no need to replug device
         */
        if (!list_empty(&q->queue_head) || q->plugged)
                return;

        q->plugged = 1;
        queue_task(&q->plug_tq, &tq_disk);
}

/*
 * remove the plug and let it rip..
 */
static inline void __generic_unplug_device(request_queue_t *q)
{
        if (q->plugged) {
                q->plugged = 0;
                if (!list_empty(&q->queue_head))
                        q->request_fn(q);
        }
}

static void generic_unplug_device(void *data)
{
        request_queue_t *q = (request_queue_t *) data;
        unsigned long flags;

        spin_lock_irqsave(&io_request_lock, flags);
        __generic_unplug_device(q);
        spin_unlock_irqrestore(&io_request_lock, flags);
}
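/*
 * Illustrative sketch, not part of the original file: the kind of minimal
 * make_request function a stacking/virtual driver (md, lvm) might install
 * with blk_queue_make_request().  It remaps the buffer_head onto an
 * underlying device and returns non-zero so the caller resubmits the
 * (now redirected) buffer.  example_lower_dev, example_map_sector and
 * EXAMPLE_MAJOR are hypothetical; the block is compiled out.
 */
#if 0
static int example_make_request(request_queue_t *q, int rw, struct buffer_head *bh)
{
        bh->b_rdev = example_lower_dev;                  /* redirect to the real device */
        bh->b_rsector = example_map_sector(bh->b_rsector);
        return 1;       /* non-zero: resubmit bh against b_rdev */
}

/* installed once at initialisation time:
 *      blk_queue_make_request(BLK_DEFAULT_QUEUE(EXAMPLE_MAJOR), example_make_request);
 */
#endif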
static void blk_init_free_list(request_queue_t *q)
{
        struct request *rq;
        int i;

        /*
         * Divide requests in half between read and write. This used to
         * be a 2/3 advantage for reads, but now reads can steal from
         * the write free list.
         */
        for (i = 0; i < QUEUE_NR_REQUESTS; i++) {
                rq = kmem_cache_alloc(request_cachep, SLAB_KERNEL);
                rq->rq_status = RQ_INACTIVE;
                list_add(&rq->table, &q->request_freelist[i & 1]);
        }

        init_waitqueue_head(&q->wait_for_request);
        spin_lock_init(&q->request_lock);
}

static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @q:    The &request_queue_t to be initialised
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    A global spin lock $io_request_lock must be held while manipulating the
 *    requests on the request queue.
 *
 *    The request on the head of the queue is by default assumed to be
 *    potentially active, and it is not considered for re-ordering or merging
 *    whenever the given queue is unplugged. This behaviour can be changed with
 *    blk_queue_headactive().
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/