ll_rw_blk.c

From a Linux-on-2410 development tree; it can be used to build the zImage the 2410 needs.
Language: C
Page 1 of 3
/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July 2000
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/module.h>

/*
 * Maybe something to cleanup in 2.3?
 * We shouldn't touch 0x3f2 on machines which don't have a PC floppy controller
 * - it may contain something else which could cause a system hang.  This is
 * now selected by a configuration option, but maybe it ought to be in the
 * floppy code itself? - rmk
 */
#if defined(__i386__) || (defined(__arm__) && defined(CONFIG_ARCH_ACORN))
#define FLOPPY_BOOT_DISABLE
#endif
#ifdef CONFIG_BLK_DEV_FD
#undef FLOPPY_BOOT_DISABLE
#endif

/*
 * MAC Floppy IWM hooks
 */
#ifdef CONFIG_MAC_FLOPPY_IWM
extern int mac_floppy_init(void);
#endif

/*
 * For the allocated request tables
 */
static kmem_cache_t *request_cachep;

/*
 * The "disk" task queue is used to start the actual requests
 * after a plug
 */
DECLARE_TASK_QUEUE(tq_disk);

/*
 * Protect the request list against multiple users..
 *
 * With this spinlock the Linux block IO subsystem is 100% SMP threaded
 * from the IRQ event side, and almost 100% SMP threaded from the syscall
 * side (we still have to protect against block device array operations, and
 * the do_request() side is casually still unsafe. The kernel lock protects
 * this part currently.).
 *
 * there is a fair chance that things will work just OK if these functions
 * are called with no global kernel lock held ...
 */
spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;

/* This specifies how many sectors to read ahead on the disk. */
int read_ahead[MAX_BLKDEV];

/* blk_dev_struct is:
 *	*request_fn
 *	*current_request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */

/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV];

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV];

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *		then 512 bytes is assumed.
 * else
 *		sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some scsi devices and read by the msdos fs driver.
 * Other uses may appear later.
 */
int * hardsect_size[MAX_BLKDEV];

/*
 * The following tunes the read-ahead algorithm in mm/filemap.c
 */
int * max_readahead[MAX_BLKDEV];

/*
 * Max number of sectors per request
 */
int * max_sectors[MAX_BLKDEV];
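The per-major tables above are what a 2.4-era block driver fills in at initialization. As a rough sketch (not part of this file): a hypothetical driver with major number MY_MAJOR and MY_MINORS minors might register its geometry as follows; every name here is illustrative.

/* Illustrative sketch only -- not part of ll_rw_blk.c.
 * MY_MAJOR, MY_MINORS and the my_* arrays are hypothetical. */
#define MY_MAJOR	240	/* experimental major number */
#define MY_MINORS	4

static int my_sizes[MY_MINORS];		/* device size, 1024-byte units */
static int my_blksizes[MY_MINORS];	/* software block size */
static int my_hardsects[MY_MINORS];	/* hardware sector size */

static int __init my_blk_setup(void)
{
	int i;

	for (i = 0; i < MY_MINORS; i++) {
		my_sizes[i]     = 1024;		/* e.g. 1 MB per minor */
		my_blksizes[i]  = 1024;
		my_hardsects[i] = 512;
	}
	blk_size[MY_MAJOR]      = my_sizes;
	blksize_size[MY_MAJOR]  = my_blksizes;
	hardsect_size[MY_MAJOR] = my_hardsects;
	read_ahead[MY_MAJOR]    = 8;		/* sectors of read-ahead */
	return 0;
}

Leaving a major's entry NULL falls back to the defaults spelled out in the comments above: no minor size checking, 1024-byte blocks, and 512-byte hardware sectors.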
/*
 * How many requests do we allocate per queue,
 * and how many do we "batch" on freeing them?
 */
static int queue_nr_requests, batch_requests;

static inline int get_max_sectors(kdev_t dev)
{
	if (!max_sectors[MAJOR(dev)])
		return MAX_SECTORS;
	return max_sectors[MAJOR(dev)][MINOR(dev)];
}

inline request_queue_t *blk_get_queue(kdev_t dev)
{
	struct blk_dev_struct *bdev = blk_dev + MAJOR(dev);

	if (bdev->queue)
		return bdev->queue(dev);
	else
		return &blk_dev[MAJOR(dev)].request_queue;
}

static int __blk_cleanup_queue(struct request_list *list)
{
	struct list_head *head = &list->free;
	struct request *rq;
	int i = 0;

	while (!list_empty(head)) {
		rq = list_entry(head->next, struct request, queue);
		list_del(&rq->queue);
		kmem_cache_free(request_cachep, rq);
		i++;
	};

	if (i != list->count)
		printk("request list leak!\n");

	list->count = 0;
	return i;
}

/**
 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
 * @q:    the request queue to be released
 *
 * Description:
 *     blk_cleanup_queue is the pair to blk_init_queue().  It should
 *     be called when a request queue is being released; typically
 *     when a block device is being de-registered.  Currently, its
 *     primary task is to free all the &struct request structures that
 *     were allocated to the queue.
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
void blk_cleanup_queue(request_queue_t * q)
{
	int count = queue_nr_requests;

	count -= __blk_cleanup_queue(&q->rq[READ]);
	count -= __blk_cleanup_queue(&q->rq[WRITE]);

	if (count)
		printk("blk_cleanup_queue: leaked requests (%d)\n", count);

	memset(q, 0, sizeof(*q));
}

/**
 * blk_queue_headactive - indicate whether head of request queue may be active
 * @q:       The queue which this applies to.
 * @active:  A flag indicating whether the head of the queue is active.
 *
 * Description:
 *    The driver for a block device may choose to leave the currently active
 *    request on the request queue, removing it only when it has completed.
 *    The queue handling routines assume this by default for safety reasons
 *    and will not involve the head of the request queue in any merging or
 *    reordering of requests when the queue is unplugged (and thus may be
 *    working on this particular request).
 *
 *    If a driver removes requests from the queue before processing them, then
 *    it may indicate that it does so, thereby allowing the head of the queue
 *    to be involved in merging and reordering.  This is done by calling
 *    blk_queue_headactive() with an @active flag of %0.
 *
 *    If a driver processes several requests at once, it must remove them (or
 *    at least all but one of them) from the request queue.
 *
 *    When a queue is plugged the head will be assumed to be inactive.
 **/
void blk_queue_headactive(request_queue_t * q, int active)
{
	q->head_active = active;
}
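To make the headactive rule concrete, here is a rough sketch (not part of this file) of a driver whose request function dequeues each request before starting it, and which therefore announces that the queue head is never left active. my_request(), MY_MAJOR and the transfer-start step are hypothetical.

/* Illustrative sketch only -- not part of ll_rw_blk.c. */
static void my_request(request_queue_t *q)
{
	struct request *req;

	while (!list_empty(&q->queue_head)) {
		req = blkdev_entry_next_request(&q->queue_head);
		blkdev_dequeue_request(req);	/* off the queue before use */
		/* ... start the transfer described by req; complete it
		 *     later via end_that_request_first()/_last() ... */
	}
}

static int __init my_queue_setup(void)
{
	blk_init_queue(BLK_DEFAULT_QUEUE(MY_MAJOR), my_request);
	/* requests never stay on the queue while active,
	 * so let the head take part in merging/reordering */
	blk_queue_headactive(BLK_DEFAULT_QUEUE(MY_MAJOR), 0);
	return 0;
}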
/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct buffer_heads to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory", either by calling bh_kmap() to get
 *    a kernel mapping, or by calling create_bounce() to create a
 *    buffer in normal memory.
 **/
void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
{
	q->make_request_fn = mfn;
}

static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments)
{
	if (req->nr_segments < max_segments) {
		req->nr_segments++;
		return 1;
	}
	return 0;
}

static int ll_back_merge_fn(request_queue_t *q, struct request *req,
			    struct buffer_head *bh, int max_segments)
{
	if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
		return 1;
	return ll_new_segment(q, req, max_segments);
}

static int ll_front_merge_fn(request_queue_t *q, struct request *req,
			     struct buffer_head *bh, int max_segments)
{
	if (bh->b_data + bh->b_size == req->bh->b_data)
		return 1;
	return ll_new_segment(q, req, max_segments);
}

static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
				struct request *next, int max_segments)
{
	int total_segments = req->nr_segments + next->nr_segments;

	if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
		total_segments--;

	if (total_segments > max_segments)
		return 0;

	req->nr_segments = total_segments;
	return 1;
}

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue.
 * (and with the request spinlock acquired)
 */
static void generic_plug_device(request_queue_t *q, kdev_t dev)
{
	/*
	 * no need to replug device
	 */
	if (!list_empty(&q->queue_head) || q->plugged)
		return;

	q->plugged = 1;
	queue_task(&q->plug_tq, &tq_disk);
}

/*
 * remove the plug and let it rip..
 */
static inline void __generic_unplug_device(request_queue_t *q)
{
	if (q->plugged) {
		q->plugged = 0;
		if (!list_empty(&q->queue_head))
			q->request_fn(q);
	}
}

void generic_unplug_device(void *data)
{
	request_queue_t *q = (request_queue_t *) data;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock, flags);
	__generic_unplug_device(q);
	spin_unlock_irqrestore(&io_request_lock, flags);
}

static void blk_init_free_list(request_queue_t *q)
{
	struct request *rq;
	int i;

	INIT_LIST_HEAD(&q->rq[READ].free);
	INIT_LIST_HEAD(&q->rq[WRITE].free);
	q->rq[READ].count = 0;
	q->rq[WRITE].count = 0;

	/*
	 * Divide requests in half between read and write
	 */
	for (i = 0; i < queue_nr_requests; i++) {
		rq = kmem_cache_alloc(request_cachep, SLAB_KERNEL);
		if (rq == NULL) {
			/* We'll get a `leaked requests' message from blk_cleanup_queue */
			printk(KERN_EMERG "blk_init_free_list: error allocating requests\n");
			break;
		}
		memset(rq, 0, sizeof(struct request));
		rq->rq_status = RQ_INACTIVE;
		list_add(&rq->queue, &q->rq[i&1].free);
		q->rq[i&1].count++;
	}

	init_waitqueue_head(&q->wait_for_request);
	spin_lock_init(&q->queue_lock);
}

static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh);
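Since blk_queue_make_request() is documented above but not exercised in this file, a rough sketch of a virtual device in the md/lvm mould may help. my_make_request(), my_queue and my_target_dev are hypothetical, and a real driver would additionally have to route blk_get_queue() to this queue via blk_dev[major].queue.

/* Illustrative sketch only -- not part of ll_rw_blk.c. */
static request_queue_t my_queue;
static kdev_t my_target_dev;	/* underlying device to remap onto */

static int my_make_request(request_queue_t *q, int rw, struct buffer_head *bh)
{
	/* redirect the buffer to the real device; lvm would also
	 * adjust bh->b_rsector here */
	bh->b_rdev = my_target_dev;
	return 1;	/* non-zero: generic_make_request() resubmits bh */
}

static void my_vdev_setup(void)
{
	blk_queue_make_request(&my_queue, my_make_request);
}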
/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @q:    The &request_queue_t to be initialised
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    A global spin lock $io_request_lock must be held while manipulating the
 *    requests on the request queue.
 *
 *    The request on the head of the queue is by default assumed to be
 *    potentially active, and it is not considered for re-ordering or merging
 *    whenever the given queue is unplugged. This behaviour can be changed with
 *    blk_queue_headactive().
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/
void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
{
	INIT_LIST_HEAD(&q->queue_head);
	elevator_init(&q->elevator, ELEVATOR_LINUS);
	blk_init_free_list(q);
	q->request_fn		= rfn;
	q->back_merge_fn	= ll_back_merge_fn;
	q->front_merge_fn	= ll_front_merge_fn;
	q->merge_requests_fn	= ll_merge_requests_fn;
	q->make_request_fn	= __make_request;
	q->plug_tq.sync		= 0;
	q->plug_tq.routine	= &generic_unplug_device;
	q->plug_tq.data		= q;
	q->plugged		= 0;
	/*
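The listing breaks off mid-function here and continues on page 2 of 3. The Note in the kernel-doc above requires blk_init_queue() and blk_cleanup_queue() to be paired across the device's lifetime. A rough sketch of that pairing at module load and unload, with MY_MAJOR, my_request() and my_fops as hypothetical names:

/* Illustrative sketch only -- not part of ll_rw_blk.c. */
static struct block_device_operations my_fops;	/* open/release etc. */

static int __init my_module_init(void)
{
	if (register_blkdev(MY_MAJOR, "mydev", &my_fops) < 0)
		return -EIO;
	blk_init_queue(BLK_DEFAULT_QUEUE(MY_MAJOR), my_request);
	return 0;
}

static void __exit my_module_exit(void)
{
	/* releases the request structures allocated by blk_init_queue() */
	blk_cleanup_queue(BLK_DEFAULT_QUEUE(MY_MAJOR));
	unregister_blkdev(MY_MAJOR, "mydev");
}

module_init(my_module_init);
module_exit(my_module_exit);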
