
📄 ll_rw_blk.c

📁 Linux kernel source code, packaged as a compressed archive; this is the source code accompanying the book <<Linux内核>> (Linux Kernel)
💻 C
📖 Page 1 of 3
 **/
void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
{
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->request_freelist[READ]);
	INIT_LIST_HEAD(&q->request_freelist[WRITE]);
	elevator_init(&q->elevator, ELEVATOR_LINUS);
	blk_init_free_list(q);
	q->request_fn     	= rfn;
	q->back_merge_fn       	= ll_back_merge_fn;
	q->front_merge_fn      	= ll_front_merge_fn;
	q->merge_requests_fn	= ll_merge_requests_fn;
	q->make_request_fn	= __make_request;
	q->plug_tq.sync		= 0;
	q->plug_tq.routine	= &generic_unplug_device;
	q->plug_tq.data		= q;
	q->plugged        	= 0;
	/*
	 * These booleans describe the queue properties.  We set the
	 * default (and most common) values here.  Other drivers can
	 * use the appropriate functions to alter the queue properties.
	 * as appropriate.
	 */
	q->plug_device_fn 	= generic_plug_device;
	q->head_active    	= 1;
}

#define blkdev_free_rq(list) list_entry((list)->next, struct request, table);

/*
 * Get a free request. io_request_lock must be held and interrupts
 * disabled on the way in.
 */
static inline struct request *get_request(request_queue_t *q, int rw)
{
	struct list_head *list = &q->request_freelist[rw];
	struct request *rq;

	/*
	 * Reads get preferential treatment and are allowed to steal
	 * from the write free list if necessary.
	 */
	if (!list_empty(list)) {
		rq = blkdev_free_rq(list);
		goto got_rq;
	}

	/*
	 * if the WRITE list is non-empty, we know that rw is READ
	 * and that the READ list is empty. allow reads to 'steal'
	 * from the WRITE list.
	 */
	if (!list_empty(&q->request_freelist[WRITE])) {
		list = &q->request_freelist[WRITE];
		rq = blkdev_free_rq(list);
		goto got_rq;
	}

	return NULL;

got_rq:
	list_del(&rq->table);
	rq->free_list = list;
	rq->rq_status = RQ_ACTIVE;
	rq->special = NULL;
	rq->q = q;
	return rq;
}

/*
 * No available requests for this queue, unplug the device.
 */
static struct request *__get_request_wait(request_queue_t *q, int rw)
{
	register struct request *rq;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&q->wait_for_request, &wait);
	for (;;) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&io_request_lock);
		rq = get_request(q, rw);
		spin_unlock_irq(&io_request_lock);
		if (rq)
			break;
		generic_unplug_device(q);
		schedule();
	}
	remove_wait_queue(&q->wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return rq;
}

static inline struct request *get_request_wait(request_queue_t *q, int rw)
{
	register struct request *rq;

	spin_lock_irq(&io_request_lock);
	rq = get_request(q, rw);
	spin_unlock_irq(&io_request_lock);
	if (rq)
		return rq;
	return __get_request_wait(q, rw);
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

inline void drive_stat_acct (kdev_t dev, int rw,
				unsigned long nr_sectors, int new_io)
{
	unsigned int major = MAJOR(dev);
	unsigned int index;

	index = disk_index(dev);
	if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
		return;

	kstat.dk_drive[major][index] += new_io;
	if (rw == READ) {
		kstat.dk_drive_rio[major][index] += new_io;
		kstat.dk_drive_rblk[major][index] += nr_sectors;
	} else if (rw == WRITE) {
		kstat.dk_drive_wio[major][index] += new_io;
		kstat.dk_drive_wblk[major][index] += nr_sectors;
	} else
		printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}

/*
 * add-request adds a request to the linked list.
 * It disables interrupts (acquires the request spinlock) so that it can muck
 * with the request-lists in peace. Thus it should be called with no spinlocks
 * held.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA,
 * which is important for drive_stat_acct() above.
 */
static inline void add_request(request_queue_t * q, struct request * req,
			       struct list_head *head, int lat)
{
	int major;

	drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);

	/*
	 * let selected elevator insert the request
	 */
	q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, lat);

	/*
	 * FIXME(eric) I don't understand why there is a need for this
	 * special case code.  It clearly doesn't fit any more with
	 * the new queueing architecture, and it got added in 2.3.10.
	 * I am leaving this in here until I hear back from the COMPAQ
	 * people.
	 */
	major = MAJOR(req->rq_dev);
	if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
		(q->request_fn)(q);

	if (major >= COMPAQ_CISS_MAJOR+0 && major <= COMPAQ_CISS_MAJOR+7)
		(q->request_fn)(q);

	if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7)
		(q->request_fn)(q);
}

/*
 * Must be called with io_request_lock held and interrupts disabled
 */
void inline blkdev_release_request(struct request *req)
{
	req->rq_status = RQ_INACTIVE;

	/*
	 * Request may not have originated from ll_rw_blk
	 */
	if (req->free_list) {
		list_add(&req->table, req->free_list);
		req->free_list = NULL;
		wake_up(&req->q->wait_for_request);
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static void attempt_merge(request_queue_t * q,
			  struct request *req,
			  int max_sectors,
			  int max_segments)
{
	struct request *next;

	next = blkdev_next_request(req);
	if (req->sector + req->nr_sectors != next->sector)
		return;
	if (req->cmd != next->cmd
	    || req->rq_dev != next->rq_dev
	    || req->nr_sectors + next->nr_sectors > max_sectors
	    || next->sem)
		return;
	/*
	 * If we are not allowed to merge these requests, then
	 * return.  If we are allowed to merge, then the count
	 * will have been updated to the appropriate number,
	 * and we shouldn't do it here too.
	 */
	if(!(q->merge_requests_fn)(q, req, next, max_segments))
		return;

	req->bhtail->b_reqnext = next->bh;
	req->bhtail = next->bhtail;
	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
	list_del(&next->queue);
	blkdev_release_request(next);
}

static inline void attempt_back_merge(request_queue_t * q,
				      struct request *req,
				      int max_sectors,
				      int max_segments)
{
	if (&req->queue == q->queue_head.prev)
		return;
	attempt_merge(q, req, max_sectors, max_segments);
}

static inline void attempt_front_merge(request_queue_t * q,
				       struct list_head * head,
				       struct request *req,
				       int max_sectors,
				       int max_segments)
{
	struct list_head * prev;

	prev = req->queue.prev;
	if (head == prev)
		return;
	attempt_merge(q, blkdev_entry_to_request(prev), max_sectors, max_segments);
}

static int __make_request(request_queue_t * q, int rw,
				  struct buffer_head * bh)
{
	unsigned int sector, count;
	int max_segments = MAX_SEGMENTS;
	struct request * req = NULL, *freereq = NULL;
	int rw_ahead, max_sectors, el_ret;
	struct list_head *head;
	int latency;
	elevator_t *elevator = &q->elevator;

	count = bh->b_size >> 9;
	sector = bh->b_rsector;

	rw_ahead = 0;	/* normal case; gets changed below for READA */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* drop into READ */
		case READ:
		case WRITE:
			break;
		default:
			BUG();
			goto end_io;
	}

	/* We'd better have a real physical mapping!
	   Check this bit only if the buffer was dirty and just locked
	   down by us so at this point flushpage will block and
	   won't clear the mapped bit under us. */
	if (!buffer_mapped(bh))
		BUG();

	/*
	 * Temporary solution - in 2.5 this will be done by the lowlevel
	 * driver. Create a bounce buffer if the buffer data points into
	 * high memory - keep the original buffer otherwise.
	 */
#if CONFIG_HIGHMEM
	bh = create_bounce(rw, bh);
#endif

/* look for a free request. */
	/*
	 * Try to coalesce the new request with old requests
	 */
	max_sectors = get_max_sectors(bh->b_rdev);

	latency = elevator_request_latency(elevator, rw);

	/*
	 * Now we acquire the request spinlock, we have to be mega careful
	 * not to schedule or do something nonatomic
	 */
again:
	spin_lock_irq(&io_request_lock);

	/*
	 * skip first entry, for devices with active queue head
	 */
	head = &q->queue_head;
	if (q->head_active && !q->plugged)
		head = head->next;

	if (list_empty(head)) {
		q->plug_device_fn(q, bh->b_rdev); /* is atomic */
		goto get_rq;
	}

	el_ret = elevator->elevator_merge_fn(q, &req, bh, rw,
					     &max_sectors, &max_segments);
	switch (el_ret) {

		case ELEVATOR_BACK_MERGE:
			if (!q->back_merge_fn(q, req, bh, max_segments))
				break;
			req->bhtail->b_reqnext = bh;
			req->bhtail = bh;
			req->nr_sectors = req->hard_nr_sectors += count;
			req->e = elevator;
			drive_stat_acct(req->rq_dev, req->cmd, count, 0);
			attempt_back_merge(q, req, max_sectors, max_segments);
			goto out;

		case ELEVATOR_FRONT_MERGE:
			if (!q->front_merge_fn(q, req, bh, max_segments))
				break;
			bh->b_reqnext = req->bh;
			req->bh = bh;
			req->buffer = bh->b_data;
			req->current_nr_sectors = count;
			req->sector = req->hard_sector = sector;
			req->nr_sectors = req->hard_nr_sectors += count;
			req->e = elevator;
			drive_stat_acct(req->rq_dev, req->cmd, count, 0);
			attempt_front_merge(q, head, req, max_sectors, max_segments);
			goto out;

		/*
		 * elevator says don't/can't merge. get new request
		 */
		case ELEVATOR_NO_MERGE:
			break;

		default:
			printk("elevator returned crap (%d)\n", el_ret);
			BUG();
	}

	/*
	 * Grab a free request from the freelist. Read first try their
	 * own queue - if that is empty, we steal from the write list.
	 * Writes must block if the write list is empty, and read aheads
	 * are not crucial.
	 */
get_rq:
	if (freereq) {
		req = freereq;
		freereq = NULL;
	} else if ((req = get_request(q, rw)) == NULL) {
		spin_unlock_irq(&io_request_lock);
		if (rw_ahead)
			goto end_io;

		freereq = __get_request_wait(q, rw);
		goto again;
	}

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->hard_sector = req->sector = sector;
	req->hard_nr_sectors = req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->nr_segments = 1; /* Always 1 for a new request. */
	req->nr_hw_segments = 1; /* Always 1 for a new request. */
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->rq_dev = bh->b_rdev;
	req->e = elevator;
	add_request(q, req, head, latency);
out:
	if (!q->plugged)
		(q->request_fn)(q);
	if (freereq)
		blkdev_release_request(freereq);
	spin_unlock_irq(&io_request_lock);
	return 0;
end_io:
	bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
	return 0;
}

/**
 * generic_make_request: hand a buffer head to it's device driver for I/O
 * @rw:  READ, WRITE, or READA - what sort of I/O is desired.
 * @bh:  The buffer head describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct buffer_head and a &rw value.  The
 * %READ and %WRITE options are (hopefully) obvious in meaning.  The
 * %READA value means that a read is required, but that the driver is
 * free to fail the request if, for example, it cannot get needed
 * resources immediately.
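
The kernel-doc above describes the generic_make_request() interface (the listing continues on page 2). As a caller-side illustration only, not part of ll_rw_blk.c and assuming the 2.4-era buffer_head fields used throughout this file, a read of one already-mapped, locked buffer might be submitted roughly like this:

#include <linux/fs.h>
#include <linux/blkdev.h>

/* Hypothetical completion callback: record the result and unlock the buffer. */
static void example_end_io(struct buffer_head *bh, int uptodate)
{
	mark_buffer_uptodate(bh, uptodate);
	unlock_buffer(bh);
}

/*
 * Hypothetical submission helper (a sketch, not kernel code): the caller
 * must already have mapped and locked bh.  The identity mapping of
 * b_dev/b_blocknr onto b_rdev/b_rsector mirrors what higher layers do
 * before handing a buffer to generic_make_request(), which may remap it
 * (e.g. RAID/LVM) and, for an ordinary device, ends up in
 * q->make_request_fn, i.e. __make_request() above.
 */
static void example_submit_read(struct buffer_head *bh)
{
	bh->b_rdev = bh->b_dev;				/* request device */
	bh->b_rsector = bh->b_blocknr * (bh->b_size >> 9);	/* start sector */
	bh->b_end_io = example_end_io;			/* completion hook */
	generic_make_request(READ, bh);			/* hand off to the block layer */
}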
