
📄 ll_rw_blk.c

📁 Linux development for the S3C2410 board; can be used to generate the zImage file required by the 2410
💻 C
📖 Page 1 of 3
	 * These booleans describe the queue properties.  We set the
	 * default (and most common) values here.  Other drivers can
	 * use the appropriate functions to alter the queue properties.
	 * as appropriate.
	 */
	q->plug_device_fn	= generic_plug_device;
	q->head_active		= 1;
}

#define blkdev_free_rq(list) list_entry((list)->next, struct request, queue);
/*
 * Get a free request. io_request_lock must be held and interrupts
 * disabled on the way in.
 */
static inline struct request *get_request(request_queue_t *q, int rw)
{
	struct request *rq = NULL;
	struct request_list *rl = q->rq + rw;

	if (!list_empty(&rl->free)) {
		rq = blkdev_free_rq(&rl->free);
		list_del(&rq->queue);
		rl->count--;
		rq->rq_status = RQ_ACTIVE;
		rq->special = NULL;
		rq->q = q;
	}

	return rq;
}

/*
 * No available requests for this queue, unplug the device.
 */
static struct request *__get_request_wait(request_queue_t *q, int rw)
{
	register struct request *rq;
	DECLARE_WAITQUEUE(wait, current);

	generic_unplug_device(q);
	add_wait_queue(&q->wait_for_request, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (q->rq[rw].count < batch_requests)
			schedule();
		spin_lock_irq(&io_request_lock);
		rq = get_request(q, rw);
		spin_unlock_irq(&io_request_lock);
	} while (rq == NULL);
	remove_wait_queue(&q->wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return rq;
}

static inline struct request *get_request_wait(request_queue_t *q, int rw)
{
	register struct request *rq;

	spin_lock_irq(&io_request_lock);
	rq = get_request(q, rw);
	spin_unlock_irq(&io_request_lock);
	if (rq)
		return rq;
	return __get_request_wait(q, rw);
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor, major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev, int flag)
{
	int minor, major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

inline void drive_stat_acct (kdev_t dev, int rw,
				unsigned long nr_sectors, int new_io)
{
	unsigned int major = MAJOR(dev);
	unsigned int index;

	index = disk_index(dev);
	if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
		return;

	kstat.dk_drive[major][index] += new_io;
	if (rw == READ) {
		kstat.dk_drive_rio[major][index] += new_io;
		kstat.dk_drive_rblk[major][index] += nr_sectors;
	} else if (rw == WRITE) {
		kstat.dk_drive_wio[major][index] += new_io;
		kstat.dk_drive_wblk[major][index] += nr_sectors;
	} else
		printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}

/*
 * add-request adds a request to the linked list.
 * io_request_lock is held and interrupts disabled, as we muck with the
 * request queue list.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA,
 * which is important for drive_stat_acct() above.
 */
static inline void add_request(request_queue_t * q, struct request * req,
			       struct list_head *insert_here)
{
	drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);

	if (!q->plugged && q->head_active && insert_here == &q->queue_head) {
		spin_unlock_irq(&io_request_lock);
		BUG();
	}

	/*
	 * elevator indicated where it wants this request to be
	 * inserted at elevator_merge time
	 */
	list_add(&req->queue, insert_here);
}

/*
 * Must be called with io_request_lock held and interrupts disabled
 */
inline void blkdev_release_request(struct request *req)
{
	request_queue_t *q = req->q;
	int rw = req->cmd;

	req->rq_status = RQ_INACTIVE;
	req->q = NULL;

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * assume it has free buffers and check waiters
	 */
	if (q) {
		list_add(&req->queue, &q->rq[rw].free);
		if (++q->rq[rw].count >= batch_requests && waitqueue_active(&q->wait_for_request))
			wake_up(&q->wait_for_request);
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static void attempt_merge(request_queue_t * q,
			  struct request *req,
			  int max_sectors,
			  int max_segments)
{
	struct request *next;

	next = blkdev_next_request(req);
	if (req->sector + req->nr_sectors != next->sector)
		return;
	if (req->cmd != next->cmd
	    || req->rq_dev != next->rq_dev
	    || req->nr_sectors + next->nr_sectors > max_sectors
	    || next->waiting)
		return;
	/*
	 * If we are not allowed to merge these requests, then
	 * return.  If we are allowed to merge, then the count
	 * will have been updated to the appropriate number,
	 * and we shouldn't do it here too.
	 */
	if (!q->merge_requests_fn(q, req, next, max_segments))
		return;

	q->elevator.elevator_merge_req_fn(req, next);
	req->bhtail->b_reqnext = next->bh;
	req->bhtail = next->bhtail;
	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
	list_del(&next->queue);
	blkdev_release_request(next);
}

static inline void attempt_back_merge(request_queue_t * q,
				      struct request *req,
				      int max_sectors,
				      int max_segments)
{
	if (&req->queue == q->queue_head.prev)
		return;
	attempt_merge(q, req, max_sectors, max_segments);
}

static inline void attempt_front_merge(request_queue_t * q,
				       struct list_head * head,
				       struct request *req,
				       int max_sectors,
				       int max_segments)
{
	struct list_head * prev;

	prev = req->queue.prev;
	if (head == prev)
		return;
	attempt_merge(q, blkdev_entry_to_request(prev), max_sectors, max_segments);
}

static int __make_request(request_queue_t * q, int rw,
				  struct buffer_head * bh)
{
	unsigned int sector, count;
	int max_segments = MAX_SEGMENTS;
	struct request * req, *freereq = NULL;
	int rw_ahead, max_sectors, el_ret;
	struct list_head *head, *insert_here;
	int latency;
	elevator_t *elevator = &q->elevator;

	count = bh->b_size >> 9;
	sector = bh->b_rsector;

	rw_ahead = 0;	/* normal case; gets changed below for READA */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* drop into READ */
		case READ:
		case WRITE:
			latency = elevator_request_latency(elevator, rw);
			break;
		default:
			BUG();
			goto end_io;
	}

	/* We'd better have a real physical mapping!
	   Check this bit only if the buffer was dirty and just locked
	   down by us so at this point flushpage will block and
	   won't clear the mapped bit under us. */
	if (!buffer_mapped(bh))
		BUG();

	/*
	 * Temporary solution - in 2.5 this will be done by the lowlevel
	 * driver. Create a bounce buffer if the buffer data points into
	 * high memory - keep the original buffer otherwise.
	 */
#if CONFIG_HIGHMEM
	bh = create_bounce(rw, bh);
#endif

/* look for a free request. */
	/*
	 * Try to coalesce the new request with old requests
	 */
	max_sectors = get_max_sectors(bh->b_rdev);

again:
	req = NULL;
	head = &q->queue_head;
	/*
	 * Now we acquire the request spinlock, we have to be mega careful
	 * not to schedule or do something nonatomic
	 */
	spin_lock_irq(&io_request_lock);

	insert_here = head->prev;
	if (list_empty(head)) {
		q->plug_device_fn(q, bh->b_rdev); /* is atomic */
		goto get_rq;
	} else if (q->head_active && !q->plugged)
		head = head->next;

	el_ret = elevator->elevator_merge_fn(q, &req, head, bh, rw, max_sectors);
	switch (el_ret) {

		case ELEVATOR_BACK_MERGE:
			if (!q->back_merge_fn(q, req, bh, max_segments)) {
				insert_here = &req->queue;
				break;
			}
			elevator->elevator_merge_cleanup_fn(q, req, count);
			req->bhtail->b_reqnext = bh;
			req->bhtail = bh;
			req->nr_sectors = req->hard_nr_sectors += count;
			blk_started_io(count);
			drive_stat_acct(req->rq_dev, req->cmd, count, 0);
			attempt_back_merge(q, req, max_sectors, max_segments);
			goto out;

		case ELEVATOR_FRONT_MERGE:
			if (!q->front_merge_fn(q, req, bh, max_segments)) {
				insert_here = req->queue.prev;
				break;
			}
			elevator->elevator_merge_cleanup_fn(q, req, count);
			bh->b_reqnext = req->bh;
			req->bh = bh;
			req->buffer = bh->b_data;
			req->current_nr_sectors = count;
			req->sector = req->hard_sector = sector;
			req->nr_sectors = req->hard_nr_sectors += count;
			blk_started_io(count);
			drive_stat_acct(req->rq_dev, req->cmd, count, 0);
			attempt_front_merge(q, head, req, max_sectors, max_segments);
			goto out;

		/*
		 * elevator says don't/can't merge. get new request
		 */
		case ELEVATOR_NO_MERGE:
			/*
			 * use elevator hints as to where to insert the
			 * request. if no hints, just add it to the back
			 * of the queue
			 */
			if (req)
				insert_here = &req->queue;
			break;

		default:
			printk("elevator returned crap (%d)\n", el_ret);
			BUG();
	}

	/*
	 * Grab a free request from the freelist - if that is empty, check
	 * if we are doing read ahead and abort instead of blocking for
	 * a free slot.
	 */
get_rq:
	if (freereq) {
		req = freereq;
		freereq = NULL;
	} else if ((req = get_request(q, rw)) == NULL) {
		spin_unlock_irq(&io_request_lock);
		if (rw_ahead)
			goto end_io;

		freereq = __get_request_wait(q, rw);
		goto again;
	}

/* fill up the request-info, and add it to the queue */
	req->elevator_sequence = latency;
	req->cmd = rw;
	req->errors = 0;
	req->hard_sector = req->sector = sector;
	req->hard_nr_sectors = req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->nr_segments = 1; /* Always 1 for a new request. */
	req->nr_hw_segments = 1; /* Always 1 for a new request. */
	req->buffer = bh->b_data;
	req->waiting = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->rq_dev = bh->b_rdev;
	blk_started_io(count);
	add_request(q, req, insert_here);
out:
	if (freereq)
		blkdev_release_request(freereq);
	spin_unlock_irq(&io_request_lock);
	return 0;
end_io:
	bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
	return 0;
}

/**
 * generic_make_request: hand a buffer head to it's device driver for I/O
 * @rw:  READ, WRITE, or READA - what sort of I/O is desired.
 * @bh:  The buffer head describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct buffer_head and a &rw value.  The
 * %READ and %WRITE options are (hopefully) obvious in meaning.  The
 * %READA value means that a read is required, but that the driver is
 * free to fail the request if, for example, it cannot get needed
 * resources immediately.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bh->b_end_io
 * function described (one day) else where.
 *
 * The caller of generic_make_request must make sure that b_page,
 * b_addr, b_size are set to describe the memory buffer, that b_rdev
 * and b_rsector are set to describe the device address, and the
 * b_end_io and optionally b_private are set to describe how
 * completion notification should be signaled.  BH_Mapped should also
 * be set (to confirm that b_dev and b_blocknr are valid).
 *
 * generic_make_request and the drivers it calls may use b_reqnext,
 * and may change b_rdev and b_rsector.  So the values of these fields
 * should NOT be depended on after the call to generic_make_request.
 * Because of this, the caller should record the device address
 * information in b_dev and b_blocknr.
 *
 * Apart from those fields mentioned above, no other fields, and in
 * particular, no other flags, are changed by generic_make_request or
 * any lower level drivers.
 *
 */
void generic_make_request (int rw, struct buffer_head * bh)
{
	int major = MAJOR(bh->b_rdev);
	int minorsize = 0;
	request_queue_t *q;

	if (!bh->b_end_io)
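
The kernel-doc block above spells out the contract for callers of generic_make_request(): the buffer_head must describe the memory side (b_data/b_page/b_size) and the device side (b_rdev/b_rsector), BH_Mapped must be set, and completion is reported only through b_end_io. Below is a minimal caller sketch, assuming a 2.4-era kernel; demo_end_io() and demo_read_sector() are hypothetical names, and real code would normally go through submit_bh() or ll_rw_block() rather than calling generic_make_request() directly.

/*
 * Editorial sketch, not part of ll_rw_blk.c: a hypothetical direct caller
 * of generic_make_request() for one 512-byte sector, 2.4-style API assumed.
 */
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/locks.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Completion callback: record the result and unlock the buffer. */
static void demo_end_io(struct buffer_head *bh, int uptodate)
{
	mark_buffer_uptodate(bh, uptodate);
	unlock_buffer(bh);		/* wakes anyone in wait_on_buffer() */
}

/* Read sector 'sector' of 'dev' into 'buf' (a kmalloc'd, low-memory buffer). */
static void demo_read_sector(kdev_t dev, unsigned long sector, char *buf)
{
	struct buffer_head bh;

	memset(&bh, 0, sizeof(bh));
	init_waitqueue_head(&bh.b_wait);

	/* memory side: where the data should land */
	bh.b_data = buf;
	bh.b_page = virt_to_page(buf);
	bh.b_size = 512;

	/* device side: which device and which sector */
	bh.b_rdev = dev;
	bh.b_rsector = sector;

	/* completion notification + "mapping is valid"; keep it locked until done */
	bh.b_end_io = demo_end_io;
	bh.b_state = (1 << BH_Mapped) | (1 << BH_Lock);

	generic_make_request(READ, &bh);

	/* generic_make_request() returns no status; block until b_end_io ran */
	wait_on_buffer(&bh);
}

Because generic_make_request() reports nothing synchronously, the sketch keeps the buffer locked (BH_Lock) until the driver's completion path calls b_end_io, so wait_on_buffer() can be used to block for the result; asynchronous callers would instead do their bookkeeping entirely inside the b_end_io handler.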
