
ll_rw_blk.c

From the package: complete source code of the uClinux system for the ARM platform
Language: C
Page 1 of 2
/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include "blk.h"

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * The "disk" task queue is used to start the actual requests
 * after a plug
 */
DECLARE_TASK_QUEUE(tq_disk);

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request;

/* This specifies how many sectors to read ahead on the disk.  */
int read_ahead[MAX_BLKDEV];

/* blk_dev_struct is:
 *	*request_fn
 *	*current_request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */

/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV];

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV];

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *		then 512 bytes is assumed.
 * else
 *		sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some scsi devices and read by the msdos fs driver.
 * This might be of some use later.
 */
int * hardsect_size[MAX_BLKDEV];

/*
 * remove the plug and let it rip..
 */
void unplug_device(void * data)
{
	struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
	unsigned long flags;

	save_flags_cli(flags);
	if (dev->current_request == &dev->plug) {
		struct request * next = dev->plug.next;
		dev->current_request = next;
		if (next) {
			dev->plug.next = NULL;
			(dev->request_fn)();
		}
	}
	restore_flags(flags);
}

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue.
 */
static inline void plug_device(struct blk_dev_struct * dev)
{
	dev->current_request = &dev->plug;
	queue_task_irq_off(&dev->plug_tq, &tq_disk);
}

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}
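/*
 * Note on get_request() above: the search starts just below the slot
 * found on the previous call (cached in prev_found) and walks backwards
 * with wrap-around, so successive allocations rotate through
 * all_requests[] instead of always probing from the same end.
 */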
/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		current->state = TASK_UNINTERRUPTIBLE;
		cli();
		req = get_request(n, dev);
		sti();
		if (req)
			break;
		run_task_queue(&tq_disk);
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;

	cli();
	req = get_request(n, dev);
	sti();
	if (req)
		return req;
	return __get_request_wait(n, dev);
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
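/*
 * Note on the read-only bitmap above: minor 37, for example, gives
 * 37 >> 5 == 1 (the second long of ro_bits[major]) and 1 << (37 & 31)
 * == 1 << 5 (bit 5 within it), so the 8 longs per major cover all
 * 8 * 32 = 256 possible minors.
 */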
static inline void drive_stat_acct(int cmd, unsigned long nr_sectors,
                                   short disk_index)
{
	kstat.dk_drive[disk_index]++;
	if (cmd == READ) {
		kstat.dk_drive_rio[disk_index]++;
		kstat.dk_drive_rblk[disk_index] += nr_sectors;
	} else if (cmd == WRITE) {
		kstat.dk_drive_wio[disk_index]++;
		kstat.dk_drive_wblk[disk_index] += nr_sectors;
	} else
		printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 * which is important for drive_stat_acct() above.
 */
void add_request(struct blk_dev_struct * dev, struct request * req)
{
	struct request * tmp;
	short		 disk_index;

	switch (MAJOR(req->rq_dev)) {
		case SCSI_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
			if (disk_index < 4)
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE0_MAJOR:	/* same as HD_MAJOR */
		case XT_DISK_MAJOR:
			disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE1_MAJOR:
			disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
		default:
			break;
	}

	req->next = NULL;
	cli();
	if (req->bh)
		mark_buffer_clean(req->bh);
	if (!(tmp = dev->current_request)) {
		dev->current_request = req;
		(dev->request_fn)();
		sti();
		return;
	}
	for ( ; tmp->next ; tmp = tmp->next) {
		if ((IN_ORDER(tmp,req) ||
		    !IN_ORDER(tmp,tmp->next)) &&
		    IN_ORDER(req,tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
	if (scsi_blk_major(MAJOR(req->rq_dev)))
		(dev->request_fn)();

	sti();
}

#define MAX_SECTORS 244

static inline void attempt_merge (struct request *req)
{
	struct request *next = req->next;

	if (!next)
		return;
	if (req->sector + req->nr_sectors != next->sector)
		return;
	if (next->sem || req->cmd != next->cmd || req->rq_dev != next->rq_dev ||
	    req->nr_sectors + next->nr_sectors >= MAX_SECTORS)
		return;
#if 0
	printk ("%s: merge %ld, %ld + %ld == %ld\n", kdevname(req->rq_dev),
		req->sector, req->nr_sectors, next->nr_sectors,
		req->nr_sectors + next->nr_sectors);
#endif
	req->bhtail->b_reqnext = next->bh;
	req->bhtail = next->bhtail;
	req->nr_sectors += next->nr_sectors;
	next->rq_status = RQ_INACTIVE;
	req->next = next->next;
	wake_up (&wait_for_request);
}

void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req;

	count = bh->b_size >> 9;
	sector = bh->b_rsector;

	/* Uhhuh.. Nasty dead-lock possible here.. */
	if (buffer_locked(bh)) {
#if 0
		printk("make_request(): buffer already locked\n");
#endif
		return;
	}
	/* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
	lock_buffer(bh);

	if (blk_size[major])
		if (blk_size[major][MINOR(bh->b_rdev)] < (sector + count)>>1) {
			bh->b_state &= (1 << BH_Lock) | (1 << BH_FreeOnIO);
			/* This may well happen - the kernel calls bread()
			   without checking the size of the device, e.g.,
			   when mounting a device. */
			printk(KERN_INFO
			       "attempt to access beyond end of device\n");
			printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
			       kdevname(bh->b_rdev), rw,
			       (sector + count)>>1,
			       blk_size[major][MINOR(bh->b_rdev)]);
			unlock_buffer(bh);
			return;
		}

	rw_ahead = 0;	/* normal case; gets changed below for READA/WRITEA */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* drop into READ */
		case READ:
			if (buffer_uptodate(bh)) {
#if 0
				printk ("make_request(): buffer uptodate for READ\n");
#endif
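The listing breaks off here, partway through make_request(); the remainder is on page 2. The insertion loop in add_request() above is the interesting piece: it is a one-way elevator. Below is a minimal stand-alone sketch of just that loop, assuming a stripped-down request struct and an in_order() that compares start sectors only (the real IN_ORDER macro lives in blk.h and also takes the device into account); the loop condition itself is taken from add_request() unchanged.

/*
 * User-space sketch of the elevator-style insertion used by
 * add_request().  Simplified: the real kernel struct and IN_ORDER
 * macro carry more state; only the insertion condition is verbatim.
 */
#include <stdio.h>
#include <stdlib.h>

struct request {
	unsigned long sector;		/* starting sector of the transfer */
	struct request *next;
};

/* simplified IN_ORDER: order requests by ascending start sector */
static int in_order(struct request *a, struct request *b)
{
	return a->sector < b->sector;
}

/*
 * Insert 'req' behind 'head' with the same condition as add_request():
 * stop at the first slot where the new request keeps the list sorted,
 * or where the existing queue itself goes out of order (the point at
 * which the elevator turns around for its next sweep).
 */
static void elevator_insert(struct request *head, struct request *req)
{
	struct request *tmp = head;

	for ( ; tmp->next ; tmp = tmp->next) {
		if ((in_order(tmp, req) ||
		    !in_order(tmp, tmp->next)) &&
		    in_order(req, tmp->next))
			break;
	}
	req->next = tmp->next;
	tmp->next = req;
}

int main(void)
{
	/* the request at the queue head is being serviced at sector 50 */
	struct request head = { 50, NULL }, *tmp;
	unsigned long sectors[] = { 70, 10, 60, 30 };
	size_t i;

	for (i = 0; i < sizeof(sectors)/sizeof(sectors[0]); i++) {
		struct request *req = malloc(sizeof(*req));
		req->sector = sectors[i];
		elevator_insert(&head, req);
	}
	/* expected service order: 50 60 70 10 30 */
	for (tmp = &head; tmp; tmp = tmp->next)
		printf("%lu ", tmp->sector);
	printf("\n");
	return 0;
}

Compiled and run, this prints 50 60 70 10 30: requests past the current head position (sector 50) are served on this sweep in ascending order, while those behind it (10 and 30) are parked, still sorted, for the next pass. The !in_order(tmp, tmp->next) term is what lets the scan cross the queue's wrap-around point.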
