
📄 mtd_blkdevs-24.c

📁 MTD device driver source for Linux: the interface between MTD 'translation layers' and the 2.4 block layer, for use with the jffs2 and yaffs2 flash filesystems. A minimal client sketch follows the listing below.
💻 C
📖 Page 1 of 2
/*
 * $Id: mtd_blkdevs-24.c,v 1.15 2003/10/10 08:55:03 dwmw2 Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.4 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

static LIST_HEAD(blktrans_majors);

extern struct semaphore mtd_table_mutex;
extern struct mtd_info *mtd_table[];

struct mtd_blkcore_priv {
	devfs_handle_t devfs_dir_handle;
	int blksizes[256];
	int sizes[256];
	struct hd_struct part_table[256];
	struct gendisk gd;
	spinlock_t devs_lock; /* See comment in _request function */
	struct completion thread_dead;
	int exiting;
	wait_queue_head_t thread_wq;
};

static inline struct mtd_blktrans_dev *tr_get_dev(struct mtd_blktrans_ops *tr,
					   int devnum)
{
	struct list_head *this;
	struct mtd_blktrans_dev *d;

	list_for_each(this, &tr->devs) {
		d = list_entry(this, struct mtd_blktrans_dev, list);

		if (d->devnum == devnum)
			return d;
	}
	return NULL;
}

static inline struct mtd_blktrans_ops *get_tr(int major)
{
	struct list_head *this;
	struct mtd_blktrans_ops *t;

	list_for_each(this, &blktrans_majors) {
		t = list_entry(this, struct mtd_blktrans_ops, list);

		if (t->major == major)
			return t;
	}
	return NULL;
}

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;
	int minor;

	minor = MINOR(req->rq_dev);
	block = req->sector;
	nsect = req->current_nr_sectors;
	buf = req->buffer;

	if (block + nsect > tr->blkcore_priv->part_table[minor].nr_sects) {
		printk(KERN_WARNING "Access beyond end of device.\n");
		return 0;
	}
	block += tr->blkcore_priv->part_table[minor].start_sect;

	switch(req->cmd) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += 512)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += 512)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request cmd %d\n", req->cmd);
		return 0;
	}
}

static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = BLK_DEFAULT_QUEUE(tr->major);

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	snprintf(current->comm, sizeof(current->comm), "%sd", tr->name);

	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call
	   exit_sighand() since that'll cause an oops when we finally
	   do exit.
	*/
#ifndef __rh_config_h__ /* HAVE_NPTL */
	spin_lock_irq(&current->sigmask_lock);
	sigfillset(&current->blocked);
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);
#else
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
#endif
	daemonize();

	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int devnum;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		spin_lock_irq(&io_request_lock);

		if (list_empty(&rq->queue_head)) {
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_unlock_irq(&io_request_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			continue;
		}

		req = blkdev_entry_next_request(&rq->queue_head);

		devnum = MINOR(req->rq_dev) >> tr->part_bits;

		/* The ll_rw_blk code knows not to touch the request
		   at the head of the queue */
		spin_unlock_irq(&io_request_lock);

		/* FIXME: Where can we store the dev, on which
		   we already have a refcount anyway? We need to
		   lock against concurrent addition/removal of devices,
		   but if we use the mtd_table_mutex we deadlock when
		   grok_partitions is called from the registration
		   callbacks. */
		spin_lock(&tr->blkcore_priv->devs_lock);
		dev = tr_get_dev(tr, devnum);
		spin_unlock(&tr->blkcore_priv->devs_lock);

		BUG_ON(!dev);

		/* Ensure serialisation of requests */
		down(&dev->sem);

		res = do_blktrans_request(tr, dev, req);

		up(&dev->sem);

		if (!end_that_request_first(req, res, tr->name)) {
			spin_lock_irq(&io_request_lock);
			blkdev_dequeue_request(req);
			end_that_request_last(req);
			spin_unlock_irq(&io_request_lock);
		}
	}
	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;

	wake_up(&tr->blkcore_priv->thread_wq);
}

int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_ops *tr = NULL;
	struct mtd_blktrans_dev *dev = NULL;
	int major_nr = MAJOR(i->i_rdev);
	int minor_nr = MINOR(i->i_rdev);
	int devnum;
	int ret = -ENODEV;

	if (is_read_only(i->i_rdev) && (f->f_mode & FMODE_WRITE))
		return -EROFS;

	down(&mtd_table_mutex);

	tr = get_tr(major_nr);

	if (!tr)
		goto out;

	devnum = minor_nr >> tr->part_bits;

	dev = tr_get_dev(tr, devnum);

	if (!dev)
		goto out;

	if (!tr->blkcore_priv->part_table[minor_nr].nr_sects) {
		ret = -ENODEV;
		goto out;
	}

	if (!try_inc_mod_count(dev->mtd->owner))
		goto out;

	if (!try_inc_mod_count(tr->owner))
		goto out_tr;

	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		if (dev->mtd->owner)
			__MOD_DEC_USE_COUNT(dev->mtd->owner);
	out_tr:
		if (tr->owner)
			__MOD_DEC_USE_COUNT(tr->owner);
	}
 out:
	up(&mtd_table_mutex);

	return ret;
}

int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;
	int devnum;

	down(&mtd_table_mutex);

	tr = get_tr(MAJOR(i->i_rdev));
	if (!tr) {
		up(&mtd_table_mutex);
		return -ENODEV;
	}

	devnum = MINOR(i->i_rdev) >> tr->part_bits;
	dev = tr_get_dev(tr, devnum);

	if (!dev) {
		up(&mtd_table_mutex);
		return -ENODEV;
	}

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		if (dev->mtd->owner)
			__MOD_DEC_USE_COUNT(dev->mtd->owner);
		if (tr->owner)
			__MOD_DEC_USE_COUNT(tr->owner);
	}

	up(&mtd_table_mutex);

	return ret;
}

static int mtd_blktrans_rrpart(kdev_t rdev, struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev)
{
	struct gendisk *gd = &(tr->blkcore_priv->gd);
	int i;
	int minor = MINOR(rdev);

	if (minor & ((1<<tr->part_bits)-1) || !tr->part_bits) {
		/* BLKRRPART on a partition. Go away. */
		return -ENOTTY;
	}

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* We are required to prevent simultaneous open() ourselves.
	   The core doesn't do that for us. Did I ever mention how
	   much the Linux block layer sucks? Sledgehammer approach... */
	down(&mtd_table_mutex);

	for (i=0; i < (1<<tr->part_bits); i++) {
		invalidate_device(MKDEV(tr->major, minor+i), 1);
		gd->part[minor + i].start_sect = 0;
		gd->part[minor + i].nr_sects = 0;
	}

	grok_partitions(gd, minor, 1 << tr->part_bits,
			tr->blkcore_priv->sizes[minor]);
	up(&mtd_table_mutex);

	return 0;
}

static int blktrans_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int devnum;

	switch(cmd) {
	case BLKGETSIZE:
	case BLKGETSIZE64:
	case BLKBSZSET:
	case BLKBSZGET:
	case BLKROSET:
	case BLKROGET:
	case BLKRASET:
	case BLKRAGET:
	case BLKPG:
	case BLKELVGET:
	case BLKELVSET:
		return blk_ioctl(inode->i_rdev, cmd, arg);
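
The listing breaks off mid-function here; the remainder of blktrans_ioctl() and the registration code are on page 2. For context, below is a minimal sketch of how a client of this core plugs in, modelled on the kernel's read-only mtdblock driver. It fills in a struct mtd_blktrans_ops with the callbacks the code above dereferences (readsect, writesect, with open/release optional) and registers it. Note that register_mtd_blktrans(), add_mtd_blktrans_dev(), del_mtd_blktrans_dev(), the add_mtd/remove_dev hooks, and the dev->tr/blksize/size/readonly fields come from <linux/mtd/blktrans.h> and are not visible on this page, so treat those exact signatures as assumptions rather than as part of the listing.

/* Minimal translation-layer client sketch. Names not shown on this
 * page of the listing are assumed from <linux/mtd/blktrans.h>. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>

/* Called by do_blktrans_request() above, once per 512-byte sector;
 * returning non-zero makes the core fail the whole request. */
static int simple_readsect(struct mtd_blktrans_dev *dev,
			   unsigned long block, char *buf)
{
	size_t retlen;

	if (dev->mtd->read(dev->mtd, block * 512, 512, &retlen, buf))
		return 1;
	return 0;
}

/* Called (with mtd_table_mutex held) for each MTD device that appears. */
static void simple_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;
	memset(dev, 0, sizeof(*dev));

	dev->mtd = mtd;
	dev->tr = tr;
	dev->devnum = mtd->index;	/* becomes minor >> part_bits; see tr_get_dev() */
	dev->blksize = 512;
	dev->size = mtd->size >> 9;	/* capacity in 512-byte sectors */
	dev->readonly = 1;		/* no writesect supplied below */

	add_mtd_blktrans_dev(dev);
}

static void simple_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
	kfree(dev);
}

static struct mtd_blktrans_ops simple_tr = {
	.name		= "simpleblk",	/* hypothetical device name */
	.major		= 31,		/* mtdblock's major, for illustration only */
	.part_bits	= 0,		/* one minor per device, no partitions */
	.readsect	= simple_readsect,
	/* .writesect left NULL: do_blktrans_request() then fails WRITEs */
	.add_mtd	= simple_add_mtd,
	.remove_dev	= simple_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init simple_init(void)
{
	return register_mtd_blktrans(&simple_tr);
}

static void __exit simple_exit(void)
{
	deregister_mtd_blktrans(&simple_tr);
}

module_init(simple_init);
module_exit(simple_exit);
MODULE_LICENSE("GPL");

Tying this back to the core above: registration is expected to bind the major, start the mtd_blktrans_thread() worker, and invoke add_mtd() for each existing MTD device. After that, every block request is queued via mtd_blktrans_request(), picked up by the thread, and turned into one readsect()/writesect() call per 512-byte sector under dev->sem, which is why the client callbacks need no locking of their own against concurrent requests.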
