⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 mtdblock_v4.c

📁 1.under bootloader 1)cd your_dir/mrua_EM8620L_2.5.115.RC8_dev.arm.bootirq/MRUA_src/loader 2)将f
💻 C
📖 第 1 页 / 共 2 页
字号:
		/* NOTE(review): this is the tail of mtdblock_open() — its opening
		 * lines are outside this chunk.  This appears to be the error
		 * path (freeing the cache and device ref) followed by the
		 * success path that publishes the device in mtdblks[]. */
		put_mtd_device(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
		return 0;
	}

	/* Publish the new device and export its size (in KiB) to the
	 * block layer tables. */
	mtdblks[dev] = mtdblk;
	mtd_sizes[dev] = mtdblk->mtd->size/1024;
	/* Use the erase-block size as the soft block size, capped at one
	 * page (the block layer cannot handle larger soft block sizes). */
	if (mtdblk->mtd->erasesize)
		mtd_blksizes[dev] = mtdblk->mtd->erasesize;
	if (mtd_blksizes[dev] > PAGE_SIZE)
		mtd_blksizes[dev] = PAGE_SIZE;
	/* Devices without MTD_WRITEABLE are exposed read-only. */
	set_device_ro (inode->i_rdev, !(mtdblk->mtd->flags & MTD_WRITEABLE));

	spin_unlock(&mtdblks_lock);

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	return 0;
}

/*
 * Release (close) an mtdblock device.  Flushes the write-back cache,
 * drops one reference, and on the last close tears the device down
 * (sync, put_mtd_device, free cache and bookkeeping).
 */
static release_t mtdblock_release(struct inode *inode, struct file *file)
{
	int dev;
	struct mtdblk_dev *mtdblk;

   	DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");

	if (inode == NULL)
		release_return(-ENODEV);

	dev = MINOR(inode->i_rdev);
	mtdblk = mtdblks[dev];

	/* Flush any dirty cached data before dropping the reference. */
	down(&mtdblk->cache_sem);
	write_cached_data(mtdblk);
	up(&mtdblk->cache_sem);

	spin_lock(&mtdblks_lock);
	if (!--mtdblk->count) {
		/* It was the last usage. Free the device */
		mtdblks[dev] = NULL;
		spin_unlock(&mtdblks_lock);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		put_mtd_device(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
	} else {
		spin_unlock(&mtdblks_lock);
	}

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	BLK_DEC_USE_COUNT;
	release_return(0);
}

/*
 * This is a special request_fn because it is executed in a process context
 * to be able to sleep independently of the caller.  The io_request_lock
 * is held upon entry and exit.
 * The head of our request queue is considered active so there is no need
 * to dequeue requests before we are done.
*/static void handle_mtdblock_request(void){	struct request *req;	struct mtdblk_dev *mtdblk;	unsigned int res;	for (;;) {		INIT_REQUEST;		req = CURRENT;		spin_unlock_irq(&io_request_lock);		mtdblk = mtdblks[MINOR(req->rq_dev)];		res = 0;		if (MINOR(req->rq_dev) >= MAX_MTD_DEVICES)			panic("%s: minor out of bounds", __FUNCTION__);		if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))			goto end_req;		// Handle the request		switch (req->cmd)		{			int err;			case READ:			down(&mtdblk->cache_sem);			err = do_cached_read (mtdblk, req->sector << 9, 					req->current_nr_sectors << 9,					req->buffer);			up(&mtdblk->cache_sem);			if (!err)				res = 1;			break;			case WRITE:			// Read only device			if ( !(mtdblk->mtd->flags & MTD_WRITEABLE) ) 				break;			// Do the write			down(&mtdblk->cache_sem);			err = do_cached_write (mtdblk, req->sector << 9,					req->current_nr_sectors << 9, 					req->buffer);			up(&mtdblk->cache_sem);			if (!err)				res = 1;			break;		}end_req:		spin_lock_irq(&io_request_lock);		end_request(res);	}}static volatile int leaving = 0;static DECLARE_MUTEX_LOCKED(thread_sem);static DECLARE_WAIT_QUEUE_HEAD(thr_wq);int mtdblock_thread(void *dummy){	struct task_struct *tsk = current;	DECLARE_WAITQUEUE(wait, tsk);	/* we might get involved when memory gets low, so use PF_MEMALLOC */	tsk->flags |= PF_MEMALLOC;	strcpy(tsk->comm, "mtdblockd");	spin_lock_irq(&tsk->sigmask_lock);	sigfillset(&tsk->blocked);	recalc_sigpending(tsk);	spin_unlock_irq(&tsk->sigmask_lock);	daemonize();	while (!leaving) {		add_wait_queue(&thr_wq, &wait);		set_current_state(TASK_INTERRUPTIBLE);		spin_lock_irq(&io_request_lock);		if (QUEUE_EMPTY || QUEUE_PLUGGED) {			spin_unlock_irq(&io_request_lock);			schedule();			remove_wait_queue(&thr_wq, &wait); 		} else {			remove_wait_queue(&thr_wq, &wait); 			set_current_state(TASK_RUNNING);			handle_mtdblock_request();			spin_unlock_irq(&io_request_lock);		}	}	up(&thread_sem);	return 0;}#if LINUX_VERSION_CODE < 0x20300#define 
RQFUNC_ARG void#else#define RQFUNC_ARG request_queue_t *q#endifstatic void mtdblock_request(RQFUNC_ARG){	/* Don't do anything, except wake the thread if necessary */	wake_up(&thr_wq);}static int mtdblock_ioctl(struct inode * inode, struct file * file,		      unsigned int cmd, unsigned long arg){	struct mtdblk_dev *mtdblk;	mtdblk = mtdblks[MINOR(inode->i_rdev)];#ifdef PARANOIA	if (!mtdblk)		BUG();#endif	switch (cmd) {	case BLKGETSIZE:   /* Return device size */		return put_user((mtdblk->mtd->size >> 9), (unsigned long *) arg);#ifdef BLKGETSIZE64	case BLKGETSIZE64:		return put_user((u64)mtdblk->mtd->size, (u64 *)arg);#endif			case BLKFLSBUF:#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)		if(!capable(CAP_SYS_ADMIN))			return -EACCES;#endif		fsync_dev(inode->i_rdev);		invalidate_buffers(inode->i_rdev);		down(&mtdblk->cache_sem);		write_cached_data(mtdblk);		up(&mtdblk->cache_sem);		if (mtdblk->mtd->sync)			mtdblk->mtd->sync(mtdblk->mtd);		return 0;	default:		return -EINVAL;	}}#ifdef MAGIC_ROM_PTRstatic intmtdblock_romptr(kdev_t dev, struct vm_area_struct * vma){	struct mtd_info *mtd;	u_char *ptr;	size_t len;	mtd = __get_mtd_device(NULL, MINOR(dev));	if (!mtd->point)		return -ENOSYS; /* Can't do it, No function to point to correct addr */	if ((*mtd->point)(mtd,vma->vm_offset,vma->vm_end-vma->vm_start,&len,&ptr) != 0)		return -ENOSYS;	vma->vm_start = (unsigned long) ptr;	vma->vm_end = vma->vm_start + len;	return 0;}#endif#if LINUX_VERSION_CODE < 0x20326static struct file_operations mtd_fops ={	open: mtdblock_open,	ioctl: mtdblock_ioctl,	release: mtdblock_release,	read: block_read,#ifdef MAGIC_ROM_PTR	romptr: mtdblock_romptr,#endif	write: block_write};#elsestatic struct block_device_operations mtd_fops = {#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,14)	owner: THIS_MODULE,#endif	open: mtdblock_open,	release: mtdblock_release,#ifdef MAGIC_ROM_PTR	romptr: mtdblock_romptr,#endif	ioctl: mtdblock_ioctl};#endif#ifdef CONFIG_DEVFS_FS/* Notification that a new device has been 
added. Create the devfs entry for it. */
static void mtd_notify_add(struct mtd_info* mtd)
{
        char name[8];

        /* Skip placeholder slots. */
        if (!mtd || mtd->type == MTD_ABSENT)
                return;

        /* devfs node is named after the MTD index, e.g. /dev/mtdblock/0 */
        sprintf(name, "%d", mtd->index);

        devfs_rw_handle[mtd->index] = devfs_register(devfs_dir_handle, name,
                        DEVFS_FL_DEFAULT, MTD_BLOCK_MAJOR, mtd->index,
                        S_IFBLK | S_IRUGO | S_IWUGO,
                        &mtd_fops, NULL);
}

/* Notification that an MTD device is going away: drop its devfs entry. */
static void mtd_notify_remove(struct mtd_info* mtd)
{
        if (!mtd || mtd->type == MTD_ABSENT)
                return;

        devfs_unregister(devfs_rw_handle[mtd->index]);
}
#endif

/*
 * Module init: register the block major (via devfs or the classic
 * blkdev table), initialise the per-minor size/blksize tables, set up
 * the request queue, and start the mtdblockd service thread.
 */
int __init init_mtdblock(void)
{
	int i;

	spin_lock_init(&mtdblks_lock);
#ifdef CONFIG_DEVFS_FS
	if (devfs_register_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME, &mtd_fops))
	{
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
			MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}

	devfs_dir_handle = devfs_mk_dir(NULL, DEVICE_NAME, NULL);
	/* Get add/remove callbacks for hot-plugged MTD devices. */
	register_mtd_user(&notifier);
#else
	if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}
#endif

	DEBUG(MTD_DEBUG_LEVEL3,
		"init_mtdblock: allocated major number %d.\n", MTD_BLOCK_MAJOR);

	/* We fill it in at open() time. */
	for (i=0; i< MAX_MTD_DEVICES; i++) {
		mtd_sizes[i] = 0;
		mtd_blksizes[i] = BLOCK_SIZE;
		// added by zxh (debug print, disabled)
		//printk("id is %d, blksize in init_mtdblock is %d\n",i,mtd_blksizes[i]);
	}
	init_waitqueue_head(&thr_wq);
	/* Allow the block size to default to BLOCK_SIZE. */
	blksize_size[MAJOR_NR] = mtd_blksizes;
	blk_size[MAJOR_NR] = mtd_sizes;

	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
	/* Service thread; shares fs/files/signals with init (2.4 style). */
	kernel_thread (mtdblock_thread, NULL, CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
	return 0;
}

/*
 * Module exit: stop the service thread (wait for it via thread_sem),
 * then unwind registration in reverse order of init_mtdblock().
 */
static void __exit cleanup_mtdblock(void)
{
	leaving = 1;
	wake_up(&thr_wq);
	down(&thread_sem);	/* blocks until mtdblock_thread() exits */
#ifdef CONFIG_DEVFS_FS
	unregister_mtd_user(&notifier);
	devfs_unregister(devfs_dir_handle);
	devfs_unregister_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME);
#else
	unregister_blkdev(MAJOR_NR,DEVICE_NAME);
#endif
	blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
	blksize_size[MAJOR_NR] = NULL;
	blk_size[MAJOR_NR] = NULL;
}

module_init(init_mtdblock);
module_exit(cleanup_mtdblock);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> et al.");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -