blkmtd-25.c
/*
 * $Id: blkmtd-25.c,v 1.5 2003/07/16 06:48:27 spse Exp $
 *
 * blkmtd.c - use a block device as a fake MTD
 *
 * Author: Simon Evans <spse@secret.org.uk>
 *
 * Copyright (C) 2001,2002 Simon Evans
 *
 * Licence: GPL
 *
 * How it works:
 *	The driver uses raw/io to read/write the device and the page
 *	cache to cache access. Writes update the page cache with the
 *	new data and mark it dirty and add the page into a BIO which
 *	is then written out.
 *
 *	It can be loaded Read-Only to prevent erases and writes to the
 *	medium.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>

#define err(format, arg...)	printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
#define info(format, arg...)	printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
#define warn(format, arg...)	printk(KERN_WARNING "blkmtd: " format "\n" , ## arg)
#define crit(format, arg...)	printk(KERN_CRIT "blkmtd: " format "\n" , ## arg)

/* Default erase size in K, always make it a multiple of PAGE_SIZE */
#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10)	/* 128KiB */
#define VERSION "$Revision: 1.5 $"

/* Info for the block device */
struct blkmtd_dev {
	struct list_head list;
	struct block_device *blkdev;
	struct mtd_info mtd_info;
	struct semaphore wrbuf_mutex;
};

/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);

static void blkmtd_sync(struct mtd_info *mtd);

#define MAX_DEVICES 4

/* Module parameters passed by insmod/modprobe */
char *device[MAX_DEVICES];    /* the block device to use */
int erasesz[MAX_DEVICES];     /* optional default erase size */
int ro[MAX_DEVICES];          /* optional read only flag */
int sync;

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
MODULE_PARM(device, "1-4s");
MODULE_PARM_DESC(device, "block device to use");
MODULE_PARM(erasesz, "1-4i");
MODULE_PARM_DESC(erasesz, "optional erase size to use in KiB. eg 4=4KiB.");
MODULE_PARM(ro, "1-4i");
MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
MODULE_PARM(sync, "i");
MODULE_PARM_DESC(sync, "1=Synchronous writes");
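
/*
 * Example usage (an illustrative sketch, not part of the original source):
 * with the module parameters declared above, up to MAX_DEVICES block devices
 * can be wrapped at load time, e.g.
 *
 *   modprobe blkmtd device=/dev/loop0,/dev/loop1 erasesz=64 ro=0,1 sync=1
 *
 * Here /dev/loop0 and /dev/loop1 are placeholder paths; erasesz is given in
 * KiB, ro=1 makes the corresponding emulated MTD read-only and sync=1 asks
 * for synchronous writes, as described in the MODULE_PARM_DESC entries above.
 */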
eg 4=4KiB.");MODULE_PARM(ro, "1-4i");MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");MODULE_PARM(sync, "i");MODULE_PARM_DESC(sync, "1=Synchronous writes");/* completion handler for BIO reads */static int bi_read_complete(struct bio *bio, unsigned int bytes_done, int error){ if (bio->bi_size) return 1; complete((struct completion*)bio->bi_private); return 0;}/* completion handler for BIO writes */static int bi_write_complete(struct bio *bio, unsigned int bytes_done, int error){ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; if (bio->bi_size) return 1; if(!uptodate) err("bi_write_complete: not uptodate\n"); do { struct page *page = bvec->bv_page; DEBUG(3, "Cleaning up page %ld\n", page->index); if (--bvec >= bio->bi_io_vec) prefetchw(&bvec->bv_page->flags); if (uptodate) { SetPageUptodate(page); } else { ClearPageUptodate(page); SetPageError(page); } ClearPageDirty(page); unlock_page(page); page_cache_release(page); } while (bvec >= bio->bi_io_vec); complete((struct completion*)bio->bi_private); return 0;}/* read one page from the block device */ static int blkmtd_readpage(struct blkmtd_dev *dev, struct page *page){ struct bio *bio; struct completion event; int err = -ENOMEM; if(PageUptodate(page)) { DEBUG(2, "blkmtd: readpage page %ld is already upto date\n", page->index); unlock_page(page); return 0; } ClearPageUptodate(page); ClearPageError(page); bio = bio_alloc(GFP_KERNEL, 1); if(bio) { init_completion(&event); bio->bi_bdev = dev->blkdev; bio->bi_sector = page->index << (PAGE_SHIFT-9); bio->bi_private = &event; bio->bi_end_io = bi_read_complete; if(bio_add_page(bio, page, PAGE_SIZE, 0) == PAGE_SIZE) { submit_bio(READ, bio); blk_run_queues(); wait_for_completion(&event); err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO; bio_put(bio); } } if(err) SetPageError(page); else SetPageUptodate(page); flush_dcache_page(page); unlock_page(page); return err;}/* write out the current BIO and wait for it to finish */static int blkmtd_write_out(struct bio *bio){ struct completion event; int err; if(!bio->bi_vcnt) { bio_put(bio); return 0; } init_completion(&event); bio->bi_private = &event; bio->bi_end_io = bi_write_complete; submit_bio(WRITE, bio); blk_run_queues(); wait_for_completion(&event); DEBUG(3, "submit_bio completed, bi_vcnt = %d\n", bio->bi_vcnt); err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO; bio_put(bio); return err; }/** * blkmtd_add_page - add a page to the current BIO * @bio: bio to add to (NULL to alloc initial bio) * @blkdev: block device * @page: page to add * @pagecnt: pages left to add * * Adds a page to the current bio, allocating it if necessary. If it cannot be * added, the current bio is written out and a new one is allocated. Returns * the new bio to add or NULL on error */static struct bio *blkmtd_add_page(struct bio *bio, struct block_device *blkdev, struct page *page, int pagecnt){ retry: if(!bio) { bio = bio_alloc(GFP_KERNEL, pagecnt); if(!bio) return NULL; bio->bi_sector = page->index << (PAGE_SHIFT-9); bio->bi_bdev = blkdev; } if(bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) { blkmtd_write_out(bio); bio = NULL; goto retry; } return bio;}/** * write_pages - write block of data to device via the page cache * @dev: device to write to * @buf: data source or NULL if erase (output is set to 0xff) * @to: offset into output device * @len: amount to data to write * @retlen: amount of data written * * Grab pages from the page cache and fill them with the source data. 

/**
 * write_pages - write block of data to device via the page cache
 * @dev: device to write to
 * @buf: data source or NULL if erase (output is set to 0xff)
 * @to: offset into output device
 * @len: amount to data to write
 * @retlen: amount of data written
 *
 * Grab pages from the page cache and fill them with the source data.
 * Non page aligned start and end result in a readin of the page and
 * part of the page being modified. Pages are added to the bio and then written
 * out.
 */
static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
		       size_t len, size_t *retlen)
{
	int pagenr, offset;
	size_t start_len = 0, end_len;
	int pagecnt = 0;
	int err = 0;
	struct bio *bio = NULL;
	size_t thislen = 0;

	pagenr = to >> PAGE_SHIFT;
	offset = to & ~PAGE_MASK;

	DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %d pagenr = %d offset = %d\n",
	      buf, (long)to, len, pagenr, offset);

	/* see if we have to do a partial write at the start */
	if(offset) {
		start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
		len -= start_len;
	}

	/* calculate the length of the other two regions */
	end_len = len & ~PAGE_MASK;
	len -= end_len;

	if(start_len)
		pagecnt++;

	if(len)
		pagecnt += len >> PAGE_SHIFT;

	if(end_len)
		pagecnt++;

	down(&dev->wrbuf_mutex);

	DEBUG(3, "blkmtd: write: start_len = %d len = %d end_len = %d pagecnt = %d\n",
	      start_len, len, end_len, pagecnt);

	if(start_len) {
		/* do partial start region */
		struct page *page;

		DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n",
		      pagenr, start_len, offset);
		BUG_ON(!buf);
		page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr,
				       (filler_t *)blkmtd_readpage, dev);
		lock_page(page);
		if(PageDirty(page)) {
			err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d\n",
			    to, start_len, len, end_len, pagenr);
			BUG();
		}
		memcpy(page_address(page)+offset, buf, start_len);
		SetPageDirty(page);
		SetPageUptodate(page);
		buf += start_len;
		thislen = start_len;
		bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
		if(!bio) {
			err = -ENOMEM;
			err("bio_add_page failed\n");
			goto write_err;
		}
		pagecnt--;
		pagenr++;
	}

	/* Now do the main loop to a page aligned, n page sized output */
	if(len) {
		int pagesc = len >> PAGE_SHIFT;
		DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
		      pagenr, pagesc);
		while(pagesc) {
			struct page *page;

			/* see if page is in the page cache */
			DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
			page = grab_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr);
			if(!page) {
				/* check for NULL before touching the page flags */
				warn("write: cannot grab cache page %d", pagenr);
				err = -ENOMEM;
				goto write_err;
			}
			if(PageDirty(page)) {
				BUG();
			}
			if(!buf) {
				memset(page_address(page), 0xff, PAGE_SIZE);
			} else {
				memcpy(page_address(page), buf, PAGE_SIZE);
				buf += PAGE_SIZE;
			}
			bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
			if(!bio) {
				err = -ENOMEM;
				err("bio_add_page failed\n");
				goto write_err;
			}
			pagenr++;
			pagecnt--;
			SetPageDirty(page);
			SetPageUptodate(page);
			pagesc--;
			thislen += PAGE_SIZE;
		}
	}

	if(end_len) {
		/* do the third region */
		struct page *page;

		DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n",
		      pagenr, end_len);
		BUG_ON(!buf);
		page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr,
				       (filler_t *)blkmtd_readpage, dev);
		lock_page(page);
		if(PageDirty(page)) {
			err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d\n",
			    to, start_len, len, end_len, pagenr);
			BUG();
		}
		memcpy(page_address(page), buf, end_len);
		SetPageDirty(page);
		SetPageUptodate(page);
		DEBUG(3, "blkmtd: write: writing out partial end\n");
		thislen += end_len;
		bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
		if(!bio) {
			err = -ENOMEM;
			err("bio_add_page failed\n");
			goto write_err;
		}
		pagenr++;
	}

	DEBUG(3, "blkmtd: write: got %d vectors to write\n", bio->bi_vcnt);
 write_err:
	if(bio)
		blkmtd_write_out(bio);

	DEBUG(2, "blkmtd: write: end, retlen = %d, err = %d\n", *retlen, err);
	up(&dev->wrbuf_mutex);

	if(retlen)
		*retlen = thislen;
	return err;
}
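
/*
 * Worked example (added for illustration, not in the original source),
 * assuming 4 KiB pages: write_pages() with to = 6144 and len = 10000 computes
 * pagenr = 1 and offset = 2048, so start_len = 2048 (partial first page),
 * one whole 4096-byte middle page, and end_len = 3856 (partial last page);
 * pagecnt = 3 and 2048 + 4096 + 3856 = 10000 bytes end up in the BIO(s).
 */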
up(&dev->wrbuf_mutex); if(retlen) *retlen = thislen; return err;}/* erase a specified part of the device */static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr){ struct blkmtd_dev *dev = mtd->priv; struct mtd_erase_region_info *einfo = mtd->eraseregions; int numregions = mtd->numeraseregions; size_t from; u_long len; int err = -EIO; int retlen; instr->state = MTD_ERASING; from = instr->addr; len = instr->len; /* check erase region has valid start and length */ DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%x len = 0x%lx\n", mtd->name+9, from, len); while(numregions) { DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n", einfo->offset, einfo->erasesize, einfo->numblocks); if(from >= einfo->offset && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) { if(len == einfo->erasesize && ( (from - einfo->offset) % einfo->erasesize == 0)) break;