blkmtd-24.c
/*
 * $Id: blkmtd-24.c,v 1.23 2004/08/09 18:49:42 dmarlin Exp $
 *
 * blkmtd.c - use a block device as a fake MTD
 *
 * Author: Simon Evans <spse@secret.org.uk>
 *
 * Copyright (C) 2001,2002 Simon Evans
 *
 * Licence: GPL
 *
 * How it works:
 *	The driver uses raw I/O to read/write the device and the page
 *	cache to cache access. Writes update the page cache with the
 *	new data, mark the page dirty and add it to a kiobuf. When the
 *	kiobuf becomes full, or the next entry is for an earlier block
 *	than the ones already queued, the kiobuf is flushed to disk.
 *	This keeps writes ordered and gives a small and simple outgoing
 *	write cache.
 *
 *	It can be loaded Read-Only to prevent erases and writes to the
 *	medium.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/iobuf.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>

#ifdef CONFIG_MTD_DEBUG
#ifdef CONFIG_PROC_FS
#  include <linux/proc_fs.h>
#  define BLKMTD_PROC_DEBUG
static struct proc_dir_entry *blkmtd_proc;
#endif
#endif

#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "blkmtd: " format "\n" , ## arg)
#define crit(format, arg...) printk(KERN_CRIT "blkmtd: " format "\n" , ## arg)

/* Default erase size in bytes, always make it a multiple of PAGE_SIZE */
#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10)	/* 128KiB */
#define VERSION "1.10"

/* Info for the block device */
struct blkmtd_dev {
	struct list_head list;
	struct block_device *binding;
	struct mtd_info mtd_info;
	struct kiobuf *rd_buf, *wr_buf;
	long iobuf_locks;
	struct semaphore wrbuf_mutex;
};

/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);

static void blkmtd_sync(struct mtd_info *mtd);

#define MAX_DEVICES 4

/* Module parameters passed by insmod/modprobe */
char *device[MAX_DEVICES];	/* the block device to use */
int erasesz[MAX_DEVICES];	/* optional default erase size */
int ro[MAX_DEVICES];		/* optional read only flag */
int sync;

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
MODULE_PARM(device, "1-4s");
MODULE_PARM_DESC(device, "block device to use");
MODULE_PARM(erasesz, "1-4i");
MODULE_PARM_DESC(erasesz, "optional erase size to use in KiB. eg 4=4KiB.");
MODULE_PARM(ro, "1-4i");
MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
MODULE_PARM(sync, "i");
MODULE_PARM_DESC(sync, "1=Synchronous writes");
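/*
 * Example usage (illustrative only, based on the parameter descriptions
 * above; the device paths are hypothetical):
 *
 *   # emulate an MTD with the default 128KiB erase size on /dev/loop0,
 *   # committing writes synchronously:
 *   modprobe blkmtd device=/dev/loop0 sync=1
 *
 *   # two devices: default erase size on the first, 4KiB blocks on the
 *   # second, which is also bound read-only:
 *   modprobe blkmtd device=/dev/loop0,/dev/loop1 erasesz=0,4 ro=0,1
 */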
/**
 * read_pages - read in pages via the page cache
 * @dev: device to read from
 * @pagenrs: list of page numbers wanted
 * @pagelst: storage for struct page * pointers
 * @pages: count of pages wanted
 *
 * Read pages, getting them from the page cache if available,
 * else reading them in from disk. pagelst must be preallocated
 * to hold the page count.
 */
static int read_pages(struct blkmtd_dev *dev, int pagenrs[], struct page **pagelst, int pages)
{
	kdev_t kdev;
	struct page *page;
	int cnt = 0;
	struct kiobuf *iobuf;
	int err = 0;

	if(!dev) {
		err("read_pages: PANIC dev == NULL");
		return -EIO;
	}
	kdev = to_kdev_t(dev->binding->bd_dev);

	DEBUG(2, "read_pages: reading %d pages\n", pages);
	if(test_and_set_bit(0, &dev->iobuf_locks)) {
		/* the shared read kiobuf is in use, allocate a temporary one */
		err = alloc_kiovec(1, &iobuf);
		if (err) {
			crit("can't allocate kiobuf");
			return -ENOMEM;
		}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
		iobuf->blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long), GFP_KERNEL);
		if(iobuf->blocks == NULL) {
			crit("can't allocate iobuf blocks");
			free_kiovec(1, &iobuf);
			return -ENOMEM;
		}
#endif
	} else {
		iobuf = dev->rd_buf;
	}

	iobuf->nr_pages = 0;
	iobuf->length = 0;
	iobuf->offset = 0;
	iobuf->locked = 1;

	/* grab the pages; only those not already up to date go in the kiobuf */
	for(cnt = 0; cnt < pages; cnt++) {
		page = grab_cache_page(dev->binding->bd_inode->i_mapping, pagenrs[cnt]);
		pagelst[cnt] = page;
		if(!Page_Uptodate(page)) {
			iobuf->blocks[iobuf->nr_pages] = pagenrs[cnt];
			iobuf->maplist[iobuf->nr_pages++] = page;
		}
	}

	if(iobuf->nr_pages) {
		iobuf->length = iobuf->nr_pages << PAGE_SHIFT;
		err = brw_kiovec(READ, 1, &iobuf, kdev, iobuf->blocks, PAGE_SIZE);
		DEBUG(3, "blkmtd: read_pages: finished, err = %d\n", err);
		if(err < 0) {
			while(pages--) {
				ClearPageUptodate(pagelst[pages]);
				unlock_page(pagelst[pages]);
				page_cache_release(pagelst[pages]);
			}
		} else {
			while(iobuf->nr_pages--) {
				SetPageUptodate(iobuf->maplist[iobuf->nr_pages]);
			}
			err = 0;
		}
	}

	if(iobuf != dev->rd_buf) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
		kfree(iobuf->blocks);
#endif
		free_kiovec(1, &iobuf);
	} else {
		clear_bit(0, &dev->iobuf_locks);
	}
	DEBUG(2, "read_pages: done, err = %d\n", err);
	return err;
}


/**
 * commit_pages - commit pages in the writeout kiobuf to disk
 * @dev: device to write to
 *
 * If the current dev has pages in the dev->wr_buf kiobuf,
 * they are written to disk using brw_kiovec().
 */
static int commit_pages(struct blkmtd_dev *dev)
{
	struct kiobuf *iobuf = dev->wr_buf;
	kdev_t kdev = to_kdev_t(dev->binding->bd_dev);
	int err = 0;

	iobuf->length = iobuf->nr_pages << PAGE_SHIFT;
	iobuf->locked = 1;
	if(iobuf->length) {
		int i;
		DEBUG(2, "blkmtd: commit_pages: nrpages = %d\n", iobuf->nr_pages);
		/* Check all the pages are dirty and lock them */
		for(i = 0; i < iobuf->nr_pages; i++) {
			struct page *page = iobuf->maplist[i];
			BUG_ON(!PageDirty(page));
			lock_page(page);
		}
		err = brw_kiovec(WRITE, 1, &iobuf, kdev, iobuf->blocks, PAGE_SIZE);
		DEBUG(3, "commit_pages: committed %d pages err = %d\n", iobuf->nr_pages, err);
		while(iobuf->nr_pages) {
			struct page *page = iobuf->maplist[--iobuf->nr_pages];
			ClearPageDirty(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	DEBUG(2, "blkmtd: commit_pages: end, err = %d\n", err);
	iobuf->offset = 0;
	iobuf->nr_pages = 0;
	iobuf->length = 0;
	return err;
}
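/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): a write of
 * len = 0x3000 bytes at to = 0x1800 is split by write_pages() below into
 * three regions:
 *
 *   start: 0x1800..0x1fff (0x800 bytes)   -> page 1, read-modify-write
 *   whole: 0x2000..0x3fff (two full pages) -> pages 2-3, no read needed
 *   end:   0x4000..0x47ff (0x800 bytes)   -> page 4, read-modify-write
 *
 * Only the two partial pages are passed to read_pages(); the whole pages
 * are simply overwritten in the page cache.
 */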
/**
 * write_pages - write block of data to device via the page cache
 * @dev: device to write to
 * @buf: data source, or NULL if erase (output is set to 0xff)
 * @to: offset into output device
 * @len: amount of data to write
 * @retlen: amount of data written
 *
 * Grab pages from the page cache and fill them with the source data.
 * A non page aligned start or end results in a read-in of the page so
 * that only part of the page is modified. Pages are added to the wr_buf
 * kiobuf until it becomes full or the next page written has a lower
 * pagenr than the current max pagenr in the kiobuf.
 */
static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
		       size_t len, size_t *retlen)
{
	int pagenr, offset;
	size_t start_len = 0, end_len;
	int pagecnt = 0;
	struct kiobuf *iobuf = dev->wr_buf;
	int err = 0;
	struct page *pagelst[2];
	int pagenrs[2];
	int readpages = 0;
	int ignorepage = -1;

	pagenr = to >> PAGE_SHIFT;
	offset = to & ~PAGE_MASK;

	DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
	      buf, (long)to, len, pagenr, offset);

	*retlen = 0;
	/* see if we have to do a partial write at the start */
	if(offset) {
		start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
		len -= start_len;
	}
	/* calculate the length of the other two regions */
	end_len = len & ~PAGE_MASK;
	len -= end_len;

	if(start_len) {
		pagenrs[0] = pagenr;
		readpages++;
		pagecnt++;
	}
	if(len)
		pagecnt += len >> PAGE_SHIFT;
	if(end_len) {
		pagenrs[readpages] = pagenr + pagecnt;
		readpages++;
		pagecnt++;
	}

	DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
	      start_len, len, end_len, pagecnt);

	down(&dev->wrbuf_mutex);

	if(iobuf->nr_pages && ((pagenr <= iobuf->blocks[iobuf->nr_pages-1])
			       || (iobuf->nr_pages + pagecnt) >= KIO_STATIC_PAGES)) {

		if((pagenr == iobuf->blocks[iobuf->nr_pages-1])
		   && ((iobuf->nr_pages + pagecnt) < KIO_STATIC_PAGES)) {
			/* rewriting the last queued page: reuse its slot */
			iobuf->nr_pages--;
			ignorepage = pagenr;
		} else {
			DEBUG(3, "blkmtd: doing writeout pagenr = %d max_pagenr = %ld pagecnt = %d idx = %d\n",
			      pagenr, iobuf->blocks[iobuf->nr_pages-1], pagecnt, iobuf->nr_pages);
			commit_pages(dev);
		}
	}

	if(readpages) {
		err = read_pages(dev, pagenrs, pagelst, readpages);
		if(err < 0)
			goto readin_err;
	}

	if(start_len) {
		/* do partial start region */
		struct page *page;

		DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
		      pagenr, start_len, offset);

		page = pagelst[0];
		BUG_ON(!buf);
		if(PageDirty(page) && pagenr != ignorepage) {
			err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d ignorepage = %d",
			    to, start_len, len, end_len, pagenr, ignorepage);
			BUG();
		}
		memcpy(page_address(page)+offset, buf, start_len);
		SetPageDirty(page);
		SetPageUptodate(page);
		unlock_page(page);
		buf += start_len;
		*retlen = start_len;
		err = 0;
		iobuf->blocks[iobuf->nr_pages] = pagenr++;
		iobuf->maplist[iobuf->nr_pages] = page;
		iobuf->nr_pages++;
	}

	/* Now do the main loop to a page aligned, n page sized output */
	if(len) {
		int pagesc = len >> PAGE_SHIFT;
		DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
		      pagenr, pagesc);

		while(pagesc) {
			struct page *page;

			/* see if page is in the page cache */
			DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
			page = grab_cache_page(dev->binding->bd_inode->i_mapping, pagenr);
			/* check the grab succeeded before touching the page */
			if(!page) {
				warn("write: can't grab cache page %d", pagenr);
				err = -ENOMEM;
				goto write_err;
			}
			if(PageDirty(page) && pagenr != ignorepage)
				BUG();
			if(!buf) {
				memset(page_address(page), 0xff, PAGE_SIZE);
			} else {
				memcpy(page_address(page), buf, PAGE_SIZE);
				buf += PAGE_SIZE;
			}
			iobuf->blocks[iobuf->nr_pages] = pagenr++;
			iobuf->maplist[iobuf->nr_pages] = page;
			iobuf->nr_pages++;
			SetPageDirty(page);
			SetPageUptodate(page);
			unlock_page(page);
			pagesc--;
			*retlen += PAGE_SIZE;
		}
	}

	if(end_len) {
		/* do the third region */
		struct page *page;

		DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
		      pagenr, end_len);

		page = pagelst[readpages-1];
		BUG_ON(!buf);
		if(PageDirty(page) && pagenr != ignorepage) {
			err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d ignorepage = %d",
			    to, start_len, len, end_len, pagenr, ignorepage);
			BUG();
		}
		memcpy(page_address(page), buf, end_len);
		SetPageDirty(page);
		SetPageUptodate(page);
		unlock_page(page);
		DEBUG(3, "blkmtd: write: writing out partial end\n");
		*retlen += end_len;
		err = 0;
		iobuf->blocks[iobuf->nr_pages] = pagenr;
		iobuf->maplist[iobuf->nr_pages] = page;
		iobuf->nr_pages++;
	}

	DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);

	/* on error we also land here, flushing whatever was queued */
	if(sync) {
 write_err:
		commit_pages(dev);
	}

 readin_err:
	up(&dev->wrbuf_mutex);
	return err;
}
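/*
 * Illustrative note: blkmtd_erase() below accepts a request only if it
 * covers exactly one erase block, i.e. len == erasesize and
 * (from - region offset) % erasesize == 0. With the default 128KiB
 * (0x20000) erase size, from = 0x20000 len = 0x20000 is valid, while
 * from = 0x30000 is rejected because it is not block aligned.
 */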
/* erase a specified part of the device */
static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct blkmtd_dev *dev = mtd->priv;
	struct mtd_erase_region_info *einfo = mtd->eraseregions;
	int numregions = mtd->numeraseregions;
	size_t from;
	u_long len;
	int err = -EIO;
	size_t retlen;

	/* check readonly */
	if(!dev->wr_buf) {
		err("error: mtd%d trying to erase readonly device %s",
		    mtd->index, mtd->name);
		instr->state = MTD_ERASE_FAILED;
		goto erase_callback;
	}

	instr->state = MTD_ERASING;
	from = instr->addr;
	len = instr->len;

	/* check erase region has valid start and length */
	DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%zx len = 0x%lx\n",
	      bdevname(dev->binding->bd_dev), from, len);
	while(numregions) {
		DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
		      einfo->offset, einfo->erasesize, einfo->numblocks);
		if(from >= einfo->offset
		   && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) {
			if(len == einfo->erasesize
			   && ((from - einfo->offset) % einfo->erasesize == 0))
				break;
		}
		numregions--;
		einfo++;
	}

	if(!numregions) {
		/* Not a valid erase block */
		err("erase: invalid erase request 0x%lX @ 0x%08zX", len, from);
		instr->state = MTD_ERASE_FAILED;
		err = -EIO;
	}

	if(instr->state != MTD_ERASE_FAILED) {
		/* do the erase */
		DEBUG(3, "Doing erase from = %zd len = %ld\n", from, len);
		err = write_pages(dev, NULL, from, len, &retlen);
		if(err < 0) {
			err("erase failed err = %d", err);
			instr->state = MTD_ERASE_FAILED;
		} else {
			instr->state = MTD_ERASE_DONE;
			err = 0;
		}
	}

	DEBUG(3, "blkmtd: erase: checking callback\n");
 erase_callback:
	mtd_erase_callback(instr);
	DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
	return err;
}


/* read a range of the data via the page cache */
static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, u_char *buf)
{
	struct blkmtd_dev *dev = mtd->priv;
	int err = 0;
	int offset;
	int pagenr, pages;
	struct page **pagelst;
	int *pagenrs;
	int i;

	*retlen = 0;

	DEBUG(2, "blkmtd: read: dev = `%s' from = %lld len = %zd buf = %p\n",
	      bdevname(dev->binding->bd_dev), from, len, buf);

	pagenr = from >> PAGE_SHIFT;
	offset = from - (pagenr << PAGE_SHIFT);

	pages = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n",
	      pagenr, offset, pages);

	pagelst = kmalloc(sizeof(struct page *) * pages, GFP_KERNEL);
	if(!pagelst)
		return -ENOMEM;
	pagenrs = kmalloc(sizeof(int) * pages, GFP_KERNEL);
	if(!pagenrs) {
		kfree(pagelst);
		return -ENOMEM;
	}
	for(i = 0; i < pages; i++)
		pagenrs[i] = pagenr + i;

	err = read_pages(dev, pagenrs, pagelst, pages);
	if(err)
		goto readerr;

	pagenr = 0;
	while(pages) {
		struct page *page;
		int cpylen;

		DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
		page = pagelst[pagenr];

		cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
		if(offset + cpylen > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;

		memcpy(buf + *retlen, page_address(page) + offset, cpylen);
		offset = 0;
		len -= cpylen;
		*retlen += cpylen;
		/* done with this page: drop the lock and the reference
		   taken by read_pages() */
		unlock_page(page);
		page_cache_release(page);
		pagenr++;
		pages--;
	}

 readerr:
	kfree(pagenrs);
	kfree(pagelst);
	DEBUG(2, "blkmtd: read: end, retlen = %zd, err = %d\n", *retlen, err);
	return err;
}
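/*
 * Illustrative sketch (not part of the original file): roughly how a
 * driver like this hands its mtd_info to the MTD core. The function and
 * its arguments are hypothetical; add_mtd_device() is the real 2.4 MTD
 * core entry point, and the fields mirror definitions shown above.
 */
#if 0	/* example only */
static int blkmtd_register_example(struct blkmtd_dev *dev, int readonly)
{
	struct mtd_info *mtd = &dev->mtd_info;

	memset(mtd, 0, sizeof(struct mtd_info));
	mtd->name = "blkmtd block device";
	mtd->size = dev->binding->bd_inode->i_size;
	mtd->erasesize = CONFIG_MTD_BLKDEV_ERASESIZE;
	mtd->type = MTD_RAM;
	mtd->flags = readonly ? MTD_CAP_ROM : MTD_CAP_RAM;
	mtd->erase = blkmtd_erase;
	mtd->read = blkmtd_read;
	mtd->sync = blkmtd_sync;
	mtd->priv = dev;

	if(add_mtd_device(mtd))		/* appears as /dev/mtdN */
		return -EIO;
	return 0;
}
#endif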