
blkmtd.c

Linux 2.4.20 kernel source; can be patched with RTLinux 3.2 to build a real-time Linux system (for kernel compilation).
Language: C
Page 1 of 3
/*
 * $Id: blkmtd.c,v 1.7 2001/11/10 17:06:30 spse Exp $
 *
 * blkmtd.c - use a block device as a fake MTD
 *
 * Author: Simon Evans <spse@secret.org.uk>
 *
 * Copyright (C) 2001 Simon Evans
 *
 * Licence: GPL
 *
 * How it works:
 *      The driver uses raw I/O to read/write the device and the page
 *      cache to cache access. Writes update the page cache with the
 *      new data but make a copy of the new page(s), and then a kernel
 *      thread writes pages out to the device in the background. This
 *      ensures that writes are ordered even if a page is updated twice.
 *      Also, since pages in the page cache are never marked as dirty,
 *      we don't have to worry about writepage() being called on some
 *      random page which may not be in the write order.
 *
 *      Erases are handled like writes, so the callback is called after
 *      the page cache has been updated. Sync()ing will wait until it is
 *      all done.
 *
 *      It can be loaded read-only to prevent erases and writes to the
 *      medium.
 *
 * Todo:
 *      Make the write queue size dynamic so that it is not too big on
 *      small-memory systems and too small on large-memory systems.
 *
 *      Page cache usage may still be a bit wrong. Check we are doing
 *      everything properly.
 *
 *      Somehow allow writes to dirty the page cache so we don't use too
 *      much memory making copies of outgoing pages. Need to handle the case
 *      where page x is written to, then page y, then page x again before
 *      any of them have been committed to disk.
 *
 *      Reading should read multiple pages at once rather than using
 *      readpage() for each one. This is easy and will be fixed asap.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/iobuf.h>
#include <linux/slab.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/mtd.h>

#ifdef CONFIG_MTD_DEBUG
#ifdef CONFIG_PROC_FS
#  include <linux/proc_fs.h>
#  define BLKMTD_PROC_DEBUG
   static struct proc_dir_entry *blkmtd_proc;
#endif
#endif

/* Default erase size in KB, always make it a multiple of PAGE_SIZE */
#define CONFIG_MTD_BLKDEV_ERASESIZE 128
#define VERSION "1.7"
extern int *blk_size[];
extern int *blksize_size[];

/* Info for the block device */
typedef struct mtd_raw_dev_data_s {
  struct block_device *binding;
  int sector_size, sector_bits;
  int partial_last_page;   // 0 if device ends on a page boundary, else page no. of last page
  int last_page_sectors;   // number of sectors in last page if partial_last_page != 0
  size_t totalsize;
  int readonly;
  struct address_space as;
  struct mtd_info mtd_info;
} mtd_raw_dev_data_t;

/* Info for each queue item in the write queue */
typedef struct mtdblkdev_write_queue_s {
  mtd_raw_dev_data_t *rawdevice;
  struct page **pages;
  int pagenr;
  int pagecnt;
  int iserase;
} mtdblkdev_write_queue_t;
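/* The write queue declared below is a fixed-size FIFO ring: producers enqueue
 * items at write_queue_head and the background thread dequeues them at
 * write_queue_tail, with both indices wrapping modulo write_queue_sz. An
 * erase is queued as an item with iserase set and carries no page copies;
 * the write thread sources its data from the single, permanently locked
 * erase_page instead. */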
/* Our erase page - always remains locked */
static struct page *erase_page;

/* Static info about the MTD, used in cleanup_module */
static mtd_raw_dev_data_t *mtd_rawdevice;

/* Write queue fixed size */
#define WRITE_QUEUE_SZ 512

/* Storage for the write queue */
static mtdblkdev_write_queue_t *write_queue;
static int write_queue_sz = WRITE_QUEUE_SZ;
static int volatile write_queue_head;
static int volatile write_queue_tail;
static int volatile write_queue_cnt;
static spinlock_t mbd_writeq_lock = SPIN_LOCK_UNLOCKED;

/* Tell the write thread to finish */
static volatile int write_task_finish;

/* IPC with the write thread */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
static DECLARE_MUTEX_LOCKED(thread_sem);
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
static DECLARE_WAIT_QUEUE_HEAD(mtbd_sync_wq);
#else
static struct semaphore thread_sem = MUTEX_LOCKED;
DECLARE_WAIT_QUEUE_HEAD(thr_wq);
DECLARE_WAIT_QUEUE_HEAD(mtbd_sync_wq);
#endif

/* Module parameters passed by insmod/modprobe */
char *device;    /* the block device to use */
int erasesz;     /* optional default erase size */
int ro;          /* optional read-only flag */
int bs;          /* optionally force the block size (avoid using) */
int count;       /* optionally force the block count (avoid using) */
int wqs;         /* optionally set the write queue size */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
MODULE_PARM(device, "s");
MODULE_PARM_DESC(device, "block device to use");
MODULE_PARM(erasesz, "i");
MODULE_PARM_DESC(erasesz, "optional erase size to use in KB. eg 4=4K.");
MODULE_PARM(ro, "i");
MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
MODULE_PARM(bs, "i");
MODULE_PARM_DESC(bs, "force the block size in bytes");
MODULE_PARM(count, "i");
MODULE_PARM_DESC(count, "force the block count");
MODULE_PARM(wqs, "i");
#endif


/* Page cache stuff */

/* writepage() - should never be called - catch it anyway */
static int blkmtd_writepage(struct page *page)
{
  printk("blkmtd: writepage called!!!\n");
  return -EIO;
}


/* readpage() - reads one page from the block device */
static int blkmtd_readpage(mtd_raw_dev_data_t *rawdevice, struct page *page)
{
  int err;
  int sectornr, sectors, i;
  struct kiobuf *iobuf;
  kdev_t dev;
  unsigned long *blocks;

  if(!rawdevice) {
    printk("blkmtd: readpage: PANIC file->private_data == NULL\n");
    return -EIO;
  }
  dev = to_kdev_t(rawdevice->binding->bd_dev);

  DEBUG(2, "blkmtd: readpage called, dev = `%s' page = %p index = %ld\n",
        bdevname(dev), page, page->index);

  if(Page_Uptodate(page)) {
    DEBUG(2, "blkmtd: readpage page %ld is already up to date\n", page->index);
    UnlockPage(page);
    return 0;
  }

  ClearPageUptodate(page);
  ClearPageError(page);

  /* see if the page is in the outgoing write queue */
  spin_lock(&mbd_writeq_lock);
  if(write_queue_cnt) {
    int i = write_queue_tail;
    while(i != write_queue_head) {
      mtdblkdev_write_queue_t *item = &write_queue[i];
      if(page->index >= item->pagenr && page->index < item->pagenr+item->pagecnt) {
        /* yes it is */
        int index = page->index - item->pagenr;
        DEBUG(2, "blkmtd: readpage: found page %ld in outgoing write queue\n",
              page->index);
        if(item->iserase) {
          memset(page_address(page), 0xff, PAGE_SIZE);
        } else {
          memcpy(page_address(page), page_address(item->pages[index]), PAGE_SIZE);
        }
        SetPageUptodate(page);
        flush_dcache_page(page);
        UnlockPage(page);
        spin_unlock(&mbd_writeq_lock);
        return 0;
      }
      i++;
      i %= write_queue_sz;
    }
  }
  spin_unlock(&mbd_writeq_lock);
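  /* Not found in the write queue, so the page's current contents must come
   * from the device itself: map this single page into a kiobuf and read it
   * in with brw_kiovec() below. */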
DEBUG(3, "blkmtd: readpage: getting kiovec\n");  err = alloc_kiovec(1, &iobuf);  if (err) {    printk("blkmtd: cant allocate kiobuf\n");    SetPageError(page);    return err;  }  /* Pre 2.4.4 doesnt have space for the block list in the kiobuf */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)  blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long));  if(blocks == NULL) {    printk("blkmtd: cant allocate iobuf blocks\n");    free_kiovec(1, &iobuf);    SetPageError(page);    return -ENOMEM;  }#else   blocks = iobuf->blocks;#endif  iobuf->offset = 0;  iobuf->nr_pages = 1;  iobuf->length = PAGE_SIZE;  iobuf->locked = 1;  iobuf->maplist[0] = page;  sectornr = page->index << (PAGE_SHIFT - rawdevice->sector_bits);  sectors = 1 << (PAGE_SHIFT - rawdevice->sector_bits);  if(rawdevice->partial_last_page && page->index == rawdevice->partial_last_page) {    DEBUG(3, "blkmtd: handling partial last page\n");    sectors = rawdevice->last_page_sectors;  }  DEBUG(3, "blkmtd: readpage: sectornr = %d sectors = %d\n", sectornr, sectors);  for(i = 0; i < sectors; i++) {    blocks[i] = sectornr++;  }  /* If only a partial page read in, clear the rest of the page */  if(rawdevice->partial_last_page && page->index == rawdevice->partial_last_page) {    int offset = rawdevice->last_page_sectors << rawdevice->sector_bits;    int count = PAGE_SIZE-offset;    DEBUG(3, "blkmtd: clear last partial page: offset = %d count = %d\n", offset, count);    memset(page_address(page)+offset, 0, count);    sectors = rawdevice->last_page_sectors;  }  DEBUG(3, "bklmtd: readpage: starting brw_kiovec\n");  err = brw_kiovec(READ, 1, &iobuf, dev, blocks, rawdevice->sector_size);  DEBUG(3, "blkmtd: readpage: finished, err = %d\n", err);  iobuf->locked = 0;  free_kiovec(1, &iobuf);#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)  kfree(blocks);#endif  if(err != PAGE_SIZE) {    printk("blkmtd: readpage: error reading page %ld\n", page->index);    memset(page_address(page), 0, PAGE_SIZE);    SetPageError(page);    err = -EIO;  } else {    DEBUG(3, "blkmtd: readpage: setting page upto date\n");    SetPageUptodate(page);    err = 0;  }  flush_dcache_page(page);  UnlockPage(page);  DEBUG(2, "blkmtd: readpage: finished, err = %d\n", err);  return 0;}                    static struct address_space_operations blkmtd_aops = {  writepage:     blkmtd_writepage,  readpage:      NULL,}; /* This is the kernel thread that empties the write queue to disk */static int write_queue_task(void *data){  int err;  struct task_struct *tsk = current;  struct kiobuf *iobuf;  unsigned long *blocks;  DECLARE_WAITQUEUE(wait, tsk);  DEBUG(1, "blkmtd: writetask: starting (pid = %d)\n", tsk->pid);  daemonize();  strcpy(tsk->comm, "blkmtdd");  tsk->tty = NULL;  spin_lock_irq(&tsk->sigmask_lock);  sigfillset(&tsk->blocked);  recalc_sigpending(tsk);  spin_unlock_irq(&tsk->sigmask_lock);  exit_sighand(tsk);  if(alloc_kiovec(1, &iobuf)) {    printk("blkmtd: write_queue_task cant allocate kiobuf\n");    return 0;  }  /* Pre 2.4.4 doesnt have space for the block list in the kiobuf */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)  blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long));  if(blocks == NULL) {    printk("blkmtd: write_queue_task cant allocate iobuf blocks\n");    free_kiovec(1, &iobuf);    return 0;  }#else   blocks = iobuf->blocks;#endif  DEBUG(2, "blkmtd: writetask: entering main loop\n");  add_wait_queue(&thr_wq, &wait);  while(1) {    spin_lock(&mbd_writeq_lock);    if(!write_queue_cnt) {      /* If nothing in the queue, wake up anyone wanting to know when 
  while(1) {
    spin_lock(&mbd_writeq_lock);
    if(!write_queue_cnt) {
      /* If there is nothing in the queue, wake up anyone wanting to know when
         there is space in the queue, then sleep for 2*HZ */
      spin_unlock(&mbd_writeq_lock);
      DEBUG(4, "blkmtd: writetask: queue empty\n");
      if(waitqueue_active(&mtbd_sync_wq))
        wake_up(&mtbd_sync_wq);
      interruptible_sleep_on_timeout(&thr_wq, 2*HZ);
      DEBUG(4, "blkmtd: writetask: woken up\n");
      if(write_task_finish)
        break;
    } else {
      /* we have stuff to write */
      mtdblkdev_write_queue_t *item = &write_queue[write_queue_tail];
      struct page **pages = item->pages;
      int i;
      int sectornr = item->pagenr << (PAGE_SHIFT - item->rawdevice->sector_bits);
      int sectorcnt = item->pagecnt << (PAGE_SHIFT - item->rawdevice->sector_bits);
      int max_sectors = KIO_MAX_SECTORS >> (item->rawdevice->sector_bits - 9);
      kdev_t dev = to_kdev_t(item->rawdevice->binding->bd_dev);

      /* If we are writing to the last page on the device and it doesn't end
       * on a page boundary, subtract the number of sectors that don't exist.
       */
      if(item->rawdevice->partial_last_page &&
         (item->pagenr + item->pagecnt - 1) == item->rawdevice->partial_last_page) {
        sectorcnt -= (1 << (PAGE_SHIFT - item->rawdevice->sector_bits));
        sectorcnt += item->rawdevice->last_page_sectors;
      }

      DEBUG(3, "blkmtd: writetask: got %d queue items\n", write_queue_cnt);
      set_current_state(TASK_RUNNING);
      spin_unlock(&mbd_writeq_lock);

      DEBUG(2, "blkmtd: writetask: writing pagenr = %d pagecnt = %d sectornr = %d sectorcnt = %d\n",
            item->pagenr, item->pagecnt, sectornr, sectorcnt);

      iobuf->offset = 0;
      iobuf->locked = 1;

      /* Loop through all the pages to be written in the queue item, remembering
         we can only write KIO_MAX_SECTORS at a time */
      while(sectorcnt) {
        int cursectors = (sectorcnt < max_sectors) ? sectorcnt : max_sectors;
        int cpagecnt = (cursectors << item->rawdevice->sector_bits) + PAGE_SIZE-1;
        cpagecnt >>= PAGE_SHIFT;

        for(i = 0; i < cpagecnt; i++) {
          if(item->iserase) {
            iobuf->maplist[i] = erase_page;
          } else {
            iobuf->maplist[i] = *(pages++);
          }
        }

        for(i = 0; i < cursectors; i++) {
          blocks[i] = sectornr++;
        }

        iobuf->nr_pages = cpagecnt;
        iobuf->length = cursectors << item->rawdevice->sector_bits;
        DEBUG(3, "blkmtd: write_task: about to kiovec\n");
        err = brw_kiovec(WRITE, 1, &iobuf, dev, blocks, item->rawdevice->sector_size);
        DEBUG(3, "blkmtd: write_task: done, err = %d\n", err);
        if(err != (cursectors << item->rawdevice->sector_bits)) {
          /* if an error occurred - set this to exit the loop */
          sectorcnt = 0;
        } else {
          sectorcnt -= cursectors;
        }
      }

      /* Free the pages used in the write and the list of pages used in the
         write queue item */
      iobuf->locked = 0;
      spin_lock(&mbd_writeq_lock);
      write_queue_cnt--;
      write_queue_tail++;
      write_queue_tail %= write_queue_sz;
      if(!item->iserase) {
        for(i = 0 ; i < item->pagecnt; i++) {
          UnlockPage(item->pages[i]);
          __free_pages(item->pages[i], 0);
        }
        kfree(item->pages);
      }
      item->pages = NULL;
      spin_unlock(&mbd_writeq_lock);

      /* Tell others there is some space in the write queue */
      if(waitqueue_active(&mtbd_sync_wq))
        wake_up(&mtbd_sync_wq);
    }
  }
  remove_wait_queue(&thr_wq, &wait);
  DEBUG(1, "blkmtd: writetask: exiting\n");
  free_kiovec(1, &iobuf);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
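Both blkmtd_readpage() and write_queue_task() convert between page numbers and device sectors with shifts rather than divisions, which works because sector_size is a power of two. Below is a minimal standalone sketch of that arithmetic, separate from the driver; the PAGE_SHIFT and KIO_MAX_SECTORS values are illustrative stand-ins for the kernel's definitions:

#include <stdio.h>

#define PAGE_SHIFT      12      /* 4096-byte pages, as on i386 (assumed) */
#define KIO_MAX_SECTORS 128     /* illustrative; the real value comes from <linux/iobuf.h> */

int main(void)
{
  int sector_bits = 9;                    /* 512-byte sectors: 1 << 9 */
  unsigned long page_index = 3;           /* stands in for page->index */

  /* blkmtd_readpage(): first sector of a page, and sectors per page */
  unsigned long sectornr = page_index << (PAGE_SHIFT - sector_bits);
  int sectors_per_page   = 1 << (PAGE_SHIFT - sector_bits);

  /* write_queue_task(): largest chunk handed to brw_kiovec() at once,
   * scaled down when the device's sectors are larger than 512 bytes */
  int max_sectors = KIO_MAX_SECTORS >> (sector_bits - 9);

  printf("page %lu -> sector %lu, %d sectors/page, %d sectors/chunk\n",
         page_index, sectornr, sectors_per_page, max_sectors);
  return 0;   /* prints: page 3 -> sector 24, 8 sectors/page, 128 sectors/chunk */
}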
