📄 blkmtd.c
字号:
kfree(blocks);
#endif
  /* Tell people we have exitd */
  up(&thread_sem);
  return 0;
}

/* Add a range of pages into the outgoing write queue, making copies of them.
 *
 * rawdevice: device the pages belong to
 * pages:     source pages in the page cache (ignored when iserase is set)
 * pagenr:    index of the first page in the range
 * pagecnt:   number of pages in the range
 * iserase:   non-zero queues an erase marker instead of copied page data
 *
 * Returns 0 on success, -EINVAL for a NULL page list on a non-erase,
 * -ENOMEM when the copy pages cannot be allocated.
 *
 * Blocks (TASK_UNINTERRUPTIBLE) while the queue is full, waking the writer
 * thread (thr_wq) and sleeping on mtbd_sync_wq until a slot frees up.
 * NOTE(review): relies on the writer thread waking mtbd_sync_wq after each
 * dequeue — confirm against the (out of view) writer thread body.
 */
static int queue_page_write(mtd_raw_dev_data_t *rawdevice, struct page **pages,
                            int pagenr, int pagecnt, int iserase)
{
  struct page *outpage;
  struct page **new_pages = NULL;
  mtdblkdev_write_queue_t *item;
  int i;
  DECLARE_WAITQUEUE(wait, current);
  DEBUG(2, "blkmtd: queue_page_write: adding pagenr = %d pagecnt = %d\n", pagenr, pagecnt);

  if(!pagecnt)
    return 0;

  if(pages == NULL && !iserase)
    return -EINVAL;

  /* create a array for the list of pages */
  if(!iserase) {
    new_pages = kmalloc(pagecnt * sizeof(struct page *), GFP_KERNEL);
    if(new_pages == NULL)
      return -ENOMEM;

    /* make copies of the pages in the page cache */
    for(i = 0; i < pagecnt; i++) {
      outpage = alloc_pages(GFP_KERNEL, 0);
      if(!outpage) {
        /* unwind: unlock and free every copy made so far */
        while(i--) {
          UnlockPage(new_pages[i]);
          __free_pages(new_pages[i], 0);
        }
        kfree(new_pages);
        return -ENOMEM;
      }
      /* copies are queued locked; presumably the writer thread unlocks
         and frees them after the I/O completes — confirm out of view */
      lock_page(outpage);
      memcpy(page_address(outpage), page_address(pages[i]), PAGE_SIZE);
      new_pages[i] = outpage;
    }
  }

  /* wait until there is some space in the write queue */
 test_lock:
  spin_lock(&mbd_writeq_lock);
  if(write_queue_cnt == write_queue_sz) {
    /* queue full: drop the lock, kick the writer thread and sleep until
       it signals mtbd_sync_wq, then re-check from the top */
    spin_unlock(&mbd_writeq_lock);
    DEBUG(3, "blkmtd: queue_page: Queue full\n");
    current->state = TASK_UNINTERRUPTIBLE;
    add_wait_queue(&mtbd_sync_wq, &wait);
    wake_up_interruptible(&thr_wq);
    schedule();
    current->state = TASK_RUNNING;
    remove_wait_queue(&mtbd_sync_wq, &wait);
    DEBUG(3, "blkmtd: queue_page_write: Queue has %d items in it\n", write_queue_cnt);
    goto test_lock;
  }

  DEBUG(3, "blkmtd: queue_page_write: qhead: %d qtail: %d qcnt: %d\n",
        write_queue_head, write_queue_tail, write_queue_cnt);

  /* fix up the queue item */
  item = &write_queue[write_queue_head];
  item->pages = new_pages;
  item->pagenr = pagenr;
  item->pagecnt = pagecnt;
  item->rawdevice = rawdevice;
  item->iserase = iserase;

  /* advance the circular-buffer head under the queue lock */
  write_queue_head++;
  write_queue_head %= write_queue_sz;
  write_queue_cnt++;
  DEBUG(3, "blkmtd: queue_page_write: qhead: %d qtail: %d qcnt: %d\n",
        write_queue_head, write_queue_tail, write_queue_cnt);
  spin_unlock(&mbd_writeq_lock);
  DEBUG(2, "blkmtd: queue_page_write: finished\n");
  return 0;
}

/* erase a specified part of the device.
 *
 * Validates instr->addr/instr->len against the MTD erase regions, fills the
 * affected page-cache pages with 0xff and queues them as an erase marker via
 * queue_page_write().  Always invokes instr->callback (if set) before
 * returning; instr->state reports MTD_ERASE_DONE or MTD_ERASE_FAILED.
 */
static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
  mtd_raw_dev_data_t *rawdevice = mtd->priv;
  struct mtd_erase_region_info *einfo = mtd->eraseregions;
  int numregions = mtd->numeraseregions;
  size_t from;
  u_long len;
  int err = 0;

  /* check readonly */
  if(rawdevice->readonly) {
    printk("blkmtd: error: trying to erase readonly device %s\n", device);
    instr->state = MTD_ERASE_FAILED;
    goto erase_callback;
  }
  instr->state = MTD_ERASING;
  from = instr->addr;
  len = instr->len;

  /* check erase region has valid start and length */
  DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%x len = 0x%lx\n",
        bdevname(rawdevice->binding->bd_dev), from, len);
  while(numregions) {
    DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
          einfo->offset, einfo->erasesize, einfo->numblocks);
    /* request must cover exactly one erase block, aligned to its start */
    if(from >= einfo->offset
       && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) {
      if(len == einfo->erasesize
         && ( (from - einfo->offset) % einfo->erasesize == 0))
        break;
    }
    numregions--;
    einfo++;
  }
  if(!numregions) {
    /* Not a valid erase block */
    printk("blkmtd: erase: invalid erase request 0x%lX @ 0x%08X\n", len, from);
    instr->state = MTD_ERASE_FAILED;
    err = -EIO;
  }

  if(instr->state != MTD_ERASE_FAILED) {
    /* start the erase */
    int pagenr, pagecnt;
    struct page *page, **pages;
    int i = 0;

    /* Handle the last page of the device not being whole */
    if(len < PAGE_SIZE)
      len = PAGE_SIZE;
    pagenr = from >> PAGE_SHIFT;
    pagecnt = len >> PAGE_SHIFT;
    DEBUG(3, "blkmtd: erase: pagenr = %d pagecnt = %d\n", pagenr, pagecnt);

    pages = kmalloc(pagecnt * sizeof(struct page *), GFP_KERNEL);
    if(pages == NULL) {
      err = -ENOMEM;
      instr->state = MTD_ERASE_FAILED;
      goto erase_out;
    }

    /* fill every page in the erase range with 0xff (erased flash state) */
    while(pagecnt) {
      /* get the page via the page cache */
      DEBUG(3, "blkmtd: erase: doing grab_cache_page() for page %d\n", pagenr);
      page = grab_cache_page(&rawdevice->as, pagenr);
      if(!page) {
        DEBUG(3, "blkmtd: erase: grab_cache_page() failed for page %d\n", pagenr);
        /* NOTE(review): pages grabbed in earlier iterations are still
           locked and referenced here — looks like a leak on this path;
           confirm */
        kfree(pages);
        err = -EIO;
        instr->state = MTD_ERASE_FAILED;
        goto erase_out;
      }
      memset(page_address(page), 0xff, PAGE_SIZE);
      pages[i] = page;
      pagecnt--;
      pagenr++;
      i++;
    }
    DEBUG(3, "blkmtd: erase: queuing page write\n");
    /* iserase=1: the queue carries only the range, not the page copies */
    err = queue_page_write(rawdevice, NULL, from >> PAGE_SHIFT, len >> PAGE_SHIFT, 1);
    pagecnt = len >> PAGE_SHIFT;
    if(!err) {
      while(pagecnt--) {
        SetPageUptodate(pages[pagecnt]);
        UnlockPage(pages[pagecnt]);
        page_cache_release(pages[pagecnt]);
        flush_dcache_page(pages[pagecnt]);
      }
      kfree(pages);
      instr->state = MTD_ERASE_DONE;
    } else {
      /* NOTE(review): failure path releases the pages without unlocking
         them first (no UnlockPage) — verify against grab_cache_page's
         locking contract */
      while(pagecnt--) {
        SetPageError(pages[pagecnt]);
        page_cache_release(pages[pagecnt]);
      }
      kfree(pages);
      instr->state = MTD_ERASE_FAILED;
    }
  }
 erase_out:
  DEBUG(3, "blkmtd: erase: checking callback\n");
 erase_callback:
  if (instr->callback) {
    (*(instr->callback))(instr);
  }
  DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
  return err;
}

/* read a range of the data via the page cache.
 *
 * Copies len bytes starting at offset `from` into buf, page by page, using
 * read_cache_page()/blkmtd_readpage() to fault each page in.  *retlen is set
 * to the number of bytes copied.  Returns 0 on success, PTR_ERR from
 * read_cache_page(), or -EIO when a page never becomes uptodate.
 */
static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                       size_t *retlen, u_char *buf)
{
  mtd_raw_dev_data_t *rawdevice = mtd->priv;
  int err = 0;
  int offset;
  int pagenr, pages;

  *retlen = 0;
  DEBUG(2, "blkmtd: read: dev = `%s' from = %ld len = %d buf = %p\n",
        bdevname(rawdevice->binding->bd_dev), (long int)from, len, buf);

  pagenr = from >> PAGE_SHIFT;
  offset = from - (pagenr << PAGE_SHIFT);
  /* number of pages the (possibly unaligned) range touches */
  pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
  DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n", pagenr, offset, pages);

  /* just loop through each page, getting it via readpage() - slow but easy */
  while(pages) {
    struct page *page;
    int cpylen;
    DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
    page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice);
    if(IS_ERR(page)) {
      return PTR_ERR(page);
    }
    wait_on_page(page);
    if(!Page_Uptodate(page)) {
      /* error reading page */
      printk("blkmtd: read: page not uptodate\n");
      page_cache_release(page);
      return -EIO;
    }
    /* clamp the copy to what is left and to the end of this page */
    cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
    if(offset+cpylen > PAGE_SIZE)
      cpylen = PAGE_SIZE-offset;
    memcpy(buf + *retlen, page_address(page) + offset, cpylen);
    offset = 0;  /* only the first page can start mid-page */
    len -= cpylen;
    *retlen += cpylen;
    pagenr++;
    pages--;
    page_cache_release(page);
  }
  DEBUG(2, "blkmtd: end read: retlen = %d, err = %d\n", *retlen, err);
  return err;
}

/* write a range of the data via the page cache.
 *
 * Basic operation. break the write into three parts.
 *
 * 1. From a page unaligned start up until the next page boundary
 * 2. Page sized, page aligned blocks
 * 3. From end of last aligned block to end of range
 *
 * 1,3 are read via the page cache and readpage() since these are partial
 * pages, 2 we just grab pages from the page cache, not caring if they are
 * already in memory or not since they will be completly overwritten.
 *
 */
static int blkmtd_write(struct mtd_info *mtd, loff_t to, size_t len,
                        size_t *retlen, const u_char *buf)
{
  mtd_raw_dev_data_t *rawdevice = mtd->priv;
  int err = 0;
  int offset;
  int pagenr;
  size_t len1 = 0, len2 = 0, len3 = 0;  /* head / middle / tail byte counts */
  struct page **pages;
  int pagecnt = 0;

  *retlen = 0;
  DEBUG(2, "blkmtd: write: dev = `%s' to = %ld len = %d buf = %p\n",
        bdevname(rawdevice->binding->bd_dev), (long int)to, len, buf);

  /* handle readonly and out of range numbers */
  if(rawdevice->readonly) {
    printk("blkmtd: error: trying to write to a readonly device %s\n", device);
    return -EROFS;
  }
  if(to >= rawdevice->totalsize) {
    return -ENOSPC;
  }
  if(to + len > rawdevice->totalsize) {
    len = (rawdevice->totalsize - to);  /* truncate to device end */
  }

  pagenr = to >> PAGE_SHIFT;
  offset = to - (pagenr << PAGE_SHIFT);

  /* see if we have to do a partial write at the start */
  if(offset) {
    if((offset + len) > PAGE_SIZE) {
      len1 = PAGE_SIZE - offset;
      len -= len1;
    } else {
      len1 = len;
      len = 0;
    }
  }

  /* calculate the length of the other two regions */
  len3 = len & ~PAGE_MASK;
  len -= len3;
  len2 = len;

  if(len1)
    pagecnt++;
  if(len2)
    pagecnt += len2 >> PAGE_SHIFT;
  if(len3)
    pagecnt++;

  DEBUG(3, "blkmtd: write: len1 = %d len2 = %d len3 = %d pagecnt = %d\n",
        len1, len2, len3, pagecnt);

  /* get space for list of pages */
  pages = kmalloc(pagecnt * sizeof(struct page *), GFP_KERNEL);
  if(pages == NULL) {
    return -ENOMEM;
  }
  pagecnt = 0;  /* reused below as the fill index into pages[] */

  if(len1) {
    /* do partial start region: read-modify-write the first page */
    struct page *page;
    DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n",
          pagenr, len1, offset);
    page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice);
    if(IS_ERR(page)) {
      kfree(pages);
      return PTR_ERR(page);
    }
    memcpy(page_address(page)+offset, buf, len1);
    pages[pagecnt++] = page;
    buf += len1;
    *retlen = len1;
    err = 0;
    pagenr++;
  }

  /* Now do the main loop to a page aligned, n page sized output */
  if(len2) {
    int pagesc = len2 >> PAGE_SHIFT;
    DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n", pagenr, pagesc);
    while(pagesc) {
      struct page *page;
      /* see if page is in the page cache */
      DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
      page = grab_cache_page(&rawdevice->as, pagenr);
      DEBUG(3, "blkmtd: write: got page %d from page cache\n", pagenr);
      if(!page) {
        printk("blkmtd: write: cant grab cache page %d\n", pagenr);
        err = -EIO;
        goto write_err;
      }
      /* whole page is overwritten, so no read-back needed */
      memcpy(page_address(page), buf, PAGE_SIZE);
      pages[pagecnt++] = page;
      UnlockPage(page);
      SetPageUptodate(page);
      pagenr++;
      pagesc--;
      buf += PAGE_SIZE;
      *retlen += PAGE_SIZE;
    }
  }

  if(len3) {
    /* do the third region: read-modify-write the trailing partial page */
    struct page *page;
    DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n", pagenr, len3);
    page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice);
    if(IS_ERR(page)) {
      err = PTR_ERR(page);
      goto write_err;
    }
    memcpy(page_address(page), buf, len3);
    DEBUG(3, "blkmtd: write: writing out partial end\n");
    pages[pagecnt++] = page;
    *retlen += len3;
    err = 0;
  }
  DEBUG(2, "blkmtd: write: end, retlen = %d, err = %d\n", *retlen, err);

  /* submit it to the write task */
  err = queue_page_write(rawdevice, pages, to >> PAGE_SHIFT, pagecnt, 0);
  if(!err) {
    while(pagecnt--) {
      SetPageUptodate(pages[pagecnt]);
      flush_dcache_page(pages[pagecnt]);
      page_cache_release(pages[pagecnt]);
    }
    kfree(pages);
    return 0;
  }

 write_err:
  /* NOTE(review): `while(--pagecnt)` releases pages[pagecnt-1]..pages[1]
     but never pages[0] — looks like an off-by-one page leak; the success
     path above uses `while(pagecnt--)`.  Confirm and fix upstream. */
  while(--pagecnt) {
    SetPageError(pages[pagecnt]);
    page_cache_release(pages[pagecnt]);
  }
  kfree(pages);
  return err;
}

/* sync the device - wait until the write queue is empty */
static void blkmtd_sync(struct mtd_info *mtd)
{
  DECLARE_WAITQUEUE(wait, current);
  mtd_raw_dev_data_t *rawdevice = mtd->priv;
  if(rawdevice->readonly)
    return;  /* read-only devices queue no writes, nothing to drain */
  DEBUG(2, "blkmtd: sync: called\n");

 stuff_inq:
  spin_lock(&mbd_writeq_lock);
  if(write_queue_cnt) {
    /* queue not empty: kick the writer thread and sleep until it signals
       mtbd_sync_wq, then re-check (same pattern as queue_page_write) */
    spin_unlock(&mbd_writeq_lock);
    current->state = TASK_UNINTERRUPTIBLE;
    add_wait_queue(&mtbd_sync_wq, &wait);
    DEBUG(3, "blkmtd: sync: waking up task\n");
    wake_up_interruptible(&thr_wq);
    schedule();
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -