📄 cfi_cmdset_0002.c
	/* As before, drop back one to point at the region in which
	   the address actually falls */
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i = first;

	while (len) {
		ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1 << cfi->chipshift) ==
		    ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks))
		     % (1 << cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip,
				    unsigned long adr, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;
	int ta = 0;
	int status;

	adr += chip->start;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, adr);

	ENABLE_VPP(map);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	cfi_spin_unlock(chip->mutex);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout((chip->erase_time*HZ)/(2*1000));
	cfi_spin_lock(chip->mutex);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			cfi_spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if ((status = chip_status(map, adr)) != CHIP_BUSY ||
		    (ta = time_after(jiffies, timeo)))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
		cfi_spin_lock(chip->mutex);
	}

	if (status == CHIP_READY)
		goto op_done;

	if (status == CHIP_TIMEDOUT)
		printk(KERN_WARNING "MTD %s(): flash internal timeout\n", __func__);
	else if (ta)
		printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
	else
		printk(KERN_WARNING "MTD %s(): unexpected failure. status = %d\n",
		       __func__, status);

op_failed:
	/* reset on all failures. */
	cfi_write(map, CMD(0xF0), chip->start);
	/* FIXME - should have reset delay before continuing */
	ret = -EIO;

op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);
	return ret;
}


int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_amdstd_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}


static void cfi_amdstd_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		cfi_spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			cfi_spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		cfi_spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		cfi_spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */
	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			cfi_spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			cfi_spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			cfi_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		} else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		cfi_spin_unlock(chip->mutex);
	}
}


#ifdef DEBUG_LOCK_BITS

static int do_printlockstatus_oneblock(struct map_info *map,
				       struct flchip *chip,
				       unsigned long adr,
				       void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;

	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);

	return 0;
}

#define debug_dump_locks(mtd, frob, ofs, len, thunk) \
	cfi_amdstd_varsize_frob((mtd), (frob), (ofs), (len), (thunk))

#else

#define debug_dump_locks(...)

#endif /* DEBUG_LOCK_BITS */


struct xxlock_thunk {
	cfi_word val;
	flstate_t state;
};


#define DO_XXLOCK_ONEBLOCK_LOCK   ((struct xxlock_thunk){0x01, FL_LOCKING})
#define DO_XXLOCK_ONEBLOCK_UNLOCK ((struct xxlock_thunk){0x00, FL_UNLOCKING})


/*
 * FIXME - this is *very* specific to a particular chip.  It likely won't
 * work for all chips that require unlock.  It also hasn't been tested
 * with interleaved chips.
 */
static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
			      unsigned long adr, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct xxlock_thunk *xxlt = (struct xxlock_thunk *)thunk;
	int ret;

	/*
	 * This is easy because these are writes to registers and not writes
	 * to flash memory - that means that we don't have to check status
	 * and timeout.
	 */
	adr += chip->start;
	/*
	 * lock block registers:
	 * - on 64k boundaries and
	 * - bit 1 set high
	 * - block lock registers are 4MiB lower - overflow subtract (danger)
	 */
	adr = ((adr & ~0xffff) | 0x2) + ~0x3fffff;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	chip->state = xxlt->state;
	cfi_write(map, CMD(xxlt->val), adr);

	/* Done and happy. */
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);
	return 0;
}


static int cfi_amdstd_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

	DEBUG(MTD_DEBUG_LEVEL3,
	      "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	      __func__, ofs, len);
	debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);

	ret = cfi_amdstd_varsize_frob(mtd, do_xxlock_oneblock, ofs, len,
				      (void *)&DO_XXLOCK_ONEBLOCK_LOCK);

	DEBUG(MTD_DEBUG_LEVEL3,
	      "%s: lock status after, ret=%d\n",
	      __func__, ret);
	debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);

	return ret;
}


static int cfi_amdstd_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

	DEBUG(MTD_DEBUG_LEVEL3,
	      "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	      __func__, ofs, len);
	debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);

	ret = cfi_amdstd_varsize_frob(mtd, do_xxlock_oneblock, ofs, len,
				      (void *)&DO_XXLOCK_ONEBLOCK_UNLOCK);

	DEBUG(MTD_DEBUG_LEVEL3,
	      "%s: lock status after, ret=%d\n",
	      __func__, ret);
	debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);

	return ret;
}


static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name[] = "cfi_cmdset_0002";

int __init cfi_amdstd_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
	return 0;
}

static void __exit cfi_amdstd_exit(void)
{
	inter_module_unregister(im_name);
}

module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
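
/*
 * For context, a minimal sketch (not part of this driver) of how a 2.6-era
 * MTD user would exercise the erase path implemented above.  erase_one_block(),
 * erase_done() and erase_wq are illustrative names chosen here; the struct
 * erase_info fields, mtd->erase() and the MTD_ERASE_* states are the old MTD
 * interface that cfi_amdstd_erase_varsize() plugs into.  This driver completes
 * the erase synchronously, but the callback/wait pattern below is how callers
 * handled drivers that might finish asynchronously.
 */
#if 0	/* usage sketch only - not compiled with the driver */
#include <linux/mtd/mtd.h>
#include <linux/string.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(erase_wq);

/* Completion callback: cfi_amdstd_erase_varsize() invokes this once the
 * sector(s) have been erased and instr->state has been set. */
static void erase_done(struct erase_info *instr)
{
	wake_up(&erase_wq);
}

/* Erase one eraseblock at offset 'ofs' and wait for the result. */
static int erase_one_block(struct mtd_info *mtd, loff_t ofs)
{
	struct erase_info instr;
	int ret;

	memset(&instr, 0, sizeof(instr));
	instr.mtd      = mtd;
	instr.addr     = ofs;
	instr.len      = mtd->erasesize;
	instr.callback = erase_done;

	/* mtd->erase() lands in cfi_amdstd_erase_varsize(), which walks the
	 * erase regions and calls do_erase_oneblock() for each sector. */
	ret = mtd->erase(mtd, &instr);
	if (ret)
		return ret;

	wait_event(erase_wq, instr.state == MTD_ERASE_DONE ||
			     instr.state == MTD_ERASE_FAILED);

	return instr.state == MTD_ERASE_DONE ? 0 : -EIO;
}
#endif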