/*
 * cfi_cmdset_0001.c — MTD driver for the Intel/Sharp extended (0001)
 * CFI command set.  (Code-viewer page header removed; this chunk begins
 * mid-way through cfi_cmdset_0001().)
 */
/* (tail of cfi_cmdset_0001: apply fixups, derive per-chip timings) */
		cfi_fixup(mtd, jedec_fixup_table);
	}

	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/*
	 * Derive per-chip timing budgets from the CFI query structure.
	 * The *TimeoutTyp / *TimeoutMax fields are log2 encodings (2^n),
	 * hence the shift expansions below; the hard-coded constants are
	 * fallbacks for chips that report 0.  (Write values appear to be
	 * in microseconds with erase values scaled by 1000 — confirm
	 * against the CFI specification.)
	 */
	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		/* Worst-case variants: the typ and max exponents are added
		 * because both are log2, i.e. max = 2^typ * 2^ratio. */
		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);
		/* NOTE(review): no fallback for buffer_write_time_max,
		 * mirroring the intentionally missing buffer_write_time
		 * default above. */

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

/* Command-set IDs 0003 and 0200 are handled identically to 0001, so the
 * probe entry points are plain aliases. */
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));

EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

/*
 * Build the mtd_info for the probed chip set: total size, erase-region
 * table, OTP hooks, partition fixup.  Returns the mtd on success, or
 * NULL on failure (freeing mtd and cmdset_priv on the error path).
 */
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	/* DevSize is a log2 encoding; scale by interleave for the set. */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
/* (cfi_intelext_setup, continued: size and erase-region discovery) */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo packs the block size in the upper 16 bits
		 * (units of 256 bytes) and (block count - 1) in the lower
		 * 16 bits; (x >> 8) & ~0xff == (x >> 16) << 8. */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize tracks the largest region's block size. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		/* Replicate this region's geometry for every chip in the
		 * set, offset by each chip's base. */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			/* One lock bit per block, rounded up to a byte.
			 * NOTE(review): this kmalloc() result is never
			 * checked here — a failure would only surface when
			 * the lockmap is later dereferenced. */
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	/* The regions must tile the device exactly. */
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	/* Wire up the one-time-programmable register accessors. */
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last.
*/
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

/*
 * Probe for hardware partitions and, if present, split every physical
 * chip into one flchip per partition.  May replace *pcfi (and
 * map->fldrv_priv) with a freshly allocated cfi_private, freeing the
 * old one.  Returns 0 on success or a negative errno.
 */
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point.  This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Walk the variable-length 'extra' area of the extended
		 * query table to reach the partition region records. */

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo =
(struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			/* Writes are restricted to program-region granularity
			 * (writesize just set above), so bit-level writes
			 * are disabled. */
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		/* A partition must hold at least one erase block. */
		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		/* cfi_private carries a flexible tail of flchip entries. */
		newcfi = kmalloc(sizeof(struct cfi_private) +
				 numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips,
				 GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			/* One flchip_shared per physical chip arbitrates
			 * write/erase between its virtual partitions. */
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				/* Clone the physical chip's state, then
				 * rebase it to this partition's window. */
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references.
*/
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
		       "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		/* Publish the new per-partition view and drop the old. */
		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

/*
 * Wait until @chip can accept an operation of kind @mode at @adr.
 * Returns 0 when ready, -EAGAIN when the caller must re-examine chip
 * state and retry, -EIO on hard failure.  Called with chip->mutex held;
 * may temporarily drop and retake it.
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	/* 0x80 = status-register ready bit, 0x01 = partition-write status. */
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		/* Fall through */
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only suspend the erase if the chip advertises erase
		 * suspend (FeatureSupport bit 1) and @mode allows it. */
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data.
--rmk */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state
			   FL_ERASE_SUSPENDING.  So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip
		 * anymore. */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */
	default:
	sleep:
		/* No compatible state: park on the chip's waitqueue until
		 * woken, then tell the caller to retry. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		return -EAGAIN;
	}
}

/*
 * Acquire the right to perform an operation of kind @mode on @chip at
 * @adr, arbitrating write/erase contention with other virtual
 * partitions that share the physical chip (via chip->priv).
 * Called with chip->mutex held.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
*
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			/* Drop our own lock while waiting on the contender
			 * to avoid lock-order deadlock; retake it after. */
			spin_unlock(chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			spin_lock(chip->mutex);

			if (ret == -EAGAIN) {
				spin_unlock(contender->mutex);
				goto retry;
			}
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			spin_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				spin_unlock(contender->mutex);
				goto retry;
			}
			spin_unlock(contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			spin_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			goto retry;
		}

		/* We now own it */
/* (code-viewer hotkey legend removed — not part of the source file;
 * the remainder of get_chip() lies outside this excerpt) */