ftl.c
static void ftl_erase_callback(struct erase_info *erase)
{
    partition_t *part;
    struct xfer_info_t *xfer;
    int i;

    /* Look up the transfer unit */
    part = (partition_t *)(erase->priv);

    for (i = 0; i < part->header.NumTransferUnits; i++)
        if (part->XferInfo[i].Offset == erase->addr) break;

    if (i == part->header.NumTransferUnits) {
        printk(KERN_NOTICE "ftl_cs: internal error: "
               "erase lookup failed!\n");
        return;
    }

    xfer = &part->XferInfo[i];
    if (erase->state == MTD_ERASE_DONE)
        xfer->state = XFER_ERASED;
    else {
        xfer->state = XFER_FAILED;
        printk(KERN_NOTICE "ftl_cs: erase failed: state = %d\n",
               erase->state);
    }
    kfree(erase);

} /* ftl_erase_callback */

static int prepare_xfer(partition_t *part, int i)
{
    erase_unit_header_t header;
    struct xfer_info_t *xfer;
    int nbam, ret;
    u_int32_t ctl;
    ssize_t retlen;
    loff_t offset;

    xfer = &part->XferInfo[i];
    xfer->state = XFER_FAILED;
    DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);

    /* Write the transfer unit header */
    header = part->header;
    header.LogicalEUN = cpu_to_le16(0xffff);
    header.EraseCount = cpu_to_le32(xfer->EraseCount);

    ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
                               &retlen, (u_char *)&header);
    if (ret) {
        return ret;
    }

    /* Write the BAM stub */
    nbam = (part->BlocksPerUnit * sizeof(u_int32_t) +
            le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE;

    offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
    ctl = cpu_to_le32(BLOCK_CONTROL);

    for (i = 0; i < nbam; i++, offset += sizeof(u_int32_t)) {
        ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
                                   &retlen, (u_char *)&ctl);
        if (ret)
            return ret;
    }

    xfer->state = XFER_PREPARED;
    return 0;

} /* prepare_xfer */
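/*======================================================================

    Aside (an editor's sketch, not part of the original driver): the
    nbam value in prepare_xfer() is a ceiling division -- it counts
    how many SECTOR_SIZE-byte sectors are needed to hold BAMOffset
    bytes plus one 32-bit BAM entry per block, which appears to be
    the number of leading sectors occupied by the header and the BAM
    itself.  The helper below just repeats that arithmetic in
    isolation; example_bam_sectors() is a made-up name.

======================================================================*/

#if 0   /* illustrative only, never compiled */
static int example_bam_sectors(partition_t *part)
{
    /* Round (BAM bytes + BAMOffset) up to a whole number of sectors,
       exactly as prepare_xfer() does when sizing the stub. */
    return (part->BlocksPerUnit * sizeof(u_int32_t) +
            le32_to_cpu(part->header.BAMOffset) +
            SECTOR_SIZE - 1) / SECTOR_SIZE;
}
#endif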
/*======================================================================

    Copy_erase_unit() takes a full erase block and a transfer unit,
    copies everything to the transfer unit, then swaps the block
    pointers.

    All data blocks are copied to the corresponding blocks in the
    target unit, so the virtual block map does not need to be updated.

======================================================================*/

static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
                           u_int16_t xferunit)
{
    u_char buf[SECTOR_SIZE];
    struct eun_info_t *eun;
    struct xfer_info_t *xfer;
    u_int32_t src, dest, free, i;
    u_int16_t unit;
    int ret;
    ssize_t retlen;
    loff_t offset;
    u_int16_t srcunitswap = cpu_to_le16(srcunit);

    eun = &part->EUNInfo[srcunit];
    xfer = &part->XferInfo[xferunit];
    DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
          eun->Offset, xfer->Offset);

    /* Read current BAM */
    if (part->bam_index != srcunit) {

        offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);

        ret = part->mbd.mtd->read(part->mbd.mtd, offset,
                                  part->BlocksPerUnit * sizeof(u_int32_t),
                                  &retlen, (u_char *)(part->bam_cache));

        /* mark the cache bad, in case we get an error later */
        part->bam_index = 0xffff;

        if (ret) {
            printk(KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
            return ret;
        }
    }

    /* Write the LogicalEUN for the transfer unit */
    xfer->state = XFER_UNKNOWN;
    offset = xfer->Offset + 20; /* Bad! */
    unit = cpu_to_le16(0x7fff);

    ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int16_t),
                               &retlen, (u_char *)&unit);
    if (ret) {
        printk(KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
        return ret;
    }

    /* Copy all data blocks from source unit to transfer unit */
    src = eun->Offset;
    dest = xfer->Offset;

    free = 0;
    ret = 0;
    for (i = 0; i < part->BlocksPerUnit; i++) {
        switch (BLOCK_TYPE(le32_to_cpu(part->bam_cache[i]))) {
        case BLOCK_CONTROL:
            /* This gets updated later */
            break;
        case BLOCK_DATA:
        case BLOCK_REPLACEMENT:
            ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
                                      &retlen, (u_char *)buf);
            if (ret) {
                printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
                return ret;
            }
            ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
                                       &retlen, (u_char *)buf);
            if (ret) {
                printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
                return ret;
            }
            break;
        default:
            /* All other blocks must be free */
            part->bam_cache[i] = cpu_to_le32(0xffffffff);
            free++;
            break;
        }
        src += SECTOR_SIZE;
        dest += SECTOR_SIZE;
    }

    /* Write the BAM to the transfer unit */
    ret = part->mbd.mtd->write(part->mbd.mtd,
                               xfer->Offset + le32_to_cpu(part->header.BAMOffset),
                               part->BlocksPerUnit * sizeof(int32_t),
                               &retlen, (u_char *)part->bam_cache);
    if (ret) {
        printk(KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
        return ret;
    }

    /* All clear? Then update the LogicalEUN again */
    ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20,
                               sizeof(u_int16_t), &retlen,
                               (u_char *)&srcunitswap);
    if (ret) {
        printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
        return ret;
    }

    /* Update the maps and usage stats */
    i = xfer->EraseCount;
    xfer->EraseCount = eun->EraseCount;
    eun->EraseCount = i;

    i = xfer->Offset;
    xfer->Offset = eun->Offset;
    eun->Offset = i;

    part->FreeTotal -= eun->Free;
    part->FreeTotal += free;
    eun->Free = free;
    eun->Deleted = 0;

    /* Now, the cache should be valid for the new block */
    part->bam_index = srcunit;

    return 0;

} /* copy_erase_unit */
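/*======================================================================

    Aside (an editor's sketch, not part of the original driver): the
    two writes to "xfer->Offset + 20" in copy_erase_unit() update the
    LogicalEUN field of the on-media erase unit header; the "Bad!"
    comment flags the hardcoded byte offset.  Assuming the header
    layout really does place LogicalEUN 20 bytes in with no padding,
    the same address could be derived from the structure definition
    with offsetof(), as below.  logical_eun_pos() is a made-up name.

======================================================================*/

#if 0   /* illustrative only, never compiled */
static loff_t logical_eun_pos(u_int32_t unit_offset)
{
    /* offsetof() comes from <linux/stddef.h>.  Same value as
       unit_offset + 20 on the assumed layout, but it tracks the
       structure definition instead of a magic number. */
    return unit_offset + offsetof(erase_unit_header_t, LogicalEUN);
}
#endif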
/*======================================================================

    reclaim_block() picks a full erase unit and a transfer unit and
    then calls copy_erase_unit() to copy one to the other.  Then, it
    schedules an erase on the expired block.

    What's a good way to decide which transfer unit and which erase
    unit to use?  Beats me.  My way is to always pick the transfer
    unit with the fewest erases, and usually pick the data unit with
    the most deleted blocks.  But with a small probability, pick the
    oldest data unit instead.  This means that we generally postpone
    the next reclamation as long as possible, but shuffle static
    stuff around a bit for wear leveling.

======================================================================*/

static int reclaim_block(partition_t *part)
{
    u_int16_t i, eun, xfer;
    u_int32_t best;
    int queued, ret;

    DEBUG(0, "ftl_cs: reclaiming space...\n");
    DEBUG(3, "NumTransferUnits == %x\n", part->header.NumTransferUnits);

    /* Pick the least erased transfer unit */
    best = 0xffffffff; xfer = 0xffff;
    do {
        queued = 0;
        for (i = 0; i < part->header.NumTransferUnits; i++) {
            int n = 0;
            if (part->XferInfo[i].state == XFER_UNKNOWN) {
                DEBUG(3, "XferInfo[%d].state == XFER_UNKNOWN\n", i);
                n = 1;
                erase_xfer(part, i);
            }
            if (part->XferInfo[i].state == XFER_ERASING) {
                DEBUG(3, "XferInfo[%d].state == XFER_ERASING\n", i);
                n = 1;
                queued = 1;
            }
            else if (part->XferInfo[i].state == XFER_ERASED) {
                DEBUG(3, "XferInfo[%d].state == XFER_ERASED\n", i);
                n = 1;
                prepare_xfer(part, i);
            }
            if (part->XferInfo[i].state == XFER_PREPARED) {
                DEBUG(3, "XferInfo[%d].state == XFER_PREPARED\n", i);
                n = 1;
                if (part->XferInfo[i].EraseCount <= best) {
                    best = part->XferInfo[i].EraseCount;
                    xfer = i;
                }
            }
            if (!n)
                DEBUG(3, "XferInfo[%d].state == %x\n",
                      i, part->XferInfo[i].state);
        }
        if (xfer == 0xffff) {
            if (queued) {
                DEBUG(1, "ftl_cs: waiting for transfer "
                      "unit to be prepared...\n");
                if (part->mbd.mtd->sync)
                    part->mbd.mtd->sync(part->mbd.mtd);
            } else {
                static int ne = 0;
                if (++ne < 5)
                    printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
                           "suitable transfer units!\n");
                else
                    DEBUG(1, "ftl_cs: reclaim failed: no "
                          "suitable transfer units!\n");
                return -EIO;
            }
        }
    } while (xfer == 0xffff);

    eun = 0;
    if ((jiffies % shuffle_freq) == 0) {
        DEBUG(1, "ftl_cs: recycling freshest block...\n");
        best = 0xffffffff;
        for (i = 0; i < part->DataUnits; i++)
            if (part->EUNInfo[i].EraseCount <= best) {
                best = part->EUNInfo[i].EraseCount;
                eun = i;
            }
    } else {
        best = 0;
        for (i = 0; i < part->DataUnits; i++)
            if (part->EUNInfo[i].Deleted >= best) {
                best = part->EUNInfo[i].Deleted;
                eun = i;
            }
        if (best == 0) {
            static int ne = 0;
            if (++ne < 5)
                printk(KERN_NOTICE "ftl_cs: reclaim failed: "
                       "no free blocks!\n");
            else
                DEBUG(1, "ftl_cs: reclaim failed: "
                      "no free blocks!\n");
            return -EIO;
        }
    }

    ret = copy_erase_unit(part, eun, xfer);
    if (!ret)
        erase_xfer(part, xfer);
    else
        printk(KERN_NOTICE "ftl_cs: copy_erase_unit failed!\n");
    return ret;

} /* reclaim_block */
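/*======================================================================

    Aside (an editor's sketch, not part of the original driver): the
    "small probability" mentioned above is (jiffies % shuffle_freq)
    == 0 in reclaim_block(), i.e. roughly one reclaim in shuffle_freq
    recycles the least-erased ("freshest") data unit instead of the
    most-deleted one, which slowly rotates otherwise static data for
    wear leveling.  Split out as a helper it would look like the
    sketch below; want_wear_shuffle() is a made-up name.

======================================================================*/

#if 0   /* illustrative only, never compiled */
static int want_wear_shuffle(void)
{
    /* About 1 chance in shuffle_freq per call; cheap and stateless. */
    return (jiffies % shuffle_freq) == 0;
}
#endif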
/*======================================================================

    Find_free() searches for a free block.  If necessary, it updates
    the BAM cache for the erase unit containing the free block.  It
    returns the block index -- the erase unit is just the currently
    cached unit.  If there are no free blocks, it returns 0 -- this
    is never a valid data block because it contains the header.

======================================================================*/

#ifdef PSYCHO_DEBUG
static void dump_lists(partition_t *part)
{
    int i;
    printk(KERN_DEBUG "ftl_cs: Free total = %d\n", part->FreeTotal);
    for (i = 0; i < part->DataUnits; i++)
        printk(KERN_DEBUG "ftl_cs: unit %d: %d phys, %d free, "
               "%d deleted\n", i,
               part->EUNInfo[i].Offset >> part->header.EraseUnitSize,
               part->EUNInfo[i].Free, part->EUNInfo[i].Deleted);
}
#endif

static u_int32_t find_free(partition_t *part)
{
    u_int16_t stop, eun;
    u_int32_t blk;
    size_t retlen;
    int ret;

    /* Find an erase unit with some free space */
    stop = (part->bam_index == 0xffff) ? 0 : part->bam_index;
    eun = stop;
    do {
        if (part->EUNInfo[eun].Free != 0) break;
        /* Wrap around at end of table */
        if (++eun == part->DataUnits) eun = 0;
    } while (eun != stop);

    if (part->EUNInfo[eun].Free == 0)
        return 0;

    /* Is this unit's BAM cached? */
    if (eun != part->bam_index) {