wl.c
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}
	spin_unlock(&ubi->volumes_lock);
	ubi_msg("mark PEB %d as bad", pnum);

	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs == 0)
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			paranoid_check_in_wl_tree(e, &ubi->used);
			rb_erase(&e->rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			paranoid_check_in_wl_tree(e, &ubi->scrub);
			rb_erase(&e->rb, &ubi->scrub);
		} else {
			err = prot_tree_del(ubi, e->pnum);
			if (err) {
				ubi_err("PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}
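
/*
 * Illustrative usage sketch (hedged): a hypothetical caller that owns PEB
 * 'pnum' and no longer needs it would return it to the WL sub-system like
 * this, passing a non-zero 'torture' argument only after an I/O error on
 * that PEB. 'handle_error()' is a made-up placeholder, not a UBI function.
 *
 *	err = ubi_wl_put_peb(ubi, pnum, 0);
 *	if (err)
 *		handle_error(err);
 */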

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in background. This function returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	dbg_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->rb, &ubi->used);
	} else {
		int err;

		err = prot_tree_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush (%d pending works)", ubi->works_count);
	while (ubi->works_count) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	/*
	 * And in case last was the WL worker and it cancelled the LEB
	 * movement, flush again.
	 */
	while (ubi->works_count) {
		dbg_wl("flush more (%d pending works)", ubi->works_count);
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}
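
/*
 * Illustrative sketch (hedged; the actual attach code lives outside this
 * file): the background thread above is typically driven through the
 * standard kthread API, roughly as follows.
 *
 *	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
 *	if (IS_ERR(ubi->bgt_thread))
 *		return PTR_ERR(ubi->bgt_thread);
 *	wake_up_process(ubi->bgt_thread);
 *	...
 *	kthread_stop(ubi->bgt_thread);
 */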

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}
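
/*
 * Illustrative sketch (hedged): a hypothetical attach path would pair
 * ubi_wl_init_scan() with ubi_wl_close() roughly as follows; the 'out'
 * label stands for the caller's own error handling.
 *
 *	err = ubi_wl_init_scan(ubi, si);
 *	if (err)
 *		goto out;
 *	...
 *	ubi_wl_close(ubi);
 */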

/**
 * protection_trees_destroy - destroy the protection RB-trees.
 * @ubi: UBI device description object
 */
static void protection_trees_destroy(struct ubi_device *ubi)
{
	struct rb_node *rb;
	struct ubi_wl_prot_entry *pe;

	rb = ubi->prot.aec.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &pe->rb_aec)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, pe->e);
			kfree(pe);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	cancel_pending(ubi);
	protection_trees_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock @pnum
 * is equivalent to @ec, %1 if not, and a negative error code if an error
 * occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it is
 * not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return 1;
}

#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */