
wl.c

MTD driver based on linux-2.6.28
Language: C
Page 1 of 3
		if (pnum < pe->e->pnum)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return -ENODEV;

found:
	ubi_assert(pe->e->pnum == pnum);
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
	kfree(pe);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err > 0)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * check_protection_over - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation, when the absolute erase
 * counter is incremented, to check if some physical eraseblocks no longer
 * have to be protected. These physical eraseblocks are moved from the
 * protection trees to the used tree.
 */
static void check_protection_over(struct ubi_device *ubi)
{
	struct ubi_wl_prot_entry *pe;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
	while (1) {
		spin_lock(&ubi->wl_lock);
		if (!ubi->prot.aec.rb_node) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		pe = rb_entry(rb_first(&ubi->prot.aec),
			      struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec > ubi->abs_ec) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
		rb_erase(&pe->rb_aec, &ubi->prot.aec);
		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
		wl_tree_add(pe->e, &ubi->used);
		spin_unlock(&ubi->wl_lock);

		kfree(pe);
		cond_resched();
	}
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled)
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);
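
/*
 * Illustrative sketch (not part of the original driver): every worker
 * queued via schedule_ubi_work() follows the same contract as
 * erase_worker() below - the function owns its &struct ubi_work and must
 * free it, and when @cancel is non-zero it only releases its resources
 * and returns, because the UBI device is going away:
 *
 *	static int example_worker(struct ubi_device *ubi,
 *				  struct ubi_work *wrk, int cancel)
 *	{
 *		if (cancel) {
 *			kfree(wrk);
 *			return 0;
 *		}
 *		... do the actual work ...
 *		kfree(wrk);
 *		return 0;
 *	}
 */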
/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}
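
/*
 * Worked example (illustrative; assumes the default value 4096 of
 * CONFIG_MTD_UBI_WL_THRESHOLD behind %UBI_WL_THRESHOLD): if the least
 * worn-out used PEB has EC 100 and the free PEB picked by
 * find_wl_entry() has EC 5000, then 5000 - 100 = 4900 >= 4096, so the
 * worker below copies the data and frees the old PEB; a gap of, say,
 * 1000 would leave both eraseblocks where they are.
 */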
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, put = 0, scrubbing = 0, protect = 0;
	struct ubi_wl_prot_entry *uninitialized_var(pe);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);

	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->rb, &ubi->free);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes the VID header shortly after the
			 * PEB was handed out, so most likely the writer did
			 * not yet have a chance to write it because it was
			 * preempted. Just re-schedule the work, so that next
			 * time it will likely have the VID header in place.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		if (err > 0)
			err = -EIO;
		goto out_error;
	}

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err < 0)
			goto out_error;
		if (err == 1)
			goto out_not_moved;

		/*
		 * For some reason the LEB was not moved - it might be because
		 * the volume is being deleted. We should prevent this PEB from
		 * being selected for wear-leveling movement for some "time",
		 * so put it to the protection tree.
		 */

		dbg_wl("cancelled moving PEB %d", e1->pnum);
		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
		if (!pe) {
			err = -ENOMEM;
			goto out_error;
		}

		protect = 1;
	}

	ubi_free_vid_hdr(ubi, vid_hdr);
	if (scrubbing && !protect)
		ubi_msg("scrubbed PEB %d, data moved to PEB %d",
			e1->pnum, e2->pnum);

	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_tree_add(ubi, e1, pe, protect);
	if (!ubi->move_to_put)
		wl_tree_add(e2, &ubi->used);
	else
		put = 1;
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	if (put) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
		err = schedule_erase(ubi, e2, 0);
		if (err)
			goto out_error;
	}

	if (!protect) {
		err = schedule_erase(ubi, e1, 0);
		if (err)
			goto out_error;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reason the LEB was not moved, might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e2, 0);
	if (err)
		goto out_error;

	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	ubi_err("error %d while moving PEB %d to PEB %d",
		err, e1->pnum, e2->pnum);

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);
	ubi_ro_mode(ubi);

	mutex_unlock(&ubi->move_mutex);
	return err;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}
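
/*
 * Summary (illustrative, not from the original sources): a run of
 * wear_leveling_worker() ends in one of a few ways. On success @e1 is
 * scheduled for erasure and later re-appears as a free PEB. If the copy
 * could not be performed (missing VID header, or ubi_eba_copy_leb()
 * returned 1), @e1 goes back to the tree it came from and only @e2 is
 * erased. If the move was cancelled for another reason, @e1 is parked
 * in the protection tree instead. On a real error both entries are
 * freed and UBI switches to read-only mode.
 */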
/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		ubi->abs_ec += 1;
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of
		 * protected physical eraseblocks.
		 */
		check_protection_over(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/*
		 * Re-schedule the LEB for erasure. Note, @e is reused by the
		 * new work, so it must still be valid here and is only freed
		 * if the re-scheduling fails.
		 */
		err1 = schedule_erase(ubi, e, 0);
		if (err1) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			err = err1;
			goto out_ro;
		}
		return err;
	}

	kmem_cache_free(ubi_wl_entry_slab, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to RO mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
