gc.c
From: eCos/RedBoot for 勤研ARM AnywhereII(4510), full source code included
Language: C
Page 1 of 3
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@redhat.com>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: gc.c,v 1.133 2004/03/08 15:29:09 dwmw2 Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include <linux/stat.h>
#include "nodelist.h"

static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, 
					  struct jffs2_inode_cache *ic,
					  struct jffs2_raw_node_ref *raw);
static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
					struct jffs2_inode_info *f, struct jffs2_full_dnode *fd);
static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				      uint32_t start, uint32_t end);
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				       uint32_t start, uint32_t end);
static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_eraseblock *jeb,
			       struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f);
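
/* A rough map of this file (inferred from the declarations above and the
   code below): jffs2_garbage_collect_pass() picks an eraseblock with
   jffs2_find_gc_block(), skips to the next live node in it, and either
   copies a REF_PRISTINE node verbatim via jffs2_garbage_collect_pristine()
   or fetches the inode and hands off to jffs2_garbage_collect_live(),
   which in turn uses the per-node-type helpers declared above (metadata,
   dirent, deletion dirent, hole and dnode). */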

/* Called with erase_completion_lock held */
static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *ret;
	struct list_head *nextlist = NULL;
	int n = jiffies % 128;

	/* Pick an eraseblock to garbage collect next. This is where we'll
	   put the clever wear-levelling algorithms. Eventually.  */
	/* We possibly want to favour the dirtier blocks more when the
	   number of free blocks is low. */
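	/* With n effectively a pseudo-random value in [0,128), the thresholds
	   below weight the choice roughly as: erasable_list 50/128,
	   very_dirty_list 60/128, dirty_list 16/128 and clean_list 2/128,
	   with the later branches serving as fallbacks when the preferred
	   list is empty. Blocks on bad_used_list always go first as long as
	   enough free blocks remain in reserve. */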
	if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
		D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));
		nextlist = &c->bad_used_list;
	} else if (n < 50 && !list_empty(&c->erasable_list)) {
		/* Note that most of them will have gone directly to be erased. 
		   So don't favour the erasable_list _too_ much. */
		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
		nextlist = &c->erasable_list;
	} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
		/* Most of the time, pick one off the very_dirty list */
		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n"));
		nextlist = &c->very_dirty_list;
	} else if (n < 126 && !list_empty(&c->dirty_list)) {
		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n"));
		nextlist = &c->dirty_list;
	} else if (!list_empty(&c->clean_list)) {
		D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n"));
		nextlist = &c->clean_list;
	} else if (!list_empty(&c->dirty_list)) {
		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n"));

		nextlist = &c->dirty_list;
	} else if (!list_empty(&c->very_dirty_list)) {
		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"));
		nextlist = &c->very_dirty_list;
	} else if (!list_empty(&c->erasable_list)) {
		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"));

		nextlist = &c->erasable_list;
	} else {
		/* Eep. All were empty */
		printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n");
		return NULL;
	}

	ret = list_entry(nextlist->next, struct jffs2_eraseblock, list);
	list_del(&ret->list);
	c->gcblock = ret;
	ret->gc_node = ret->first_node;
	if (!ret->gc_node) {
		printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
		BUG();
	}
	
	/* Have we accidentally picked a clean block with wasted space? */
	if (ret->wasted_size) {
		D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
		ret->dirty_size += ret->wasted_size;
		c->wasted_size -= ret->wasted_size;
		c->dirty_size += ret->wasted_size;
		ret->wasted_size = 0;
	}

	D1(jffs2_dump_block_lists(c));
	return ret;
}

/* jffs2_garbage_collect_pass
 * Make a single attempt to progress GC. Move one node, and possibly
 * start erasing one eraseblock.
 */
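/*
 * A minimal sketch of how a caller might drive this, one pass at a time.
 * This is assumed usage rather than code from this file: the real callers
 * (the GC thread and the space-reservation path) live elsewhere, and
 * more_gc_wanted() below is a hypothetical predicate standing in for
 * whatever condition the caller actually tests.
 *
 *	while (more_gc_wanted(c)) {
 *		int err = jffs2_garbage_collect_pass(c);
 *		if (err)
 *			break;
 *	}
 *
 * Errors include -EINTR (interrupted while taking alloc_sem) and -EIO (no
 * suitable block to collect). A return of 0 does not guarantee progress,
 * since we may simply have slept waiting for an inode that was being read,
 * so callers re-check their space condition rather than count passes.
 */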
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
{
	struct jffs2_inode_info *f;
	struct jffs2_inode_cache *ic;
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *raw;
	int ret = 0, inum, nlink;

	if (down_interruptible(&c->alloc_sem))
		return -EINTR;

	for (;;) {
		spin_lock(&c->erase_completion_lock);
		if (!c->unchecked_size)
			break;

		/* We can't start doing GC yet. We haven't finished checking
		   the node CRCs etc. Do it now. */
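		/* Note that each call checks at most one unchecked inode
		   and then returns, so we get called repeatedly until
		   c->unchecked_size finally drops to zero and real GC can
		   begin. */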
		
		/* checked_ino is protected by the alloc_sem */
		if (c->checked_ino > c->highest_ino) {
			printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
			       c->unchecked_size);
			D1(jffs2_dump_block_lists(c));
			spin_unlock(&c->erase_completion_lock);
			BUG();
		}

		spin_unlock(&c->erase_completion_lock);

		spin_lock(&c->inocache_lock);

		ic = jffs2_get_ino_cache(c, c->checked_ino++);

		if (!ic) {
			spin_unlock(&c->inocache_lock);
			continue;
		}

		if (!ic->nlink) {
			D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n",
				  ic->ino));
			spin_unlock(&c->inocache_lock);
			continue;
		}
		switch(ic->state) {
		case INO_STATE_CHECKEDABSENT:
		case INO_STATE_PRESENT:
			D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino));
			spin_unlock(&c->inocache_lock);
			continue;

		case INO_STATE_GC:
		case INO_STATE_CHECKING:
			printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state);
			spin_unlock(&c->inocache_lock);
			BUG();

		case INO_STATE_READING:
			/* We need to wait for it to finish, lest we move on
			   and trigger the BUG() above while we haven't yet 
			   finished checking all its nodes */
			D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
			up(&c->alloc_sem);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			return 0;

		default:
			BUG();

		case INO_STATE_UNCHECKED:
			;
		}
		ic->state = INO_STATE_CHECKING;
		spin_unlock(&c->inocache_lock);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));

		ret = jffs2_do_crccheck_inode(c, ic);
		if (ret)
			printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);

		jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
		up(&c->alloc_sem);
		return ret;
	}

	/* First, work out which block we're garbage-collecting */
	jeb = c->gcblock;

	if (!jeb)
		jeb = jffs2_find_gc_block(c);

	if (!jeb) {
		printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n");
		spin_unlock(&c->erase_completion_lock);
		up(&c->alloc_sem);
		return -EIO;
	}

	D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size));
	D1(if (c->nextblock)
	   printk(KERN_DEBUG "Nextblock at  %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));

	if (!jeb->used_size) {
		up(&c->alloc_sem);
		goto eraseit;
	}

	raw = jeb->gc_node;
			
	while(ref_obsolete(raw)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
		raw = raw->next_phys;
		if (unlikely(!raw)) {
			printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
			printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", 
			       jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
			jeb->gc_node = raw;
			spin_unlock(&c->erase_completion_lock);
			up(&c->alloc_sem);
			BUG();
		}
	}
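	/* Cache how far we got, so the obsolete nodes we just skipped are
	   not rescanned on the next pass. */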
	jeb->gc_node = raw;

	D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));

	if (!raw->next_in_ino) {
		/* Inode-less node. Clean marker, snapshot or something like that */
		/* FIXME: If it's something that needs to be copied, including something
		   we don't grok that has JFFS2_NODETYPE_RWCOMPAT_COPY, we should do so */
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, raw);
		up(&c->alloc_sem);
		goto eraseit_lock;
	}

	ic = jffs2_raw_ref_to_ic(raw);

	/* We need to hold the inocache. Either the erase_completion_lock or
	   the inocache_lock is sufficient; we trade down to the latter since
	   it causes less contention. */
	spin_lock(&c->inocache_lock);

	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));

	/* Three possibilities:
	   1. Inode is already in-core. We must iget it and do proper
	      updating to its fragtree, etc.
	   2. Inode is not in-core, node is REF_PRISTINE. We lock the
	      inocache to prevent a read_inode(), copy the node intact.
	   3. Inode is not in-core, node is not pristine. We must iget()
	      and take the slow path.
	*/
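	/* In the switch below, possibility 1 appears as INO_STATE_PRESENT,
	   while possibilities 2 and 3 both arrive as INO_STATE_CHECKEDABSENT
	   and are told apart by ref_flags(raw) == REF_PRISTINE. An inode
	   that is still being read in is a transient fourth case: we drop
	   the locks, wait, and let the caller retry. */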

	switch(ic->state) {
	case INO_STATE_CHECKEDABSENT:
		/* It's been checked, but it's not currently in-core. 
		   We can just copy any pristine nodes, but have
		   to prevent anyone else from doing read_inode() while
		   we're at it, so we set the state accordingly */
		if (ref_flags(raw) == REF_PRISTINE)
			ic->state = INO_STATE_GC;
		else {
			D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", 
				  ic->ino));
		}
		break;

	case INO_STATE_PRESENT:
		/* It's in-core. GC must iget() it. */
		break;

	case INO_STATE_UNCHECKED:
	case INO_STATE_CHECKING:
	case INO_STATE_GC:
		/* Should never happen. We should have finished checking
		   by the time we actually start doing any GC, and since 
		   we're holding the alloc_sem, no other garbage collection 
		   can happen.
		*/
		printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
		       ic->ino, ic->state);
		up(&c->alloc_sem);
		spin_unlock(&c->inocache_lock);
		BUG();

	case INO_STATE_READING:
		/* Someone's currently trying to read it. We must wait for
		   them to finish and then go through the full iget() route
		   to do the GC. However, sometimes read_inode() needs to get
		   the alloc_sem (for marking nodes invalid), so we must
		   drop the alloc_sem before sleeping. */

		up(&c->alloc_sem);
		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
			  ic->ino, ic->state));
		sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
		/* And because we dropped the alloc_sem we must start again from the 
		   beginning. Ponder chance of livelock here -- we're returning success
		   without actually making any progress.

		   Q: What are the chances that the inode is back in INO_STATE_READING 
		   again by the time we next enter this function? And that this happens
		   enough times to cause a real delay?

		   A: Small enough that I don't care :) 
		*/
		return 0;
	}

	/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
	   node intact, and we don't have to muck about with the fragtree etc. 
	   because we know it's not in-core. If it _was_ in-core, we go through
	   all the iget() crap anyway */

	if (ic->state == INO_STATE_GC) {
		spin_unlock(&c->inocache_lock);

		ret = jffs2_garbage_collect_pristine(c, ic, raw);

		spin_lock(&c->inocache_lock);
		ic->state = INO_STATE_CHECKEDABSENT;
		wake_up(&c->inocache_wq);

		if (ret != -EBADFD) {
			spin_unlock(&c->inocache_lock);
			goto release_sem;
		}

		/* Fall through if it wanted us to, with inocache_lock held */
	}

	/* Prevent the fairly unlikely race where the gcblock is
	   entirely obsoleted by the final close of a file which had
	   the only valid nodes in the block, followed by erasure,
	   followed by freeing of the ic because the erased block(s)
	   held _all_ the nodes of that inode.... never been seen but
	   it's vaguely possible. */
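	/* That is why we copy ino and nlink out under the inocache_lock
	   here and never touch ic again once the lock is dropped. */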

	inum = ic->ino;
	nlink = ic->nlink;
	spin_unlock(&c->inocache_lock);

	f = jffs2_gc_fetch_inode(c, inum, nlink);
	if (IS_ERR(f))
		return PTR_ERR(f);
	if (!f)
		return 0;

	ret = jffs2_garbage_collect_live(c, jeb, raw, f);

	jffs2_gc_release_inode(c, f);

 release_sem:
	up(&c->alloc_sem);

 eraseit_lock:
	/* If we've finished this block, start it erasing */
	spin_lock(&c->erase_completion_lock);

 eraseit:
	if (c->gcblock && !c->gcblock->used_size) {
		D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
		/* We're GC'ing an empty block? */
		list_add_tail(&c->gcblock->list, &c->erase_pending_list);
		c->gcblock = NULL;
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}
	spin_unlock(&c->erase_completion_lock);

	return ret;
}

static int jffs2_garbage_collect_live(struct jffs2_sb_info *c,  struct jffs2_eraseblock *jeb,
				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dnode *fn = NULL;
	struct jffs2_full_dirent *fd;
	uint32_t start = 0, end = 0, nrfrags = 0;
	int ret = 0;

	down(&f->sem);

	/* Now we have the lock for this inode. Check that the block is still
	   the one being GC'd and that the node hasn't been obsoleted in the
	   meantime. */

	spin_lock(&c->erase_completion_lock);

	if (c->gcblock != jeb) {
		spin_unlock(&c->erase_completion_lock);
		D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
		goto upnout;
	}
	if (ref_obsolete(raw)) {
		spin_unlock(&c->erase_completion_lock);
