/* nodelist.c (excerpt) */
				/* If it's actually overlapped, it'll get made NORMAL (or OBSOLETE)
				   when the overlapping node(s) get added to the tree anyway. */
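				/* The PRISTINE test below: the node holds at least one
				   whole page of data, or it starts page-aligned and runs
				   exactly to isize (a complete tail page). Anything else
				   is only REF_NORMAL, presumably because it cannot be
				   copied intact by the garbage collector. */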
				if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) ||
				    ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) &&
				      (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) == je32_to_cpu(node.i.isize)))) {
					D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref)));
					ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
				} else {
					D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref)));
					ref->flash_offset = ref_offset(ref) | REF_NORMAL;
				}
				spin_unlock(&c->erase_completion_lock);
			}
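
			/* Queue this data node for the caller: the tmp_dnode_info
			   carries the version, and the full_dnode records where the
			   data lives (raw ref, file offset, size). The caller later
			   builds the fragtree from the returned list. */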
			tn = jffs2_alloc_tmp_dnode_info();
			if (!tn) {
				D1(printk(KERN_DEBUG "alloc tn failed\n"));
				err = -ENOMEM;
				goto free_out;
			}

			tn->fn = jffs2_alloc_full_dnode();
			if (!tn->fn) {
				D1(printk(KERN_DEBUG "alloc fn failed\n"));
				err = -ENOMEM;
				jffs2_free_tmp_dnode_info(tn);
				goto free_out;
			}
			tn->version = je32_to_cpu(node.i.version);
			tn->fn->ofs = je32_to_cpu(node.i.offset);

			/* There was a bug where we wrote hole nodes out with
			   csize/dsize swapped. Deal with it */
			if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize))
				tn->fn->size = je32_to_cpu(node.i.csize);
			else /* normal case... */
				tn->fn->size = je32_to_cpu(node.i.dsize);

			tn->fn->raw = ref;
			D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n",
				  ref_offset(ref), je32_to_cpu(node.i.version),
				  je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize)));
			jffs2_add_tn_to_list(tn, &ret_tn);
			break;
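
		/* An unrecognised node type: decide what to do with it from
		   its compatibility bits. INCOMPAT means we cannot safely
		   touch the filesystem at all; ROCOMPAT is tolerable only on
		   a read-only mount; RWCOMPAT_COPY nodes are left alone, and
		   RWCOMPAT_DELETE nodes are simply marked obsolete. */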
		default:
			if (ref_flags(ref) == REF_UNCHECKED) {
				struct jffs2_eraseblock *jeb;
				uint32_t len;

				printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n",
				       je16_to_cpu(node.u.nodetype), ref_offset(ref));

				/* Mark the node as having been checked and fix the accounting accordingly */
				spin_lock(&c->erase_completion_lock);
				jeb = &c->blocks[ref->flash_offset / c->sector_size];
				len = ref_totlen(c, jeb, ref);

				jeb->used_size += len;
				jeb->unchecked_size -= len;
				c->used_size += len;
				c->unchecked_size -= len;

				mark_ref_normal(ref);
				spin_unlock(&c->erase_completion_lock);
			}
			node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype));
			if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) {
				/* Hmmm. This should have been caught at scan time. */
				printk(KERN_ERR "Node header CRC failed at %08x. But it must have been OK earlier.\n",
				       ref_offset(ref));
				printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n",
				       je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen),
				       je32_to_cpu(node.u.hdr_crc));
				jffs2_mark_node_obsolete(c, ref);
			} else switch (je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n",
				       je16_to_cpu(node.u.nodetype), ref_offset(ref));
				/* EEP */
				BUG();
				break;
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n",
				       je16_to_cpu(node.u.nodetype), ref_offset(ref));
				if (!(c->flags & JFFS2_SB_FLAG_RO))
					BUG();
				break;
			case JFFS2_FEATURE_RWCOMPAT_COPY:
				printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n",
				       je16_to_cpu(node.u.nodetype), ref_offset(ref));
				break;
			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n",
				       je16_to_cpu(node.u.nodetype), ref_offset(ref));
				jffs2_mark_node_obsolete(c, ref);
				break;
			}
		}
		spin_lock(&c->erase_completion_lock);
	}
	spin_unlock(&c->erase_completion_lock);

	*tnp = ret_tn;
	*fdp = ret_fd;
	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	return err;
}
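
/* Set the state of an inocache entry and wake anything sleeping on
   c->inocache_wq waiting for that state to change (presumably paths
   that found the inode in a transient state). */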
void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
{
	spin_lock(&c->inocache_lock);
	ic->state = state;
	wake_up(&c->inocache_wq);
	spin_unlock(&c->inocache_lock);
}

/* During mount, this needs no locking. During normal operation, its
   callers want to do other stuff while still holding the
   inocache_lock. Rather than introducing special case get_ino_cache
   functions or callbacks, we just let the caller do the locking
   itself. */
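
/*
 * Usage sketch (hypothetical caller, not from this file): per the
 * comment above, taking the lock is the caller's job.
 *
 *	spin_lock(&c->inocache_lock);
 *	ic = jffs2_get_ino_cache(c, ino);
 *	if (ic) {
 *		... use ic while still holding the lock ...
 *	}
 *	spin_unlock(&c->inocache_lock);
 */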
struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inode_cache *ret;

	D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino));

	ret = c->inocache_list[ino % INOCACHE_HASHSIZE];
	while (ret && ret->ino < ino) {
		ret = ret->next;
	}

	if (ret && ret->ino != ino)
		ret = NULL;

	D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino));
	return ret;
}
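
/* Each hash bucket is a singly linked list kept sorted by inode
   number; the insertion below preserves that order, which is what
   lets jffs2_get_ino_cache() stop walking as soon as it has passed
   the ino it wants. */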
void jffs2_add_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *new)
{
	struct jffs2_inode_cache **prev;

	D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino));
	spin_lock(&c->inocache_lock);

	prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE];
	while ((*prev) && (*prev)->ino < new->ino) {
		prev = &(*prev)->next;
	}
	new->next = *prev;
	*prev = new;

	spin_unlock(&c->inocache_lock);
}
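
/* Unlink an entry from its (sorted) hash bucket. This only removes
   the entry from the list; freeing it is left to the caller. */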
void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
{
	struct jffs2_inode_cache **prev;

	D2(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino));
	spin_lock(&c->inocache_lock);

	prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE];
	while ((*prev) && (*prev)->ino < old->ino) {
		prev = &(*prev)->next;
	}
	if ((*prev) == old) {
		*prev = old->next;
	}

	spin_unlock(&c->inocache_lock);
}
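
/* Drop every inocache entry outright. No lock is taken, so this is
   presumably only safe once there can be no concurrent users, e.g.
   at unmount or after a failed mount. */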
void jffs2_free_ino_caches(struct jffs2_sb_info *c)
{
	int i;
	struct jffs2_inode_cache *this, *next;

	for (i=0; i<INOCACHE_HASHSIZE; i++) {
		this = c->inocache_list[i];
		while (this) {
			next = this->next;
			D2(printk(KERN_DEBUG "jffs2_free_ino_caches: Freeing ino #%u at %p\n", this->ino, this));
			jffs2_free_inode_cache(this);
			this = next;
		}
		c->inocache_list[i] = NULL;
	}
}
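
/* Likewise for the raw node refs: walk each eraseblock's physical
   node list via next_phys and free every ref. */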
void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
{
	int i;
	struct jffs2_raw_node_ref *this, *next;

	for (i=0; i<c->nr_blocks; i++) {
		this = c->blocks[i].first_node;
		while (this) {
			next = this->next_phys;
			jffs2_free_raw_node_ref(this);
			this = next;
		}
		c->blocks[i].first_node = c->blocks[i].last_node = NULL;
	}
}
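
/* Return the frag covering 'offset'; failing that, the closest frag
   which starts below it; NULL if the tree is empty or every frag
   starts beyond 'offset'. */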
struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
{
	/* The common case in lookup is that there will be a node
	   which precisely matches. So we go looking for that first */
	struct rb_node *next;
	struct jffs2_node_frag *prev = NULL;
	struct jffs2_node_frag *frag = NULL;

	D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset));

	next = fragtree->rb_node;

	while (next) {
		frag = rb_entry(next, struct jffs2_node_frag, rb);

		D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n",
			  frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right));
		if (frag->ofs + frag->size <= offset) {
			D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n",
				  frag->ofs, frag->ofs+frag->size));
			/* Remember the closest smaller match on the way down */
			if (!prev || frag->ofs > prev->ofs)
				prev = frag;
			next = frag->rb.rb_right;
		} else if (frag->ofs > offset) {
			D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n",
				  frag->ofs, frag->ofs+frag->size));
			next = frag->rb.rb_left;
		} else {
			D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n",
				  frag->ofs, frag->ofs+frag->size));
			return frag;
		}
	}

	/* Exact match not found. Go back up looking at each parent,
	   and return the closest smaller one */
	if (prev)
		D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n",
			  prev->ofs, prev->ofs+prev->size));
	else
		D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n"));

	return prev;
}
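
/*
 * Usage sketch (hypothetical caller, not from this file): a reader
 * must re-check containment, since the closest-below frag may still
 * end before the offset it wants.
 *
 *	frag = jffs2_lookup_node_frag(&f->fragtree, pos);
 *	if (frag && pos < frag->ofs + frag->size) {
 *		if (frag->node)
 *			... read data through frag->node ...
 *		else
 *			... a hole: supply zeroes ...
 *	}
 */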

/* Pass 'c' argument to indicate that nodes should be marked obsolete
   as they're killed. */
void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
{
	struct jffs2_node_frag *frag;
	struct jffs2_node_frag *parent;

	if (!root->rb_node)
		return;

	frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));

	while (frag) {
		if (frag->rb.rb_left) {
			D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n",
				  frag, frag->ofs, frag->ofs+frag->size));
			frag = frag_left(frag);
			continue;
		}
		if (frag->rb.rb_right) {
			D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n",
				  frag, frag->ofs, frag->ofs+frag->size));
			frag = frag_right(frag);
			continue;
		}

		D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n",
			  frag->ofs, frag->ofs+frag->size, frag->node,
			  frag->node?frag->node->frags:0));

		if (frag->node && !(--frag->node->frags)) {
			/* Not a hole, and it's the final remaining frag
			   of this node. Free the node */
			if (c)
				jffs2_mark_node_obsolete(c, frag->node->raw);

			jffs2_free_full_dnode(frag->node);
		}
		parent = frag_parent(frag);
		if (parent) {
			if (frag_left(parent) == frag)
				parent->rb.rb_left = NULL;
			else
				parent->rb.rb_right = NULL;
		}

		jffs2_free_node_frag(frag);
		frag = parent;

		cond_resched();
	}
}
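
/*
 * Usage sketch (hypothetical callers, not from this file):
 *
 *	jffs2_kill_fragtree(&f->fragtree, c);      with 'c': also mark the nodes obsolete
 *	jffs2_kill_fragtree(&f->fragtree, NULL);   without: just free the in-core tree
 */

/* Link newfrag into the fragtree below 'base', keyed on frag offset.
   Note this only calls rb_link_node(); the caller is expected to
   rebalance afterwards with rb_insert_color(). */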
void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
{
	struct rb_node *parent = &base->rb;
	struct rb_node **link = &parent;

	D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag,
		  newfrag->ofs, newfrag->ofs+newfrag->size, base));

	while (*link) {
		parent = *link;
		base = rb_entry(parent, struct jffs2_node_frag, rb);

		D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs));
		if (newfrag->ofs > base->ofs)
			link = &base->rb.rb_right;
		else if (newfrag->ofs < base->ofs)
			link = &base->rb.rb_left;
		else {
			printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
			BUG();
		}
	}

	rb_link_node(&newfrag->rb, &base->rb, link);
}
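
/*
 * Usage sketch (hypothetical caller, not from this file): link, then
 * let the rbtree code rebalance.
 *
 *	jffs2_fragtree_insert(newfrag, base);
 *	rb_insert_color(&newfrag->rb, fragtree);
 */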