index.c
来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 517 行 · 第 1/2 页
C
517 行
	/*
	 * NOTE(review): this is the continuation of ntfs_index_lookup() -- the
	 * function's opening (index root search) precedes this chunk.  At this
	 * point the key was not found in the index root and the matched entry
	 * indicated a child node, so we descend into the index allocation.
	 */
	if (!NInoIndexAllocPresent(idx_ni)) {
		ntfs_error(sb, "No index allocation attribute but index entry "
				"requires one. Inode 0x%lx is corrupt or "
				"driver bug.", idx_ni->mft_no);
		err = -EIO;
		goto err_out;
	}
	/*
	 * Get the starting vcn of the index_block holding the child node.
	 * The sub-node VCN is stored in the last eight bytes of the entry.
	 */
	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
	ia_mapping = VFS_I(idx_ni)->i_mapping;
	/*
	 * We are done with the index root and the mft record. Release them,
	 * otherwise we deadlock with ntfs_map_page().
	 */
	ntfs_attr_put_search_ctx(actx);
	unmap_mft_record(base_ni);
	/* Clear so the err_out path does not release them a second time. */
	m = NULL;
	actx = NULL;
descend_into_child_node:
	/*
	 * Convert vcn to index into the index allocation attribute in units
	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
	 * disk if necessary.
	 */
	page = ntfs_map_page(ia_mapping, vcn <<
			idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
	if (IS_ERR(page)) {
		ntfs_error(sb, "Failed to map index page, error %ld.",
				-PTR_ERR(page));
		err = PTR_ERR(page);
		goto err_out;
	}
	lock_page(page);
	kaddr = (u8*)page_address(page);
fast_descend_into_child_node:
	/*
	 * Get to the index allocation block.  Entered directly (page already
	 * mapped and locked) when the child node lives in the same page cache
	 * page as its parent.
	 */
	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
			idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
	/* Bounds checks. */
	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Out of bounds check failed. Corrupt inode "
				"0x%lx or driver bug.", idx_ni->mft_no);
		err = -EIO;
		goto unm_err_out;
	}
	/* The on-disk index block must record the VCN we expected to read. */
	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
				"different from expected VCN (0x%llx). Inode "
				"0x%lx is corrupt or driver bug.",
				(unsigned long long)
				sle64_to_cpu(ia->index_block_vcn),
				(unsigned long long)vcn, idx_ni->mft_no);
		err = -EIO;
		goto unm_err_out;
	}
	/*
	 * allocated_size excludes the 0x18 byte index block header, hence the
	 * adjustment before comparing against the declared index block size.
	 */
	if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
			idx_ni->itype.index.block_size) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx has "
				"a size (%u) differing from the index "
				"specified size (%u). Inode is corrupt or "
				"driver bug.", (unsigned long long)vcn,
				idx_ni->mft_no,
				le32_to_cpu(ia->index.allocated_size) + 0x18,
				idx_ni->itype.index.block_size);
		err = -EIO;
		goto unm_err_out;
	}
	index_end = (u8*)ia + idx_ni->itype.index.block_size;
	if (index_end > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
				"crosses page boundary. Impossible! Cannot "
				"access! This is probably a bug in the "
				"driver.", (unsigned long long)vcn,
				idx_ni->mft_no);
		err = -EIO;
		goto unm_err_out;
	}
	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
	if (index_end > (u8*)ia + idx_ni->itype.index.block_size) {
		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of inode "
				"0x%lx exceeds maximum size.",
				(unsigned long long)vcn, idx_ni->mft_no);
		err = -EIO;
		goto unm_err_out;
	}
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ia->index +
			le32_to_cpu(ia->index.entries_offset));
	/*
	 * Iterate similar to above big loop but applied to index buffer, thus
	 * loop until we exceed valid memory (corruption case) or until we
	 * reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds checks. */
		if ((u8*)ie < (u8*)ia || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->length) >
				index_end) {
			ntfs_error(sb, "Index entry out of bounds in inode "
					"0x%lx.", idx_ni->mft_no);
			err = -EIO;
			goto unm_err_out;
		}
		/*
		 * The last entry cannot contain a key. It can however contain
		 * a pointer to a child node in the B+tree so we just break
		 * out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/* Further bounds checks. */
		if ((u32)sizeof(INDEX_ENTRY_HEADER) +
				le16_to_cpu(ie->key_length) >
				le16_to_cpu(ie->data.vi.data_offset) ||
				(u32)le16_to_cpu(ie->data.vi.data_offset) +
				le16_to_cpu(ie->data.vi.data_length) >
				le16_to_cpu(ie->length)) {
			ntfs_error(sb, "Index entry out of bounds in inode "
					"0x%lx.", idx_ni->mft_no);
			err = -EIO;
			goto unm_err_out;
		}
		/* If the keys match perfectly, we setup @ictx and return 0. */
		if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
				&ie->key, key_len)) {
ia_done:
			/*
			 * Entry found in the index allocation: hand the mapped
			 * page to the caller via @ictx (released later through
			 * ntfs_index_ctx_put()).  Also reached with err set to
			 * 0 (match) or -ENOENT (not found, no child node).
			 */
			ictx->is_in_root = FALSE;
			ictx->actx = NULL;
			ictx->base_ni = NULL;
			ictx->ia = ia;
			ictx->page = page;
			goto done;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
				key_len, &ie->key, le16_to_cpu(ie->key_length));
		/*
		 * If @key collates before the key of the current entry, there
		 * is definitely no such key in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/*
		 * A match should never happen as the memcmp() call should have
		 * caught it, but we still treat it correctly.
		 */
		if (!rc)
			goto ia_done;
		/* The keys are not equal, continue the search. */
	}
	/*
	 * We have finished with this index buffer without success. Check for
	 * the presence of a child node and if not present return -ENOENT.
	 */
	if (!(ie->flags & INDEX_ENTRY_NODE)) {
		ntfs_debug("Entry not found.");
		err = -ENOENT;
		goto ia_done;
	}
	if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
		ntfs_error(sb, "Index entry with child node found in a leaf "
				"node in inode 0x%lx.", idx_ni->mft_no);
		err = -EIO;
		goto unm_err_out;
	}
	/* Child node present, descend into it. */
	old_vcn = vcn;
	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
	if (vcn >= 0) {
		/*
		 * If vcn is in the same page cache page as old_vcn we recycle
		 * the mapped page.
		 */
		if (old_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT ==
				vcn << vol->cluster_size_bits >>
				PAGE_CACHE_SHIFT)
			goto fast_descend_into_child_node;
		unlock_page(page);
		ntfs_unmap_page(page);
		goto descend_into_child_node;
	}
	ntfs_error(sb, "Negative child node vcn in inode 0x%lx.",
			idx_ni->mft_no);
	err = -EIO;
unm_err_out:
	unlock_page(page);
	ntfs_unmap_page(page);
	/* Fall through to err_out to release any still-held resources. */
err_out:
	if (actx)
		ntfs_attr_put_search_ctx(actx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
idx_err_out:
	/* Reached via goto from the index root phase earlier in the function. */
	ntfs_error(sb, "Corrupt index. Aborting lookup.");
	err = -EIO;
	goto err_out;
}

#ifdef NTFS_RW

/**
 * __ntfs_index_entry_mark_dirty - mark an index allocation entry dirty
 * @ictx:	ntfs index context describing the index entry
 *
 * NOTE: You want to use fs/ntfs/index.h::ntfs_index_entry_mark_dirty() instead!
 *
 * Mark the index allocation entry described by the index entry context @ictx
 * dirty.
 *
 * The index entry must be in an index block belonging to the index allocation
 * attribute. Mark the buffers belonging to the index record as well as the
 * page cache page the index block is in dirty. This automatically marks the
 * VFS inode of the ntfs index inode to which the index entry belongs dirty,
 * too (I_DIRTY_PAGES) and this in turn ensures the page buffers, and hence the
 * dirty index block, will be written out to disk later.
 */
void __ntfs_index_entry_mark_dirty(ntfs_index_context *ictx)
{
	ntfs_inode *ni;
	struct page *page;
	struct buffer_head *bh, *head;
	unsigned int rec_start, rec_end, bh_size, bh_start, bh_end;

	/* Must not be called for entries located in the index root. */
	BUG_ON(ictx->is_in_root);
	ni = ictx->idx_ni;
	page = ictx->page;
	BUG_ON(!page_has_buffers(page));
	/*
	 * If the index block is the same size as the page cache page, set all
	 * the buffers in the page, as well as the page itself, dirty.
	 */
	if (ni->itype.index.block_size == PAGE_CACHE_SIZE) {
		__set_page_dirty_buffers(page);
		return;
	}
	/* Set only the buffers in which the index block is located dirty. */
	rec_start = (unsigned int)((u8*)ictx->ia - (u8*)page_address(page));
	rec_end = rec_start + ni->itype.index.block_size;
	bh_size = ni->vol->sb->s_blocksize;
	bh_start = 0;
	bh = head = page_buffers(page);
	do {
		/* Dirty a buffer only if it lies wholly inside the record. */
		bh_end = bh_start + bh_size;
		if ((bh_start >= rec_start) && (bh_end <= rec_end))
			set_buffer_dirty(bh);
		bh_start = bh_end;
	} while ((bh = bh->b_this_page) != head);
	/* Finally, set the page itself dirty, too. */
	__set_page_dirty_nobuffers(page);
}

#endif /* NTFS_RW */
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?