
📄 mft.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5

/**
 * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2006 Anton Altaparmakov
 * Copyright (c) 2002 Richard Russon
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/buffer_head.h>
#include <linux/swap.h>

#include "attrib.h"
#include "aops.h"
#include "bitmap.h"
#include "debug.h"
#include "dir.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"

/**
 * map_mft_record_page - map the page in which a specific mft record resides
 * @ni:		ntfs inode whose mft record page to map
 *
 * This maps the page in which the mft record of the ntfs inode @ni is situated
 * and returns a pointer to the mft record within the mapped page.
 *
 * Return value needs to be checked with IS_ERR() and if that is true PTR_ERR()
 * contains the negative error code returned.
 */
static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
{
        loff_t i_size;
        ntfs_volume *vol = ni->vol;
        struct inode *mft_vi = vol->mft_ino;
        struct page *page;
        unsigned long index, end_index;
        unsigned ofs;

        BUG_ON(ni->page);
        /*
         * The index into the page cache and the offset within the page cache
         * page of the wanted mft record. FIXME: We need to check for
         * overflowing the unsigned long, but I don't think we would ever get
         * here if the volume was that big...
         */
        index = (u64)ni->mft_no << vol->mft_record_size_bits >>
                        PAGE_CACHE_SHIFT;
        ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;

        i_size = i_size_read(mft_vi);
        /* The maximum valid index into the page cache for $MFT's data. */
        end_index = i_size >> PAGE_CACHE_SHIFT;

        /* If the wanted index is out of bounds the mft record doesn't exist. */
        if (unlikely(index >= end_index)) {
                if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs +
                                vol->mft_record_size) {
                        page = ERR_PTR(-ENOENT);
                        ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
                                        "which is beyond the end of the mft.  "
                                        "This is probably a bug in the ntfs "
                                        "driver.", ni->mft_no);
                        goto err_out;
                }
        }
        /* Read, map, and pin the page. */
        page = ntfs_map_page(mft_vi->i_mapping, index);
        if (likely(!IS_ERR(page))) {
                /* Catch multi sector transfer fixup errors. */
                if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
                                ofs)))) {
                        ni->page = page;
                        ni->page_ofs = ofs;
                        return page_address(page) + ofs;
                }
                ntfs_error(vol->sb, "Mft record 0x%lx is corrupt.  "
                                "Run chkdsk.", ni->mft_no);
                ntfs_unmap_page(page);
                page = ERR_PTR(-EIO);
                NVolSetErrors(vol);
        }
err_out:
        ni->page = NULL;
        ni->page_ofs = 0;
        return (void*)page;
}
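
/*
 * Illustrative sketch, not part of mft.c: the index/ofs arithmetic in
 * map_mft_record_page() worked through with assumed typical values of
 * 1024-byte mft records (mft_record_size_bits == 10) and 4096-byte page
 * cache pages (PAGE_CACHE_SHIFT == 12, PAGE_CACHE_MASK == ~4095).  For
 * mft record number 5:
 *
 *	index = ((u64)5 << 10) >> 12;		// 5120 >> 12 == 1
 *	ofs   = (5 << 10) & ~PAGE_CACHE_MASK;	// 5120 & 4095 == 1024
 *
 * i.e. record 5 is the second record inside page cache page 1 of $MFT's
 * data, so four consecutive 1024-byte records share each 4096-byte page
 * and only one ntfs_map_page() call is needed per such group of records.
 */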

/**
 * map_mft_record - map, pin and lock an mft record
 * @ni:		ntfs inode whose MFT record to map
 *
 * First, take the mrec_lock mutex.  We might now be sleeping, while waiting
 * for the mutex if it was already locked by someone else.
 *
 * The page of the record is mapped using map_mft_record_page() before being
 * returned to the caller.
 *
 * This in turn uses ntfs_map_page() to get the page containing the wanted mft
 * record (it in turn calls read_cache_page() which reads it in from disk if
 * necessary, increments the use count on the page so that it cannot disappear
 * under us and returns a reference to the page cache page).
 *
 * If read_cache_page() invokes ntfs_readpage() to load the page from disk, it
 * sets PG_locked and clears PG_uptodate on the page. Once I/O has completed
 * and the post-read mst fixups on each mft record in the page have been
 * performed, the page gets PG_uptodate set and PG_locked cleared (this is done
 * in our asynchronous I/O completion handler end_buffer_read_mft_async()).
 * ntfs_map_page() waits for PG_locked to become clear and checks if
 * PG_uptodate is set and returns an error code if not. This provides
 * sufficient protection against races when reading/using the page.
 *
 * However there is the write mapping to think about. Doing the above described
 * checking here will be fine, because when initiating the write we will set
 * PG_locked and clear PG_uptodate making sure nobody is touching the page
 * contents. Doing the locking this way means that the commit to disk code in
 * the page cache code paths is automatically sufficiently locked with us as
 * we will not touch a page that has been locked or is not uptodate. The only
 * locking problem then is them locking the page while we are accessing it.
 *
 * So that code will end up having to own the mrec_lock of all mft
 * records/inodes present in the page before I/O can proceed. In that case we
 * wouldn't need to bother with PG_locked and PG_uptodate as nobody will be
 * accessing anything without owning the mrec_lock mutex.  But we do need to
 * use them because of the read_cache_page() invocation and the code becomes so
 * much simpler this way that it is well worth it.
 *
 * The mft record is now ours and we return a pointer to it. You need to check
 * the returned pointer with IS_ERR() and if that is true, PTR_ERR() will
 * return the error code.
 *
 * NOTE: Caller is responsible for setting the mft record dirty before calling
 * unmap_mft_record(). This is obviously only necessary if the caller really
 * modified the mft record...
 * Q: Do we want to recycle one of the VFS inode state bits instead?
 * A: No, the inode ones mean we want to change the mft record, not we want to
 * write it out.
 */
MFT_RECORD *map_mft_record(ntfs_inode *ni)
{
        MFT_RECORD *m;

        ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no);

        /* Make sure the ntfs inode doesn't go away. */
        atomic_inc(&ni->count);

        /* Serialize access to this mft record. */
        mutex_lock(&ni->mrec_lock);

        m = map_mft_record_page(ni);
        if (likely(!IS_ERR(m)))
                return m;

        mutex_unlock(&ni->mrec_lock);
        atomic_dec(&ni->count);
        ntfs_error(ni->vol->sb, "Failed with error code %lu.", -PTR_ERR(m));
        return m;
}
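
/*
 * Illustrative sketch, not part of mft.c: the caller-side pattern implied by
 * the map_mft_record() contract above.  modify_record() is a hypothetical
 * stand-in for whatever change the caller makes; map_mft_record(),
 * unmap_mft_record() and the mark_mft_record_dirty() helper from mft.h are
 * the real interfaces documented in this file.
 *
 *	MFT_RECORD *m = map_mft_record(ni);
 *
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	modify_record(m);		// hypothetical modification
 *	mark_mft_record_dirty(ni);	// must happen BEFORE unmapping
 *	unmap_mft_record(ni);
 *	return 0;
 */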

/**
 * unmap_mft_record_page - unmap the page in which a specific mft record resides
 * @ni:		ntfs inode whose mft record page to unmap
 *
 * This unmaps the page in which the mft record of the ntfs inode @ni is
 * situated and returns. This is a NOOP if highmem is not configured.
 *
 * The unmap happens via ntfs_unmap_page() which in turn decrements the use
 * count on the page thus releasing it from the pinned state.
 *
 * We do not actually unmap the page from memory of course, as that will be
 * done by the page cache code itself when memory pressure increases or
 * whatever.
 */
static inline void unmap_mft_record_page(ntfs_inode *ni)
{
        BUG_ON(!ni->page);

        // TODO: If dirty, blah...
        ntfs_unmap_page(ni->page);
        ni->page = NULL;
        ni->page_ofs = 0;
        return;
}

/**
 * unmap_mft_record - release a mapped mft record
 * @ni:		ntfs inode whose MFT record to unmap
 *
 * We release the page mapping and the mrec_lock mutex which unmaps the mft
 * record and releases it for others to get hold of. We also release the ntfs
 * inode by decrementing the ntfs inode reference count.
 *
 * NOTE: If caller has modified the mft record, it is imperative to set the mft
 * record dirty BEFORE calling unmap_mft_record().
 */
void unmap_mft_record(ntfs_inode *ni)
{
        struct page *page = ni->page;

        BUG_ON(!page);

        ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no);

        unmap_mft_record_page(ni);
        mutex_unlock(&ni->mrec_lock);
        atomic_dec(&ni->count);
        /*
         * If pure ntfs_inode, i.e. no vfs inode attached, we leave it to
         * ntfs_clear_extent_inode() in the extent inode case, and to the
         * caller in the non-extent, yet pure ntfs inode case, to do the actual
         * tear down of all structures and freeing of all allocated memory.
         */
        return;
}

/**
 * map_extent_mft_record - load an extent inode and attach it to its base
 * @base_ni:	base ntfs inode
 * @mref:	mft reference of the extent inode to load
 * @ntfs_ino:	on successful return, pointer to the ntfs_inode structure
 *
 * Load the extent mft record @mref and attach it to its base inode @base_ni.
 * Return the mapped extent mft record if IS_ERR(result) is false.  Otherwise
 * PTR_ERR(result) gives the negative error code.
 *
 * On successful return, @ntfs_ino contains a pointer to the ntfs_inode
 * structure of the mapped extent inode.
 */
MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
                ntfs_inode **ntfs_ino)
{
        MFT_RECORD *m;
        ntfs_inode *ni = NULL;
        ntfs_inode **extent_nis = NULL;
        int i;
        unsigned long mft_no = MREF(mref);
        u16 seq_no = MSEQNO(mref);
        bool destroy_ni = false;

        ntfs_debug("Mapping extent mft record 0x%lx (base mft record 0x%lx).",
                        mft_no, base_ni->mft_no);
        /* Make sure the base ntfs inode doesn't go away. */
        atomic_inc(&base_ni->count);

        /*
         * Check if this extent inode has already been added to the base inode,
         * in which case just return it. If not found, add it to the base
         * inode before returning it.
         */
        mutex_lock(&base_ni->extent_lock);
        if (base_ni->nr_extents > 0) {
                extent_nis = base_ni->ext.extent_ntfs_inos;
                for (i = 0; i < base_ni->nr_extents; i++) {
                        if (mft_no != extent_nis[i]->mft_no)
                                continue;
                        ni = extent_nis[i];
                        /* Make sure the ntfs inode doesn't go away. */
                        atomic_inc(&ni->count);
                        break;
                }
        }
        if (likely(ni != NULL)) {
                mutex_unlock(&base_ni->extent_lock);
                atomic_dec(&base_ni->count);
                /* We found the record; just have to map and return it. */
                m = map_mft_record(ni);
                /* map_mft_record() has incremented this on success. */
                atomic_dec(&ni->count);
                if (likely(!IS_ERR(m))) {
                        /* Verify the sequence number. */
                        if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
                                ntfs_debug("Done 1.");
                                *ntfs_ino = ni;
                                return m;
                        }
                        unmap_mft_record(ni);
                        ntfs_error(base_ni->vol->sb, "Found stale extent mft "
                                        "reference! Corrupt filesystem. "
                                        "Run chkdsk.");
                        return ERR_PTR(-EIO);
                }
map_err_out:
                ntfs_error(base_ni->vol->sb, "Failed to map extent "
                                "mft record, error code %ld.", -PTR_ERR(m));
                return m;
        }
        /* Record wasn't there. Get a new ntfs inode and initialize it. */
        ni = ntfs_new_extent_inode(base_ni->vol->sb, mft_no);
        if (unlikely(!ni)) {
                mutex_unlock(&base_ni->extent_lock);
                atomic_dec(&base_ni->count);
                return ERR_PTR(-ENOMEM);
        }
        ni->vol = base_ni->vol;
        ni->seq_no = seq_no;
        ni->nr_extents = -1;
        ni->ext.base_ntfs_ino = base_ni;
        /* Now map the record. */
        m = map_mft_record(ni);
        if (IS_ERR(m)) {
                mutex_unlock(&base_ni->extent_lock);
                atomic_dec(&base_ni->count);
                ntfs_clear_extent_inode(ni);
                goto map_err_out;
        }
        /* Verify the sequence number if it is present. */
        if (seq_no && (le16_to_cpu(m->sequence_number) != seq_no)) {
                ntfs_error(base_ni->vol->sb, "Found stale extent mft "
                                "reference! Corrupt filesystem. Run chkdsk.");
                destroy_ni = true;
                m = ERR_PTR(-EIO);
                goto unm_err_out;
        }
        /* Attach extent inode to base inode, reallocating memory if needed. */
        if (!(base_ni->nr_extents & 3)) {
                ntfs_inode **tmp;
                int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode *);

                tmp = kmalloc(new_size, GFP_NOFS);
                if (unlikely(!tmp)) {
                        ntfs_error(base_ni->vol->sb, "Failed to allocate "
                                        "internal buffer.");
                        destroy_ni = true;
                        m = ERR_PTR(-ENOMEM);
                        goto unm_err_out;
                }
                if (base_ni->nr_extents) {
                        BUG_ON(!base_ni->ext.extent_ntfs_inos);
                        memcpy(tmp, base_ni->ext.extent_ntfs_inos, new_size -
                                        4 * sizeof(ntfs_inode *));
                        kfree(base_ni->ext.extent_ntfs_inos);
                }
                base_ni->ext.extent_ntfs_inos = tmp;
        }
        base_ni->ext.extent_ntfs_inos[base_ni->nr_extents++] = ni;
        mutex_unlock(&base_ni->extent_lock);
        atomic_dec(&base_ni->count);
        ntfs_debug("Done 2.");
        *ntfs_ino = ni;
        return m;
unm_err_out:
        unmap_mft_record(ni);
        mutex_unlock(&base_ni->extent_lock);
        atomic_dec(&base_ni->count);
        /*
         * If the extent inode was not attached to the base inode we need to
         * release it or we will leak memory.
         */
        if (destroy_ni)
                ntfs_clear_extent_inode(ni);
        return m;
}
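
/*
 * Illustrative sketch, not part of mft.c: typical caller-side use of
 * map_extent_mft_record() above.  The MFT_REF @mref is assumed to have been
 * read from on-disk metadata (for example an attribute list entry); the
 * calls shown (map_extent_mft_record(), unmap_mft_record(), IS_ERR()/
 * PTR_ERR()) are the real interfaces documented in this file.
 *
 *	ntfs_inode *ext_ni;
 *	MFT_RECORD *m;
 *
 *	m = map_extent_mft_record(base_ni, mref, &ext_ni);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	// ... use the extent mft record @m of the extent inode @ext_ni ...
 *	unmap_mft_record(ext_ni);
 *	return 0;
 */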

#ifdef NTFS_RW

/**
 * __mark_mft_record_dirty - set the mft record and the page containing it dirty
 * @ni:		ntfs inode describing the mapped mft record
 *
 * Internal function.  Users should call mark_mft_record_dirty() instead.
 *
 * Set the mapped (extent) mft record of the (base or extent) ntfs inode @ni,
 * as well as the page containing the mft record, dirty.  Also, mark the base
 * vfs inode dirty.  This ensures that any changes to the mft record are
 * written out to disk.
 *
 * NOTE:  We only set I_DIRTY_SYNC and I_DIRTY_DATASYNC (and not I_DIRTY_PAGES)
 * on the base vfs inode, because even though file data may have been modified,