aops.c

来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 2,031 行 · 第 1/4 页

C
2,031
字号
/** * aops.c - NTFS kernel address space operations and page cache handling. *	    Part of the Linux-NTFS project. * * Copyright (c) 2001-2004 Anton Altaparmakov * Copyright (c) 2002 Richard Russon * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */#include <linux/errno.h>#include <linux/mm.h>#include <linux/pagemap.h>#include <linux/swap.h>#include <linux/buffer_head.h>#include "ntfs.h"/** * ntfs_end_buffer_async_read - async io completion for reading attributes * @bh:		buffer head on which io is completed * @uptodate:	whether @bh is now uptodate or not * * Asynchronous I/O completion handler for reading pages belonging to the * attribute address space of an inode. The inodes can either be files or * directories or they can be fake inodes describing some attribute. * * If NInoMstProtected(), perform the post read mst fixups when all IO on the * page has been completed and mark the page uptodate or set the error bit on * the page. To determine the size of the records that need fixing up, we cheat * a little bit by setting the index_block_size in ntfs_inode to the ntfs * record size, and index_block_size_bits, to the log(base 2) of the ntfs * record size. 
*/
static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	/*
	 * Serializes the "are all async buffers of this page done?" check
	 * against concurrent completions of other buffers on the same page.
	 */
	static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
	unsigned long flags;
	struct buffer_head *tmp;
	struct page *page;
	ntfs_inode *ni;
	int page_uptodate = 1;

	page = bh->b_page;
	ni = NTFS_I(page->mapping->host);

	if (likely(uptodate)) {
		s64 file_ofs;

		set_buffer_uptodate(bh);

		/* Byte offset of this buffer within the attribute data. */
		file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
				bh_offset(bh);
		/* Check for the current buffer head overflowing. */
		if (file_ofs + bh->b_size > ni->initialized_size) {
			char *addr;
			int ofs = 0;

			if (file_ofs < ni->initialized_size)
				ofs = ni->initialized_size - file_ofs;
			/*
			 * Zero the tail of the buffer that lies beyond
			 * initialized_size so stale on-disk data is never
			 * exposed.  KM_BIO_SRC_IRQ because this completion
			 * handler may run in interrupt context.
			 */
			addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
			memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
			flush_dcache_page(page);
			kunmap_atomic(addr, KM_BIO_SRC_IRQ);
		}
	} else {
		clear_buffer_uptodate(bh);
		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.",
				(unsigned long long)bh->b_blocknr);
		SetPageError(page);
	}

	spin_lock_irqsave(&page_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	/*
	 * Walk the circular list of buffers on this page.  If any buffer
	 * still has async read i/o outstanding, the page is not yet done and
	 * we must not touch its uptodate state.
	 */
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			if (likely(buffer_locked(tmp)))
				goto still_busy;
			/* Async buffers must be locked. */
			BUG();
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&page_uptodate_lock, flags);
	/*
	 * If none of the buffers had errors then we can set the page uptodate,
	 * but we first have to perform the post read mst fixups, if the
	 * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
	 */
	if (!NInoMstProtected(ni)) {
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	} else {
		char *addr;
		unsigned int i, recs, nr_err;
		u32 rec_size;

		/*
		 * itype.index.block_size holds the ntfs record size here (see
		 * the function's kernel-doc above), so a page contains exactly
		 * PAGE_CACHE_SIZE / rec_size records to be deprotected.
		 */
		rec_size = ni->itype.index.block_size;
		recs = PAGE_CACHE_SIZE / rec_size;
		addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
		for (i = nr_err = 0; i < recs; i++) {
			if (likely(!post_read_mst_fixup((NTFS_RECORD*)(addr +
					i * rec_size), rec_size)))
				continue;
			nr_err++;
			ntfs_error(ni->vol->sb, "post_read_mst_fixup() failed, "
					"corrupt %s record 0x%llx. Run chkdsk.",
					ni->mft_no ? "index" : "mft",
					(unsigned long long)(((s64)page->index
					<< PAGE_CACHE_SHIFT >>
					ni->itype.index.block_size_bits) + i));
		}
		flush_dcache_page(page);
		kunmap_atomic(addr, KM_BIO_SRC_IRQ);
		/* Only mark the page uptodate if every record fixed up ok. */
		if (likely(!PageError(page))) {
			if (likely(!nr_err && recs)) {
				if (likely(page_uptodate))
					SetPageUptodate(page);
			} else {
				ntfs_error(ni->vol->sb, "Setting page error, "
						"index 0x%lx.", page->index);
				SetPageError(page);
			}
		}
	}
	unlock_page(page);
	return;
still_busy:
	/* Other buffers of this page are still under i/o; nothing more to do. */
	spin_unlock_irqrestore(&page_uptodate_lock, flags);
	return;
}

/**
 * ntfs_read_block - fill a @page of an address space with data
 * @page:	page cache page to fill with data
 *
 * Fill the page @page of the address space belonging to the @page->host inode.
 * We read each buffer asynchronously and when all buffers are read in, our io
 * completion handler ntfs_end_buffer_async_read(), if required, automatically
 * applies the mst fixups to the page before finally marking it uptodate and
 * unlocking it.
 *
 * We only enforce allocated_size limit because i_size is checked for in
 * generic_file_read().
 *
 * Return 0 on success and -errno on error.
 *
 * Contains an adapted version of fs/buffer.c::block_read_full_page().
*/static int ntfs_read_block(struct page *page){	VCN vcn;	LCN lcn;	ntfs_inode *ni;	ntfs_volume *vol;	runlist_element *rl;	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];	sector_t iblock, lblock, zblock;	unsigned int blocksize, vcn_ofs;	int i, nr;	unsigned char blocksize_bits;	ni = NTFS_I(page->mapping->host);	vol = ni->vol;	blocksize_bits = VFS_I(ni)->i_blkbits;	blocksize = 1 << blocksize_bits;	if (!page_has_buffers(page))		create_empty_buffers(page, blocksize, 0);	bh = head = page_buffers(page);	if (unlikely(!bh)) {		unlock_page(page);		return -ENOMEM;	}	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;	zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;#ifdef DEBUG	if (unlikely(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni)))		panic("NTFS: $MFT/$DATA runlist has been unmapped! This is a "				"very serious bug! Cannot continue...");#endif	/* Loop through all the buffers in the page. */	rl = NULL;	nr = i = 0;	do {		u8 *kaddr;		if (unlikely(buffer_uptodate(bh)))			continue;		if (unlikely(buffer_mapped(bh))) {			arr[nr++] = bh;			continue;		}		bh->b_bdev = vol->sb->s_bdev;		/* Is the block within the allowed limits? */		if (iblock < lblock) {			BOOL is_retry = FALSE;			/* Convert iblock into corresponding vcn and offset. */			vcn = (VCN)iblock << blocksize_bits >>					vol->cluster_size_bits;			vcn_ofs = ((VCN)iblock << blocksize_bits) &					vol->cluster_size_mask;			if (!rl) {lock_retry_remap:				down_read(&ni->runlist.lock);				rl = ni->runlist.rl;			}			if (likely(rl != NULL)) {				/* Seek to element containing target vcn. */				while (rl->length && rl[1].vcn <= vcn)					rl++;				lcn = ntfs_vcn_to_lcn(rl, vcn);			} else				lcn = (LCN)LCN_RL_NOT_MAPPED;			/* Successful remap. */			if (lcn >= 0) {				/* Setup buffer head to correct block. 
*/				bh->b_blocknr = ((lcn << vol->cluster_size_bits)						+ vcn_ofs) >> blocksize_bits;				set_buffer_mapped(bh);				/* Only read initialized data blocks. */				if (iblock < zblock) {					arr[nr++] = bh;					continue;				}				/* Fully non-initialized data block, zero it. */				goto handle_zblock;			}			/* It is a hole, need to zero it. */			if (lcn == LCN_HOLE)				goto handle_hole;			/* If first try and runlist unmapped, map and retry. */			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {				is_retry = TRUE;				/*				 * Attempt to map runlist, dropping lock for				 * the duration.				 */				up_read(&ni->runlist.lock);				if (!ntfs_map_runlist(ni, vcn))					goto lock_retry_remap;				rl = NULL;			}			/* Hard error, zero out region. */			SetPageError(page);			ntfs_error(vol->sb, "ntfs_vcn_to_lcn(vcn = 0x%llx) "					"failed with error code 0x%llx%s.",					(unsigned long long)vcn,					(unsigned long long)-lcn,					is_retry ? " even after retrying" : "");			// FIXME: Depending on vol->on_errors, do something.		}		/*		 * Either iblock was outside lblock limits or ntfs_vcn_to_lcn()		 * returned error. Just zero that portion of the page and set		 * the buffer uptodate.		 */handle_hole:		bh->b_blocknr = -1UL;		clear_buffer_mapped(bh);handle_zblock:		kaddr = kmap_atomic(page, KM_USER0);		memset(kaddr + i * blocksize, 0, blocksize);		flush_dcache_page(page);		kunmap_atomic(kaddr, KM_USER0);		set_buffer_uptodate(bh);	} while (i++, iblock++, (bh = bh->b_this_page) != head);	/* Release the lock if we took it. */	if (rl)		up_read(&ni->runlist.lock);	/* Check we have at least one buffer ready for i/o. */	if (nr) {		struct buffer_head *tbh;		/* Lock the buffers. */		for (i = 0; i < nr; i++) {			tbh = arr[i];			lock_buffer(tbh);			tbh->b_end_io = ntfs_end_buffer_async_read;			set_buffer_async_read(tbh);		}		/* Finally, start i/o on the buffers. 
*/		for (i = 0; i < nr; i++) {			tbh = arr[i];			if (likely(!buffer_uptodate(tbh)))				submit_bh(READ, tbh);			else				ntfs_end_buffer_async_read(tbh, 1);		}		return 0;	}	/* No i/o was scheduled on any of the buffers. */	if (likely(!PageError(page)))		SetPageUptodate(page);	else /* Signal synchronous i/o error. */		nr = -EIO;	unlock_page(page);	return nr;}/** * ntfs_readpage - fill a @page of a @file with data from the device * @file:	open file to which the page @page belongs or NULL * @page:	page cache page to fill with data * * For non-resident attributes, ntfs_readpage() fills the @page of the open * file @file by calling the ntfs version of the generic block_read_full_page() * function, ntfs_read_block(), which in turn creates and reads in the buffers * associated with the page asynchronously. * * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the * data from the mft record (which at this stage is most likely in memory) and * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as * even if the mft record is not cached at this point in time, we need to wait * for it to be read in before we can do the copy. * * Return 0 on success and -errno on error. * * WARNING: Do not make this function static! It is used by mft.c! */int ntfs_readpage(struct file *file, struct page *page){	s64 attr_pos;	ntfs_inode *ni, *base_ni;	u8 *kaddr;	ntfs_attr_search_ctx *ctx;	MFT_RECORD *mrec;	u32 attr_len;	int err = 0;	BUG_ON(!PageLocked(page));	/*	 * This can potentially happen because we clear PageUptodate() during	 * ntfs_writepage() of MstProtected() attributes.	 */	if (PageUptodate(page)) {		unlock_page(page);		return 0;	}	ni = NTFS_I(page->mapping->host);	/* NInoNonResident() == NInoIndexAllocPresent() */	if (NInoNonResident(ni)) {		/*		 * Only unnamed $DATA attributes can be compressed or		 * encrypted.		 */		if (ni->type == AT_DATA && !ni->name_len) {			/* If file is encrypted, deny access, just like NT4. 
*/			if (NInoEncrypted(ni)) {				err = -EACCES;				goto err_out;			}			/* Compressed data streams are handled in compress.c. */			if (NInoCompressed(ni))				return ntfs_read_compressed_block(page);		}		/* Normal data stream. */		return ntfs_read_block(page);	}	/* Attribute is resident, implying it is not compressed or encrypted. */	if (!NInoAttr(ni))		base_ni = ni;	else		base_ni = ni->ext.base_ntfs_ino;	/* Map, pin, and lock the mft record. */	mrec = map_mft_record(base_ni);	if (IS_ERR(mrec)) {		err = PTR_ERR(mrec);		goto err_out;	}	ctx = ntfs_attr_get_search_ctx(base_ni, mrec);	if (unlikely(!ctx)) {		err = -ENOMEM;		goto unm_err_out;	}	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,			CASE_SENSITIVE, 0, NULL, 0, ctx);	if (unlikely(err))		goto put_unm_err_out;	/* Starting position of the page within the attribute value. */	attr_pos = page->index << PAGE_CACHE_SHIFT;	/* The total length of the attribute value. */	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);	kaddr = kmap_atomic(page, KM_USER0);	/* Copy over in bounds data, zeroing the remainder of the page. */	if (attr_pos < attr_len) {		u32 bytes = attr_len - attr_pos;		if (bytes > PAGE_CACHE_SIZE)			bytes = PAGE_CACHE_SIZE;		else if (bytes < PAGE_CACHE_SIZE)			memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);		/* Copy the data to the page. */		memcpy(kaddr, attr_pos + (char*)ctx->attr +				le16_to_cpu(				ctx->attr->data.resident.value_offset), bytes);	} else		memset(kaddr, 0, PAGE_CACHE_SIZE);	flush_dcache_page(page);	kunmap_atomic(kaddr, KM_USER0);	SetPageUptodate(page);put_unm_err_out:	ntfs_attr_put_search_ctx(ctx);unm_err_out:	unmap_mft_record(base_ni);err_out:	unlock_page(page);	return err;}#ifdef NTFS_RW/** * ntfs_write_block - write a @page to the backing store * @wbc:	writeback control structure * @page:	page cache page to write out * * This function is for writing pages belonging to non-resident, non-mst * protected attributes to their backing store. 
* * For a page with buffers, map and write the dirty buffers asynchronously * under page writeback. For a page without buffers, create buffers for the * page, then proceed as above. * * If a page doesn't have buffers the page dirty state is definitive. If a page * does have buffers, the page dirty state is just a hint, and the buffer dirty * state is definitive. (A hint which has rules: dirty buffers against a clean * page is illegal. Other combinations are legal and need to be handled. In * particular a dirty page containing clean buffers for example.) * * Return 0 on success and -errno on error. * * Based on ntfs_read_block() and __block_write_full_page(). */static int ntfs_write_block(struct writeback_control *wbc, struct page *page){	VCN vcn;	LCN lcn;	sector_t block, dblock, iblock;	struct inode *vi;	ntfs_inode *ni;	ntfs_volume *vol;	runlist_element *rl;	struct buffer_head *bh, *head;	unsigned int blocksize, vcn_ofs;	int err;	BOOL need_end_writeback;	unsigned char blocksize_bits;	vi = page->mapping->host;	ni = NTFS_I(vi);	vol = ni->vol;	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "			"0x%lx.", vi->i_ino, ni->type, page->index);	BUG_ON(!NInoNonResident(ni));	BUG_ON(NInoMstProtected(ni));	blocksize_bits = vi->i_blkbits;	blocksize = 1 << blocksize_bits;	if (!page_has_buffers(page)) {		BUG_ON(!PageUptodate(page));		create_empty_buffers(page, blocksize,				(1 << BH_Uptodate) | (1 << BH_Dirty));	}	bh = head = page_buffers(page);	if (unlikely(!bh)) {		ntfs_warning(vol->sb, "Error allocating page buffers. "				"Redirtying page so we try again later.");		/*		 * Put the page back on mapping->dirty_pages, but leave its		 * buffer's dirty state as-is.		 */		redirty_page_for_writepage(wbc, page);

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?