
📄 aops.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 4
/**
 * aops.c - NTFS kernel address space operations and page cache handling.
 *	    Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2007 Anton Altaparmakov
 * Copyright (c) 2002 Richard Russon
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>

#include "aops.h"
#include "attrib.h"
#include "debug.h"
#include "inode.h"
#include "mft.h"
#include "runlist.h"
#include "types.h"
#include "ntfs.h"

/**
 * ntfs_end_buffer_async_read - async io completion for reading attributes
 * @bh:		buffer head on which io is completed
 * @uptodate:	whether @bh is now uptodate or not
 *
 * Asynchronous I/O completion handler for reading pages belonging to the
 * attribute address space of an inode.  The inodes can either be files or
 * directories or they can be fake inodes describing some attribute.
 *
 * If NInoMstProtected(), perform the post read mst fixups when all IO on the
 * page has been completed and mark the page uptodate or set the error bit on
 * the page.  To determine the size of the records that need fixing up, we
 * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs
 * record size, and index_block_size_bits, to the log(base 2) of the ntfs
 * record size.
 */
static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first, *tmp;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni;
	int page_uptodate = 1;

	page = bh->b_page;
	vi = page->mapping->host;
	ni = NTFS_I(vi);

	if (likely(uptodate)) {
		loff_t i_size;
		s64 file_ofs, init_size;

		set_buffer_uptodate(bh);

		file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
				bh_offset(bh);
		read_lock_irqsave(&ni->size_lock, flags);
		init_size = ni->initialized_size;
		i_size = i_size_read(vi);
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (unlikely(init_size > i_size)) {
			/* Race with shrinking truncate. */
			init_size = i_size;
		}
		/* Check for the current buffer head overflowing. */
		if (unlikely(file_ofs + bh->b_size > init_size)) {
			int ofs;

			ofs = 0;
			if (file_ofs < init_size)
				ofs = init_size - file_ofs;
			local_irq_save(flags);
			zero_user_page(page, bh_offset(bh) + ofs,
					bh->b_size - ofs, KM_BIO_SRC_IRQ);
			local_irq_restore(flags);
		}
	} else {
		clear_buffer_uptodate(bh);
		SetPageError(page);
		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
				"0x%llx.", (unsigned long long)bh->b_blocknr);
	}
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			if (likely(buffer_locked(tmp)))
				goto still_busy;
			/* Async buffers must be locked. */
			BUG();
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	/*
	 * If none of the buffers had errors then we can set the page uptodate,
	 * but we first have to perform the post read mst fixups, if the
	 * attribute is mst protected, i.e. if NInoMstProteced(ni) is true.
	 * Note we ignore fixup errors as those are detected when
	 * map_mft_record() is called which gives us per record granularity
	 * rather than per page granularity.
	 */
	if (!NInoMstProtected(ni)) {
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	} else {
		u8 *kaddr;
		unsigned int i, recs;
		u32 rec_size;

		rec_size = ni->itype.index.block_size;
		recs = PAGE_CACHE_SIZE / rec_size;
		/* Should have been verified before we got here... */
		BUG_ON(!recs);
		local_irq_save(flags);
		kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
		for (i = 0; i < recs; i++)
			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
					i * rec_size), rec_size);
		kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
		local_irq_restore(flags);
		flush_dcache_page(page);
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	}
	unlock_page(page);
	return;
still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/**
 * ntfs_read_block - fill a @page of an address space with data
 * @page:	page cache page to fill with data
 *
 * Fill the page @page of the address space belonging to the @page->host inode.
 * We read each buffer asynchronously and when all buffers are read in, our io
 * completion handler ntfs_end_buffer_read_async(), if required, automatically
 * applies the mst fixups to the page before finally marking it uptodate and
 * unlocking it.
 *
 * We only enforce allocated_size limit because i_size is checked for in
 * generic_file_read().
 *
 * Return 0 on success and -errno on error.
 *
 * Contains an adapted version of fs/buffer.c::block_read_full_page().
 */
static int ntfs_read_block(struct page *page)
{
	loff_t i_size;
	VCN vcn;
	LCN lcn;
	s64 init_size;
	struct inode *vi;
	ntfs_inode *ni;
	ntfs_volume *vol;
	runlist_element *rl;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	sector_t iblock, lblock, zblock;
	unsigned long flags;
	unsigned int blocksize, vcn_ofs;
	int i, nr;
	unsigned char blocksize_bits;

	vi = page->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;

	/* $MFT/$DATA must have its complete runlist in memory at all times. */
	BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));

	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize, 0);
		if (unlikely(!page_has_buffers(page))) {
			unlock_page(page);
			return -ENOMEM;
		}
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	/*
	 * We may be racing with truncate.  To avoid some of the problems we
	 * now take a snapshot of the various sizes and use those for the whole
	 * of the function.  In case of an extending truncate it just means we
	 * may leave some buffers unmapped which are now allocated.  This is
	 * not a problem since these buffers will just get mapped when a write
	 * occurs.  In case of a shrinking truncate, we will detect this later
	 * on due to the runlist being incomplete and if the page is being
	 * fully truncated, truncate will throw it away as soon as we unlock
	 * it so no need to worry what we do with it.
	 */
	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
	read_lock_irqsave(&ni->size_lock, flags);
	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
	init_size = ni->initialized_size;
	i_size = i_size_read(vi);
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (unlikely(init_size > i_size)) {
		/* Race with shrinking truncate. */
		init_size = i_size;
	}
	zblock = (init_size + blocksize - 1) >> blocksize_bits;

	/* Loop through all the buffers in the page. */
	rl = NULL;
	nr = i = 0;
	do {
		int err = 0;

		if (unlikely(buffer_uptodate(bh)))
			continue;
		if (unlikely(buffer_mapped(bh))) {
			arr[nr++] = bh;
			continue;
		}
		bh->b_bdev = vol->sb->s_bdev;
		/* Is the block within the allowed limits? */
		if (iblock < lblock) {
			bool is_retry = false;

			/* Convert iblock into corresponding vcn and offset. */
			vcn = (VCN)iblock << blocksize_bits >>
					vol->cluster_size_bits;
			vcn_ofs = ((VCN)iblock << blocksize_bits) &
					vol->cluster_size_mask;
			if (!rl) {
lock_retry_remap:
				down_read(&ni->runlist.lock);
				rl = ni->runlist.rl;
			}
			if (likely(rl != NULL)) {
				/* Seek to element containing target vcn. */
				while (rl->length && rl[1].vcn <= vcn)
					rl++;
				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
			} else
				lcn = LCN_RL_NOT_MAPPED;
			/* Successful remap. */
			if (lcn >= 0) {
				/* Setup buffer head to correct block. */
				bh->b_blocknr = ((lcn << vol->cluster_size_bits)
						+ vcn_ofs) >> blocksize_bits;
				set_buffer_mapped(bh);
				/* Only read initialized data blocks. */
				if (iblock < zblock) {
					arr[nr++] = bh;
					continue;
				}
				/* Fully non-initialized data block, zero it. */
				goto handle_zblock;
			}
			/* It is a hole, need to zero it. */
			if (lcn == LCN_HOLE)
				goto handle_hole;
			/* If first try and runlist unmapped, map and retry. */
			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
				is_retry = true;
				/*
				 * Attempt to map runlist, dropping lock for
				 * the duration.
				 */
				up_read(&ni->runlist.lock);
				err = ntfs_map_runlist(ni, vcn);
				if (likely(!err))
					goto lock_retry_remap;
				rl = NULL;
			} else if (!rl)
				up_read(&ni->runlist.lock);
			/*
			 * If buffer is outside the runlist, treat it as a
			 * hole.  This can happen due to concurrent truncate
			 * for example.
			 */
			if (err == -ENOENT || lcn == LCN_ENOENT) {
				err = 0;
				goto handle_hole;
			}
			/* Hard error, zero out region. */
			if (!err)
				err = -EIO;
			bh->b_blocknr = -1;
			SetPageError(page);
			ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"offset 0x%x because its location on "
					"disk could not be determined%s "
					"(error code %i).", ni->mft_no,
					ni->type, (unsigned long long)vcn,
					vcn_ofs, is_retry ? " even after "
					"retrying" : "", err);
		}
		/*
		 * Either iblock was outside lblock limits or
		 * ntfs_rl_vcn_to_lcn() returned error.  Just zero that portion
		 * of the page and set the buffer uptodate.
		 */
handle_hole:
		bh->b_blocknr = -1UL;
		clear_buffer_mapped(bh);
handle_zblock:
		zero_user_page(page, i * blocksize, blocksize, KM_USER0);
		if (likely(!err))
			set_buffer_uptodate(bh);
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Check we have at least one buffer ready for i/o. */
	if (nr) {
		struct buffer_head *tbh;

		/* Lock the buffers. */
		for (i = 0; i < nr; i++) {
			tbh = arr[i];
			lock_buffer(tbh);
			tbh->b_end_io = ntfs_end_buffer_async_read;
			set_buffer_async_read(tbh);
		}
		/* Finally, start i/o on the buffers. */
		for (i = 0; i < nr; i++) {
			tbh = arr[i];
			if (likely(!buffer_uptodate(tbh)))
				submit_bh(READ, tbh);
			else
				ntfs_end_buffer_async_read(tbh, 1);
		}
		return 0;
	}
	/* No i/o was scheduled on any of the buffers. */
	if (likely(!PageError(page)))
		SetPageUptodate(page);
	else /* Signal synchronous i/o error. */
		nr = -EIO;
	unlock_page(page);
	return nr;
}

/**
 * ntfs_readpage - fill a @page of a @file with data from the device
 * @file:	open file to which the page @page belongs or NULL
 * @page:	page cache page to fill with data
 *
 * For non-resident attributes, ntfs_readpage() fills the @page of the open
 * file @file by calling the ntfs version of the generic block_read_full_page()
 * function, ntfs_read_block(), which in turn creates and reads in the buffers
 * associated with the page asynchronously.
 *
 * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
 * data from the mft record (which at this stage is most likely in memory) and
 * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
 * even if the mft record is not cached at this point in time, we need to wait
 * for it to be read in before we can do the copy.
 *
 * Return 0 on success and -errno on error.
 */
static int ntfs_readpage(struct file *file, struct page *page)
{
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	u8 *addr;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *mrec;
	unsigned long flags;
	u32 attr_len;
	int err = 0;

retry_readpage:
	BUG_ON(!PageLocked(page));
	vi = page->mapping->host;
	i_size = i_size_read(vi);
	/* Is the page fully outside i_size? (truncate in progress) */
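
The listing above is page 1 of 4 and stops partway through ntfs_readpage(). For orientation, handlers such as ntfs_readpage() only take effect once they are plugged into an address_space_operations table that the inode's mapping points at. The sketch below is illustrative only, written against the same 2.6-era kernel API this file uses; the handler and table names are placeholders, not the definitions that appear later in aops.c.

/*
 * Minimal sketch (not from this file): registering a readpage handler
 * with the page cache on a 2.6-era kernel.  All names are placeholders.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

/* Called by the VM with @page locked; must unlock it before returning. */
static int example_readpage(struct file *file, struct page *page)
{
	/* A trivial handler: present the whole page as zeroes. */
	zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
};

/* An inode adopting these operations would do so at inode setup time: */
/*	inode->i_mapping->a_ops = &example_aops;			 */

A real implementation, like ntfs_readpage() above, fills the page from the backing device (or from the MFT record for resident attributes) rather than zeroing it, but the contract with the VM is the same: the page arrives locked and must be unlocked once it is uptodate or marked in error.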
