
📄 xfs_lrw.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"

#include <linux/capability.h>
#include <linux/writeback.h>

#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int			tag,
	xfs_iocore_t		*io,
	void			*data,
	size_t			segs,
	loff_t			offset,
	int			ioflags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}

void
xfs_inval_cached_trace(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
#endif
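/*
 * The trace hooks above squeeze 64-bit quantities (di_size, offset,
 * io_new_size) into pairs of 32-bit halves, because each ktrace slot
 * is a void *, which may be only 32 bits wide.  A minimal standalone
 * sketch of that split-and-reassemble pattern follows; the helper
 * names are hypothetical and this block is not part of xfs_lrw.c.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

static void split64(uint64_t v, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)((v >> 32) & 0xffffffff);	/* high word */
	*lo = (uint32_t)(v & 0xffffffff);		/* low word */
}

static uint64_t join64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;	/* recombine when decoding */
}
#endif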
/*
 *	xfs_iozero
 *
 *	xfs_iozero clears the specified range of buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct inode		*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = ip->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user_page(page, offset, bytes, KM_USER0);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

ssize_t			/* bytes read, or (-)  error */
xfs_read(
	xfs_inode_t		*ip,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	bhv_vnode_t		*vp = XFS_ITOV(ip);
	xfs_mount_t		*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		bhv_vrwlock_t locktype = VRWLOCK_READ;
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp, *offset, size,
					dmflags, &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			if (unlikely(ioflags & IO_ISDIRECT))
				mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (VN_CACHED(vp))
			ret = xfs_flushinval_pages(ip,
					ctooff(offtoct(*offset)),
					-1, FI_REMAPF_LOCKED);
		mutex_unlock(&inode->i_mutex);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return ret;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);

	iocb->ki_pos = *offset;
	ret = generic_file_aio_read(iocb, iovp, segs, *offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
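/*
 * The IO_ISDIRECT branch in xfs_read above rejects direct reads whose
 * offset or total size is not aligned to the device sector mask
 * (bt_smask, i.e. sector size minus one, sector size a power of two).
 * A minimal standalone sketch of that alignment test follows; the
 * function name and values are hypothetical, the real mask comes from
 * the buftarg.  This block is not part of xfs_lrw.c.
 */
#if 0	/* illustrative sketch only */
#include <stdbool.h>
#include <stdint.h>

static bool dio_aligned(uint64_t offset, uint64_t size, uint64_t smask)
{
	/* both offset and size must sit on a sector boundary */
	return ((offset & smask) | (size & smask)) == 0;
}

/* e.g. with 512-byte sectors: dio_aligned(4096, 8192, 511) is true,
 * dio_aligned(4100, 8192, 511) is false. */
#endif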
ssize_t
xfs_splice_read(
	xfs_inode_t		*ip,
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	int			flags,
	int			ioflags)
{
	bhv_vnode_t		*vp = XFS_ITOV(ip);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		bhv_vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, *ppos, count,
					FILP_DELAY_FLAG(infilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

ssize_t
xfs_splice_write(
	xfs_inode_t		*ip,
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	int			flags,
	int			ioflags)
{
	bhv_vnode_t		*vp = XFS_ITOV(ip);
	xfs_mount_t		*mp = ip->i_mount;
	xfs_iocore_t		*io = &ip->i_iocore;
	ssize_t			ret;
	struct inode		*inode = outfilp->f_mapping->host;
	xfs_fsize_t		isize, new_size;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
		bhv_vrwlock_t locktype = VRWLOCK_WRITE;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, vp, *ppos, count,
					FILP_DELAY_FLAG(outfilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		io->io_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	if (io->io_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		io->io_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}
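/*
 * Note how xfs_splice_write above grows ip->i_size: it tests the size
 * once without the ilock, then takes XFS_ILOCK_EXCL and tests again
 * before storing, so the lock is only paid for when the file actually
 * grew and a racing writer can never move the size backwards.  A
 * minimal standalone pthreads sketch of the same check/lock/recheck
 * pattern follows; the struct and names are hypothetical, and a
 * strictly portable version would also need an atomic load for the
 * unlocked read.  This block is not part of xfs_lrw.c.
 */
#if 0	/* illustrative sketch only */
#include <pthread.h>
#include <stdint.h>

struct sized {
	pthread_mutex_t	lock;
	uint64_t	size;
};

static void grow_size(struct sized *s, uint64_t new_size)
{
	if (new_size > s->size) {		/* cheap unlocked test */
		pthread_mutex_lock(&s->lock);
		if (new_size > s->size)		/* recheck under lock */
			s->size = new_size;
		pthread_mutex_unlock(&s->lock);
	}
}
#endif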
/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct inode	*ip,
	xfs_iocore_t	*io,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */
int					/* error (positive) */
xfs_zero_eof(
	bhv_vnode_t	*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	struct inode	*ip = vn_to_inode(vp);
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
	ASSERT(offset > isize);
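/*
 * xfs_zero_last_block above derives the byte range to zero from plain
 * block arithmetic: zero_offset is the old size's offset within its
 * filesystem block, and the zeroed tail runs from isize to the end of
 * that block, clamped so it never reaches the new write offset.  A
 * minimal standalone sketch of the same arithmetic follows, assuming
 * a power-of-two block size so XFS_B_FSB_OFFSET reduces to a mask.
 * The helper name is hypothetical; this block is not part of
 * xfs_lrw.c.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

/* Bytes to zero from isize toward offset; 0 if isize is block aligned. */
static uint64_t tail_zero_len(uint64_t isize, uint64_t offset,
			      uint64_t blocksize)
{
	uint64_t zero_offset = isize & (blocksize - 1);
	uint64_t zero_len;

	if (zero_offset == 0)
		return 0;		/* last block ends exactly at EOF */
	zero_len = blocksize - zero_offset;
	if (isize + zero_len > offset)	/* don't zero past the new write */
		zero_len = offset - isize;
	return zero_len;
}

/* e.g. isize = 5000, blocksize = 4096: zero_offset = 904, so up to
 * 3192 bytes are zeroed, fewer if the new write starts before 8192. */
#endif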
