
📄 xfs_inode.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
 * at least do it for regular files.
 */
#ifdef DEBUG
void
xfs_isize_check(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	map_first;
	int		nimaps;
	xfs_bmbt_irec_t	imaps[2];

	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
		return;

	if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
		return;

	nimaps = 2;
	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 * an error.
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			 (XFS_B_TO_FSB(mp,
				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
			  map_first),
			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
			 NULL, NULL))
	    return;
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#endif	/* DEBUG */

/*
 * Calculate the last possible buffered byte in a file.  This must
 * include data that was buffered beyond the EOF by the write code.
 * This also needs to deal with overflowing the xfs_fsize_t type
 * which can happen for sizes near the limit.
 *
 * We also need to take into account any blocks beyond the EOF.  It
 * may be the case that they were buffered by a write which failed.
 * In that case the pages will still be in memory, but the inode size
 * will never have been updated.
 */
xfs_fsize_t
xfs_file_last_byte(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_fsize_t	last_byte;
	xfs_fileoff_t	last_block;
	xfs_fileoff_t	size_last_block;
	int		error;

	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));

	mp = ip->i_mount;
	/*
	 * Only check for blocks beyond the EOF if the extents have
	 * been read in.  This eliminates the need for the inode lock,
	 * and it also saves us from looking when it really isn't
	 * necessary.
	 */
	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
		error = xfs_bmap_last_offset(NULL, ip, &last_block,
			XFS_DATA_FORK);
		if (error) {
			last_block = 0;
		}
	} else {
		last_block = 0;
	}
	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);

	last_byte = XFS_FSB_TO_B(mp, last_block);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	last_byte += (1 << mp->m_writeio_log);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	return last_byte;
}

#if defined(XFS_RW_TRACE)
STATIC void
xfs_itrunc_trace(
	int		tag,
	xfs_inode_t	*ip,
	int		flag,
	xfs_fsize_t	new_size,
	xfs_off_t	toss_start,
	xfs_off_t	toss_finish)
{
	if (ip->i_rwtrace == NULL) {
		return;
	}

	ktrace_enter(ip->i_rwtrace,
		     (void*)((long)tag),
		     (void*)ip,
		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
		     (void*)((long)flag),
		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(new_size & 0xffffffff),
		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_start & 0xffffffff),
		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_finish & 0xffffffff),
		     (void*)(unsigned long)current_cpu(),
		     (void*)(unsigned long)current_pid(),
		     (void*)NULL,
		     (void*)NULL,
		     (void*)NULL);
}
#else
#define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
#endif
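/*
 * Illustrative aside added for this listing (not part of xfs_inode.c):
 * a minimal, self-contained sketch of the rounding performed by
 * xfs_file_last_byte() above, assuming a hypothetical 4KB block size
 * (block log of 12) and a 64KB buffered write size (m_writeio_log of 16).
 * The clamping to XFS_MAXIOFFSET() on overflow is omitted, and every
 * name below is local to the sketch, not an XFS definition.  For
 * example, with isize = 10000 and no extents read in, this yields
 * 3 blocks = 12288 bytes, plus 65536, i.e. 77824.
 */
#if 0	/* illustration only, never compiled */
static long long
example_file_last_byte(long long isize, long long last_extent_block)
{
	const int	blocklog = 12;		/* assumed 4096-byte blocks */
	const int	writeio_log = 16;	/* assumed 64KB write clusters */
	long long	size_last_block;
	long long	last_block;

	/* round the in-core size up to whole blocks, like XFS_B_TO_FSB() */
	size_last_block = (isize + (1LL << blocklog) - 1) >> blocklog;

	/* take whichever reaches further: the extent map or the in-core size */
	last_block = (last_extent_block > size_last_block) ?
			last_extent_block : size_last_block;

	/* back to bytes (XFS_FSB_TO_B()), plus one full write cluster */
	return (last_block << blocklog) + (1LL << writeio_log);
}
#endif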
/*
 * Start the truncation of the file to new_size.  The new size
 * must be smaller than the current size.  This routine will
 * clear the buffer and page caches of file data in the removed
 * range, and xfs_itruncate_finish() will remove the underlying
 * disk blocks.
 *
 * The inode must have its I/O lock locked EXCLUSIVELY, and it
 * must NOT have the inode lock held at all.  This is because we're
 * calling into the buffer/page cache code and we can't hold the
 * inode lock when we do so.
 *
 * We need to wait for any direct I/Os in flight to complete before we
 * proceed with the truncate. This is needed to prevent the extents
 * being read or written by the direct I/Os from being removed while the
 * I/O is in flight as there is no other method of synchronising
 * direct I/O with the truncate operation.  Also, because we hold
 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
 * started until the truncate completes and drops the lock. Essentially,
 * the vn_iowait() call forms an I/O barrier that provides strict ordering
 * between direct I/Os and the truncate operation.
 *
 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
 * in the case that the caller is locking things out of order and
 * may not be able to call xfs_itruncate_finish() with the inode lock
 * held without dropping the I/O lock.  If the caller must drop the
 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
 * must be called again with all the same restrictions as the initial
 * call.
 */
int
xfs_itruncate_start(
	xfs_inode_t	*ip,
	uint		flags,
	xfs_fsize_t	new_size)
{
	xfs_fsize_t	last_byte;
	xfs_off_t	toss_start;
	xfs_mount_t	*mp;
	bhv_vnode_t	*vp;
	int		error = 0;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
	       (flags == XFS_ITRUNC_MAYBE));

	mp = ip->i_mount;
	vp = XFS_ITOV(ip);

	/* wait for the completion of any pending DIOs */
	if (new_size < ip->i_size)
		vn_iowait(ip);

	/*
	 * Call toss_pages or flushinval_pages to get rid of pages
	 * overlapping the region being removed.  We have to use
	 * the less efficient flushinval_pages in the case that the
	 * caller may not be able to finish the truncate without
	 * dropping the inode's I/O lock.  Make sure
	 * to catch any pages brought in by buffers overlapping
	 * the EOF by searching out beyond the isize by our
	 * block size. We round new_size up to a block boundary
	 * so that we don't toss things on the same block as
	 * new_size but before it.
	 *
	 * Before calling toss_page or flushinval_pages, make sure to
	 * call remapf() over the same region if the file is mapped.
	 * This frees up mapped file references to the pages in the
	 * given range and for the flushinval_pages case it ensures
	 * that we get the latest mapped changes flushed out.
	 */
	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	toss_start = XFS_FSB_TO_B(mp, toss_start);
	if (toss_start < 0) {
		/*
		 * The place to start tossing is beyond our maximum
		 * file size, so there is no way that the data extended
		 * out there.
		 */
		return 0;
	}
	last_byte = xfs_file_last_byte(ip);
	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
			 last_byte);
	if (last_byte > toss_start) {
		if (flags & XFS_ITRUNC_DEFINITE) {
			xfs_tosspages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		} else {
			error = xfs_flushinval_pages(ip, toss_start,
					-1, FI_REMAPF_LOCKED);
		}
	}

#ifdef DEBUG
	if (new_size == 0) {
		ASSERT(VN_CACHED(vp) == 0);
	}
#endif
	return error;
}
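/*
 * Illustrative aside added for this listing (not part of xfs_inode.c):
 * a minimal sketch of the calling order the comment above describes for
 * the XFS_ITRUNC_DEFINITE case -- I/O lock first, xfs_itruncate_start()
 * with no inode lock held, then a permanent-reservation transaction and
 * xfs_itruncate_finish() with both locks held and the inode joined to
 * the transaction.  It is loosely modelled on contemporary callers such
 * as xfs_setattr(); the transaction type, reservation constants and the
 * error handling are assumptions, not definitions taken from this file.
 */
#if 0	/* illustration only, never compiled */
static int
example_truncate_data_fork(
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	int		error;

	/* 1. I/O lock exclusive, inode lock NOT held. */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	/* 2. Toss cached pages beyond the new size. */
	error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, new_size);
	if (error) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		return error;
	}

	/* 3. Permanent log reservation of at least XFS_ITRUNCATE_LOG_RES. */
	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		return error;
	}

	/* 4. Only now take the inode lock and join/hold the inode. */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);

	/* 5. Free the blocks; a live transaction comes back in tp. */
	error = xfs_itruncate_finish(&tp, ip, new_size, XFS_DATA_FORK, 1);

	/* 6. Commit (or cancel) whatever transaction came back. */
	if (!error)
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	else
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				     XFS_TRANS_ABORT);

	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	return error;
}
#endif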
/*
 * Shrink the file to the given new_size.  The new
 * size must be smaller than the current size.
 * This will free up the underlying blocks
 * in the removed range after a call to xfs_itruncate_start()
 * or xfs_atruncate_start().
 *
 * The transaction passed to this routine must have made
 * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
 * This routine may commit the given transaction and
 * start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.
 * Some transaction will be returned to the caller to be
 * committed.  The incoming transaction must already include
 * the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On
 * return the inode will be "held" within the returned transaction.
 * This routine does NOT require any disk space to be reserved
 * for it within the transaction.
 *
 * The fork parameter must be either xfs_attr_fork or xfs_data_fork,
 * and it indicates the fork which is to be truncated.  For the
 * attribute fork we only support truncation to size 0.
 *
 * We use the sync parameter to indicate whether or not the first
 * transaction we perform might have to be synchronous.  For the attr fork,
 * it needs to be so if the unlink of the inode is not yet known to be
 * permanent in the log.  This keeps us from freeing and reusing the
 * blocks of the attribute fork before the unlink of the inode becomes
 * permanent.
 *
 * For the data fork, we normally have to run synchronously if we're
 * being called out of the inactive path or we're being called
 * out of the create path where we're truncating an existing file.
 * Either way, the truncate needs to be sync so blocks don't reappear
 * in the file with altered data in case of a crash.  wsync filesystems
 * can run the first case async because anything that shrinks the inode
 * has to run sync so by the time we're called here from inactive, the
 * inode size is permanently set to 0.
 *
 * Calls from the truncate path always need to be sync unless we're
 * in a wsync filesystem and the file has already been unlinked.
 *
 * The caller is responsible for correctly setting the sync parameter.
 * It gets too hard for us to guess here which path we're being called
 * out of just based on inode state.
 */
int
xfs_itruncate_finish(
	xfs_trans_t	**tp,
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	int		fork,
	int		sync)
{
	xfs_fsblock_t	first_block;
	xfs_fileoff_t	first_unmap_block;
	xfs_fileoff_t	last_block;
	xfs_filblks_t	unmap_len=0;
	xfs_mount_t	*mp;
	xfs_trans_t	*ntp;
	int		done;
	int		committed;
	xfs_bmap_free_t	free_list;
	int		error;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT(*tp != NULL);
	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_transp == *tp);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);

	ntp = *tp;
	mp = (ntp)->t_mountp;
	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));

	/*
	 * We only support truncating the entire attribute fork.
	 */
	if (fork == XFS_ATTR_FORK) {
		new_size = 0LL;
	}
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
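	/*
	 * Illustrative note added for this listing (not part of
	 * xfs_inode.c): when fork == XFS_ATTR_FORK, new_size has just
	 * been forced to 0 above, so first_unmap_block is 0 and the
	 * loop further down unmaps the attribute fork in its entirety,
	 * matching the "truncation to size 0 only" rule described in
	 * the comment before this function.
	 */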
	/*
	 * The first thing we do is set the size to new_size permanently
	 * on disk.  This way we don't have to worry about anyone ever
	 * being able to look at the data being freed even in the face
	 * of a crash.  What we're getting around here is the case where
	 * we free a block, it is allocated to another file, it is written
	 * to, and then we crash.  If the new data gets written to the
	 * file but the log buffers containing the free and reallocation
	 * don't, then we'd end up with garbage in the blocks being freed.
	 * As long as we make the new_size permanent before actually
	 * freeing any blocks it doesn't matter if they get written to.
	 *
	 * The callers must signal into us whether or not the size
	 * setting here must be synchronous.  There are a few cases
	 * where it doesn't have to be synchronous.  Those cases
	 * occur if the file is unlinked and we know the unlink is
	 * permanent or if the blocks being truncated are guaranteed
	 * to be beyond the inode eof (regardless of the link count)
	 * and the eof value is permanent.  Both of these cases occur
	 * only on wsync-mounted filesystems.  In those cases, we're
	 * guaranteed that no user will ever see the data in the blocks
	 * that are being truncated so the truncate can run async.
	 * In the free beyond eof case, the file may wind up with
	 * more blocks allocated to it than it needs if we crash
	 * and that won't get fixed until the next time the file
	 * is re-opened and closed but that's ok as that shouldn't
	 * be too many blocks.
	 *
	 * However, we can't just make all wsync xactions run async
	 * because there's one call out of the create path that needs
	 * to run sync where it's truncating an existing file to size
	 * 0 whose size is > 0.
	 *
	 * It's probably possible to come up with a test in this
	 * routine that would correctly distinguish all the above
	 * cases from the values of the function parameters and the
	 * inode state but for sanity's sake, I've decided to let the
	 * layers above just tell us.  It's simpler to correctly figure
	 * out in the layer above exactly under what conditions we
	 * can run async and I think it's easier for others to read
	 * and follow the logic in case something has to be changed.
	 * cscope is your friend -- rcc.
	 *
	 * The attribute fork is much simpler.
	 *
	 * For the attribute fork we allow the caller to tell us whether
	 * the unlink of the inode that led to this call is yet permanent
	 * in the on disk log.  If it is not and we will be freeing extents
	 * in this inode then we make the first transaction synchronous
	 * to make sure that the unlink is permanent by the time we free
	 * the blocks.
	 */
	if (fork == XFS_DATA_FORK) {
		if (ip->i_d.di_nextents > 0) {
			/*
			 * If we are not changing the file size then do
			 * not update the on-disk file size - we may be
			 * called from xfs_inactive_free_eofblocks().  If we
			 * update the on-disk file size and then the system
			 * crashes before the contents of the file are
			 * flushed to disk then the files may be full of
			 * holes (ie NULL files bug).
			 */
			if (ip->i_size != new_size) {
				ip->i_d.di_size = new_size;
				ip->i_size = new_size;
				xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
			}
		}
	} else if (sync) {
		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
		if (ip->i_d.di_anextents > 0)
			xfs_trans_set_sync(ntp);
	}
	ASSERT(fork == XFS_DATA_FORK ||
		(fork == XFS_ATTR_FORK &&
			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	ASSERT(first_unmap_block <= last_block);
	done = 0;
	if (last_block == first_unmap_block) {
		done = 1;
	} else {
		unmap_len = last_block - first_unmap_block + 1;
	}
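	/*
	 * Illustrative note added for this listing (not part of
	 * xfs_inode.c): assuming a hypothetical 4KB block size,
	 * truncating to new_size = 10000 bytes gives
	 * first_unmap_block = XFS_B_TO_FSB(mp, 10000) = 3, so the loop
	 * below unmaps everything from block 3 up to the last block
	 * addressable by XFS_MAXIOFFSET(), while blocks 0-2, which
	 * still back the remaining 10000 bytes, are left in place.
	 */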
	while (!done) {
		/*
		 * Free up to XFS_ITRUNC_MAX_EXTENTS.  xfs_bunmapi()
		 * will tell us whether it freed the entire range or
		 * not.  If this is a synchronous mount (wsync),
		 * then we can tell bunmapi to keep all the
		 * transactions asynchronous since the unlink
		 * transaction that made this inode inactive has
		 * already hit the disk.  There's no danger of
		 * the freed blocks being reused, there being a
		 * crash, and the reused blocks suddenly reappearing
		 * in this file with garbage in them once recovery
		 * runs.
		 */
