⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 xfs_iomap.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and setup the transaction
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, resblks,
			XFS_WRITE_LOG_RES(mp), resrtextents,
			XFS_TRANS_PERM_LOG_RES,
			XFS_WRITE_LOG_COUNT);
	/*
	 * Check for running out of space, note: need lock to return
	 */
	if (error)
		xfs_trans_cancel(tp, 0);
	/* Reacquire the ilock before returning, even on the error path. */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		goto error_out;

	error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
					      qblocks, 0, quota_flag);
	if (error)
		goto error1;

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);

	bmapi_flag = XFS_BMAPI_WRITE;
	if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz))
		bmapi_flag |= XFS_BMAPI_PREALLOC;

	/*
	 * Issue the xfs_bmapi() call to allocate the blocks
	 */
	XFS_BMAP_INIT(&free_list, &firstfsb);
	nimaps = 1;
	error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb, bmapi_flag,
		&firstfsb, 0, &imap, &nimaps, &free_list, NULL);
	if (error)
		goto error0;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto error0;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto error_out;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = ENOSPC;
		goto error_out;
	}

	/* A zero startblock on a non-realtime file indicates corruption. */
	if (unlikely(!imap.br_startblock && !(io->io_flags & XFS_IOCORE_RT))) {
		error = xfs_cmn_err_fsblock_zero(ip, &imap);
		goto error_out;
	}

	*ret_imap = imap;
	*nmaps = 1;
	return 0;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	*nmaps = 0;	/* nothing set-up here */

error_out:
	return XFS_ERROR(error);
}

/*
 * If the caller is doing a write at the end of the file,
 * then extend the allocation out to the file system's write
 * iosize.  We clean up any extra space left over when the
 * file is closed in xfs_inactive().
 *
 * For sync writes, we are flushing delayed allocate space to
 * try to make additional space available for allocation near
 * the filesystem full boundary - preallocation hurts in that
 * situation, of course.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_iocore_t	*io,
	xfs_fsize_t	isize,
	xfs_off_t	offset,
	size_t		count,
	int		ioflag,
	xfs_bmbt_irec_t *imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t   start_fsb;
	xfs_filblks_t   count_fsb;
	xfs_fsblock_t	firstblock;
	int		n, error, imaps;

	/* No preallocation for sync writes or writes entirely below EOF. */
	*prealloc = 0;
	if ((ioflag & BMAPI_SYNC) || (offset + count) <= isize)
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	while (count_fsb > 0) {
		imaps = nimaps;
		firstblock = NULLFSBLOCK;
		/* Read-only mapping call (NULL transaction, flags == 0). */
		error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb, 0,
				  &firstblock, 0, imap, &imaps, NULL, NULL);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			/* Anything that is neither a hole nor delalloc is
			 * a real block past EOF - bail out. */
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;
		}
	}
	*prealloc = 1;
	return 0;
}

/*
 * Reserve delayed-allocation blocks for a buffered write over the byte
 * range [offset, offset+count), possibly extended out to the write iosize
 * by EOF preallocation (see xfs_iomap_eof_want_preallocate above).  On
 * success the first resulting mapping is copied to *ret_imap and *nmaps
 * is set to 1.  On ENOSPC the delalloc space is flushed and the mapping
 * retried.  Caller must hold the inode lock in update mode (asserted).
 */
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	int		ioflag,
	xfs_bmbt_irec_t *ret_imap,
	int		*nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_fsblock_t	firstblock;
	xfs_extlen_t	extsz;
	xfs_fsize_t	isize;
	int		nimaps;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		prealloc, fsynced = 0;
	int		error;

	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
	if (error)
		return XFS_ERROR(error);

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

retry:
	/* Use the larger of the on-disk size and the in-flight new size. */
	isize = ip->i_size;
	if (io->io_new_size > isize)
		isize = io->io_new_size;

	error = xfs_iomap_eof_want_preallocate(mp, io, isize, offset, count,
				ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

	if (prealloc) {
		/* Extend the mapping out to the aligned write iosize. */
		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + mp->m_writeio_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
							&last_fsb);
		if (error)
			return error;
	}

	nimaps = XFS_WRITE_IMAPS;
	firstblock = NULLFSBLOCK;
	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
			  (xfs_filblks_t)(last_fsb - offset_fsb),
			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
			  &nimaps, NULL, NULL);
	if (error && (error != ENOSPC))
		return XFS_ERROR(error);

	/*
	 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
	 * then we must have run out of space - flush delalloc, and retry..
	 */
	if (nimaps == 0) {
		xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
					io, offset, count);
		if (xfs_flush_space(ip, &fsynced, &ioflag))
			return XFS_ERROR(ENOSPC);

		error = 0;
		goto retry;
	}

	/* A zero startblock on a non-realtime file indicates corruption. */
	if (unlikely(!imap[0].br_startblock && !(io->io_flags & XFS_IOCORE_RT)))
		return xfs_cmn_err_fsblock_zero(ip, &imap[0]);

	*ret_imap = imap[0];
	*nmaps = 1;

	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating callers request.
 *
 * Called without a lock on the inode.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *map,
	int		*retmap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t    *io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_bmbt_irec_t	imap[XFS_STRAT_WRITE_IMAPS];
	xfs_trans_t	*tp;
	int		i, nimaps, committed;
	int		error = 0;
	int		nres;

	*retmap = 0;

	/*
	 * Make sure that the dquots are there.
	 */
	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = map->br_blockcount;
	map_start_fsb = map->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, nres,
					XFS_WRITE_LOG_RES(mp),
					0, XFS_TRANS_PERM_LOG_RES,
					XFS_WRITE_LOG_COUNT);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			/* Lock is taken before the reserve and dropped
			 * again after each committed transaction. */
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
			xfs_trans_ihold(tp, ip);

			XFS_BMAP_INIT(&free_list, &first_block);

			nimaps = XFS_STRAT_WRITE_IMAPS;
			/*
			 * Ensure we don't go beyond eof - it is possible
			 * the extents changed since we did the read call,
			 * we dropped the ilock in the interim.
			 */

			end_fsb = XFS_B_TO_FSB(mp, ip->i_size);
			xfs_bmap_last_offset(NULL, ip, &last_block,
				XFS_DATA_FORK);
			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/* Go get the actual blocks */
			error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb,
					XFS_BMAPI_WRITE, &first_block, 1,
					imap, &nimaps, &free_list, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		for (i = 0; i < nimaps; i++) {
			if (unlikely(!imap[i].br_startblock &&
				     !(io->io_flags & XFS_IOCORE_RT)))
				return xfs_cmn_err_fsblock_zero(ip, &imap[i]);
			if ((offset_fsb >= imap[i].br_startoff) &&
			    (offset_fsb < (imap[i].br_startoff +
					   imap[i].br_blockcount))) {
				*map = imap[i];
				*retmap = 1;
				XFS_STATS_INC(xs_xstrat_quick);
				return 0;
			}
			count_fsb -= imap[i].br_blockcount;
		}

		/* So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		nimaps--;
		map_start_fsb = imap[nimaps].br_startoff +
				imap[nimaps].br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}

/*
 * Convert the unwritten extents covering the byte range
 * [offset, offset+count) to regular written extents, looping one
 * transaction at a time until the whole range has been converted.
 * Returns 0 on success or a positive XFS error code.
 */
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t    *io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	uint		resblks;
	int		committed;
	int		error;

	xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN,
				&ip->i_iocore, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
		tp->t_flags |= XFS_TRANS_RESERVE;
		error = xfs_trans_reserve(tp, resblks,
				XFS_WRITE_LOG_RES(mp), 0,
				XFS_TRANS_PERM_LOG_RES,
				XFS_WRITE_LOG_COUNT);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_trans_ihold(tp, ip);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		XFS_BMAP_INIT(&free_list, &firstfsb);
		nimaps = 1;
		error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb,
				  XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
				  1, &imap, &nimaps, &free_list, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_bmap_finish(&(tp), &(free_list), &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		/* A zero startblock on a non-realtime file indicates
		 * corruption. */
		if (unlikely(!imap.br_startblock &&
			     !(io->io_flags & XFS_IOCORE_RT)))
			return xfs_cmn_err_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -