/*
 * xfs_iomap.c
 *
 * Extracted from the Linux 2.6.9 kernel (OMAP1710 build); 1,001 lines
 * total, page 1 of 2.  The surrounding web-viewer banner has been
 * converted into this comment so the file remains valid C.
 */
	/*
	 * NOTE(review): this is the TAIL of xfs_iomap_write_direct(); the
	 * function head (and the declarations of tp, imap, nimaps, free_list,
	 * io, ip, ret_imap, nmaps used below) is outside this chunk.
	 */
	if (error) {
		goto error0;
	}

	/* Commit the allocation transaction; on failure the trans is gone,
	 * so we must NOT cancel it again — just report the error. */
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
	if (error) {
		goto error_out;
	}

	/* copy any maps to caller's array and return any error. */
	if (nimaps == 0) {
		/* bmapi gave us nothing back: treat as out of space */
		error = (ENOSPC);
		goto error_out;
	}

	*ret_imap = imap[0];
	*nmaps = 1;
	/*
	 * Sanity check: a data-device mapping starting at block zero
	 * indicates corruption, so panic loudly.  NOTE(review): presumably
	 * because block 0 holds the superblock on the data device — confirm.
	 */
	if ( !(io->io_flags & XFS_IOCORE_RT)  && !ret_imap->br_startblock) {
		cmn_err(CE_PANIC,"Access to block zero:  fs <%s> inode: %lld "
			"start_block : %llx start_off : %llx blkcnt : %llx "
			"extent-state : %x \n",
			(ip->i_mount)->m_fsname,
			(long long)ip->i_ino,
			ret_imap->br_startblock, ret_imap->br_startoff,
			ret_imap->br_blockcount,ret_imap->br_state);
	}
	return 0;

 error0:	/* Cancel bmap, unlock inode, and cancel trans */
	xfs_bmap_cancel(&free_list);

 error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	*nmaps = 0;	/* nothing set-up here */

error_out:
	return XFS_ERROR(error);
}

/*
 * xfs_iomap_write_delay - set up delayed-allocation mappings for a
 * buffered write.
 *
 * Maps [offset, offset + count) with XFS_BMAPI_DELAY | XFS_BMAPI_WRITE
 * (no transaction), retrying after flushing delalloc space when bmapi
 * returns no mappings.  On success the first mapping is copied to
 * *ret_imap and *nmaps is set to 1.
 *
 * @ip:       inode being written
 * @offset:   byte offset of the write
 * @count:    byte length of the write
 * @ioflag:   BMAPI_* flags (BMAPI_SYNC suppresses speculative prealloc);
 *            passed by address to xfs_flush_space, which may modify it
 * @ret_imap: out - first block mapping covering the request
 * @nmaps:    out - number of mappings returned (always 1 on success)
 *
 * Returns 0 or a positive errno wrapped by XFS_ERROR().
 * Caller must hold the inode lock in update mode (see ASSERT below).
 */
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	loff_t		offset,
	size_t		count,
	int		ioflag,
	xfs_bmbt_irec_t *ret_imap,
	int		*nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb;	/* first FSB of the request */
	xfs_fileoff_t	last_fsb;	/* FSB just past the allocation */
	xfs_fsize_t	isize;		/* effective file size */
	xfs_fsblock_t	firstblock;
	int		nimaps;
	int		error;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		aeof;		/* allocating at/past EOF? */
	int		fsynced = 0;	/* flush-retry state for xfs_flush_space */

	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
	if (error)
		return XFS_ERROR(error);

retry:
	/* Use the larger of the on-disk size and any pending new size. */
	isize = ip->i_d.di_size;
	if (io->io_new_size > isize) {
		isize = io->io_new_size;
	}

	aeof = 0;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	/*
	 * If the caller is doing a write at the end of the file,
	 * then extend the allocation (and the buffer used for the write)
	 * out to the file system's write iosize.  We clean up any extra
	 * space left over when the file is closed in xfs_inactive().
	 *
	 * For sync writes, we are flushing delayed allocate space to
	 * try to make additional space available for allocation near
	 * the filesystem full boundary - preallocation hurts in that
	 * situation, of course.
	 */
	if (!(ioflag & BMAPI_SYNC) && ((offset + count) > ip->i_d.di_size)) {
		xfs_off_t	aligned_offset;
		xfs_filblks_t   count_fsb;
		unsigned int	iosize;
		xfs_fileoff_t	ioalign;
		int		n;
		xfs_fileoff_t   start_fsb;

		/*
		 * If there are any real blocks past eof, then don't
		 * do any speculative allocation.
		 */
		start_fsb = XFS_B_TO_FSBT(mp,
					((xfs_ufsize_t)(offset + count - 1)));
		count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
		/* Read-only walk of the extents from the write's end to
		 * the maximum file offset, looking for a real block. */
		while (count_fsb > 0) {
			nimaps = XFS_WRITE_IMAPS;
			error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
					0, &firstblock, 0, imap, &nimaps, NULL);
			if (error) {
				return error;
			}
			for (n = 0; n < nimaps; n++) {
				/* Same block-zero corruption check as above. */
				if ( !(io->io_flags & XFS_IOCORE_RT)  &&
					!imap[n].br_startblock) {
					cmn_err(CE_PANIC,"Access to block "
						"zero:  fs <%s> inode: %lld "
						"start_block : %llx start_off "
						": %llx blkcnt : %llx "
						"extent-state : %x \n",
						(ip->i_mount)->m_fsname,
						(long long)ip->i_ino,
						imap[n].br_startblock,
						imap[n].br_startoff,
						imap[n].br_blockcount,
						imap[n].br_state);
				}
				/* Real extent past EOF: skip speculative
				 * preallocation entirely. */
				if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
				    (imap[n].br_startblock != DELAYSTARTBLOCK)) {
					goto write_map;
				}
				start_fsb += imap[n].br_blockcount;
				count_fsb -= imap[n].br_blockcount;
			}
		}
		/* Nothing real past EOF: round the allocation end up to
		 * the filesystem's write iosize. */
		iosize = mp->m_writeio_blocks;
		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + iosize;
		aeof = 1;
	}
write_map:
	nimaps = XFS_WRITE_IMAPS;
	firstblock = NULLFSBLOCK;

	/*
	 * If mounted with the "-o swalloc" option, roundup the allocation
	 * request to a stripe width boundary if the file size is >=
	 * stripe width and we are allocating past the allocation eof.
	 */
	if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_swidth
	    && (mp->m_flags & XFS_MOUNT_SWALLOC)
	    && (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)) && aeof) {
		int eof;
		xfs_fileoff_t new_last_fsb;

		new_last_fsb = roundup_64(last_fsb, mp->m_swidth);
		/* Only take the rounded size if it stays past EOF. */
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error) {
			return error;
		}
		if (eof) {
			last_fsb = new_last_fsb;
		}
	/*
	 * Roundup the allocation request to a stripe unit (m_dalign) boundary
	 * if the file size is >= stripe unit size, and we are allocating past
	 * the allocation eof.
	 */
	} else if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_dalign &&
		   (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)) && aeof) {
		int eof;
		xfs_fileoff_t new_last_fsb;

		new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error) {
			return error;
		}
		if (eof) {
			last_fsb = new_last_fsb;
		}
	/*
	 * Round up the allocation request to a real-time extent boundary
	 * if the file is on the real-time subvolume.
	 */
	} else if (io->io_flags & XFS_IOCORE_RT && aeof) {
		int eof;
		xfs_fileoff_t new_last_fsb;

		new_last_fsb = roundup_64(last_fsb, mp->m_sb.sb_rextsize);
		error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error) {
			return error;
		}
		if (eof)
			last_fsb = new_last_fsb;
	}

	/* Create the delayed-allocation reservation (no transaction). */
	error = xfs_bmapi(NULL, ip, offset_fsb,
			  (xfs_filblks_t)(last_fsb - offset_fsb),
			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
			  &nimaps, NULL);
	/*
	 * This can be EDQUOT, if nimaps == 0
	 */
	if (error && (error != ENOSPC)) {
		return XFS_ERROR(error);
	}
	/*
	 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
	 * then we must have run out of space.
	 */
	if (nimaps == 0) {
		xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
					io, offset, count);
		/* Try to free up delalloc space and retry; give up with
		 * ENOSPC once xfs_flush_space says there is nothing left. */
		if (xfs_flush_space(ip, &fsynced, &ioflag))
			return XFS_ERROR(ENOSPC);

		error = 0;
		goto retry;
	}

	*ret_imap = imap[0];
	*nmaps = 1;
	/* Block-zero corruption check (see note above). */
	if ( !(io->io_flags & XFS_IOCORE_RT)  && !ret_imap->br_startblock) {
		cmn_err(CE_PANIC,"Access to block zero:  fs <%s> inode: %lld "
			"start_block : %llx start_off : %llx blkcnt : %llx "
			"extent-state : %x \n",
			(ip->i_mount)->m_fsname,
			(long long)ip->i_ino,
			ret_imap->br_startblock, ret_imap->br_startoff,
			ret_imap->br_blockcount,ret_imap->br_state);
	}
	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating callers request.
 *
 * Called without a lock on the inode.
 *
 * @ip:     inode whose delalloc extent is being converted
 * @map:    in/out - the delayed extent to convert; on success (*retmap == 1)
 *          overwritten with the real extent covering map->br_startoff
 * @retmap: out - 1 if a covering real extent was produced, else 0
 *
 * Returns 0 or a positive errno wrapped by XFS_ERROR().
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t *map,
	int		*retmap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t    *io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_bmbt_irec_t	imap[XFS_STRAT_WRITE_IMAPS];
	xfs_trans_t	*tp;
	int		i, nimaps, committed;
	int		error = 0;
	int		nres;

	*retmap = 0;

	/*
	 * Make sure that the dquots are there.
	 */
	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
		return XFS_ERROR(error);

	offset_fsb = map->br_startoff;
	count_fsb = map->br_blockcount;
	map_start_fsb = offset_fsb;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, nres,
					XFS_WRITE_LOG_RES(mp),
					0, XFS_TRANS_PERM_LOG_RES,
					XFS_WRITE_LOG_COUNT);
			/* On ENOSPC retry with zero block reservation —
			 * the blocks were already reserved at delalloc time. */
			if (error == ENOSPC) {
				error = xfs_trans_reserve(tp, 0,
						XFS_WRITE_LOG_RES(mp),
						0,
						XFS_TRANS_PERM_LOG_RES,
						XFS_WRITE_LOG_COUNT);
			}
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			/* Lock AFTER reserving log space (lock ordering);
			 * ihold keeps the inode across the commit. */
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
			xfs_trans_ihold(tp, ip);

			XFS_BMAP_INIT(&free_list, &first_block);
			nimaps = XFS_STRAT_WRITE_IMAPS;
			/*
			 * Ensure we don't go beyond eof - it is possible
			 * the extents changed since we did the read call,
			 * we dropped the ilock in the interim.
			 */
			end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size);
			xfs_bmap_last_offset(NULL, ip, &last_block,
				XFS_DATA_FORK);
			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					/* whole range is now past EOF */
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/* Go get the actual blocks */
			error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
					XFS_BMAPI_WRITE, &first_block, 1,
					imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list,
					first_block, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp,
					XFS_TRANS_RELEASE_LOG_RES, NULL);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		for (i = 0; i < nimaps; i++) {
			/* Block-zero corruption check (data device only). */
			if ( !(io->io_flags & XFS_IOCORE_RT)  &&
				!imap[i].br_startblock) {
				cmn_err(CE_PANIC,"Access to block zero:  "
					"fs <%s> inode: %lld "
					"start_block : %llx start_off : %llx "
					"blkcnt : %llx extent-state : %x \n",
					(ip->i_mount)->m_fsname,
					(long long)ip->i_ino,
					imap[i].br_startblock,
					imap[i].br_startoff,
				        imap[i].br_blockcount,imap[i].br_state);
			}
			/* Does this extent contain the caller's offset? */
			if ((map->br_startoff >= imap[i].br_startoff) &&
			    (map->br_startoff < (imap[i].br_startoff +
						 imap[i].br_blockcount))) {
				*map = imap[i];
				*retmap = 1;
				XFS_STATS_INC(xs_xstrat_quick);
				return 0;
			}
			count_fsb -= imap[i].br_blockcount;
		}

		/* So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		nimaps--;
		offset_fsb = imap[nimaps].br_startoff +
			     imap[nimaps].br_blockcount;
		map_start_fsb = offset_fsb;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}

/*
 * xfs_iomap_write_unwritten - convert a byte range from unwritten to
 * real (written) extents.
 *
 * Loops one transaction per converted extent until the whole range
 * [offset, offset + count) has been covered.
 *
 * @ip:     inode containing the unwritten extents
 * @offset: byte offset of the range
 * @count:  byte length of the range
 *
 * Returns 0 or a positive errno wrapped by XFS_ERROR().
 */
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	loff_t		offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t    *io = &ip->i_iocore;
	xfs_trans_t	*tp;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;	/* blocks converted this pass */
	xfs_bmbt_irec_t	imap;
	int		committed;
	int		error;
	int		nres;
	int		nimaps;
	xfs_fsblock_t	firstfsb;
	xfs_bmap_free_t	free_list;

	xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN,
				&ip->i_iocore, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	do {
		nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);

		/*
		 * set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
		error = xfs_trans_reserve(tp, nres,
				XFS_WRITE_LOG_RES(mp), 0,
				XFS_TRANS_PERM_LOG_RES,
				XFS_WRITE_LOG_COUNT);
		if (error) {
			xfs_trans_cancel(tp, 0);
			goto error0;
		}

		/* Lock after the reservation; ihold keeps the inode
		 * referenced across the commit. */
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_trans_ihold(tp, ip);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		XFS_BMAP_INIT(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
				  XFS_BMAPI_WRITE, &firstfsb,
				  1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_bmap_finish(&(tp), &(free_list),
				firstfsb, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			goto error0;

		/* Block-zero corruption check (data device only). */
		if ( !(io->io_flags & XFS_IOCORE_RT)  && !imap.br_startblock) {
			cmn_err(CE_PANIC,"Access to block zero:  fs <%s> "
				"inode: %lld start_block : %llx start_off : "
				"%llx blkcnt : %llx extent-state : %x \n",
				(ip->i_mount)->m_fsname,
				(long long)ip->i_ino,
				imap.br_startblock,imap.br_startoff,
				imap.br_blockcount,imap.br_state);
		}

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
error0:
	return XFS_ERROR(error);
}

/* (web code-viewer keyboard-shortcut help removed — not part of the source) */