⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 xfs_vnodeops.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
	XFS_QM_DQRELE(mp, udqp);	XFS_QM_DQRELE(mp, gdqp);	*vpp = vp;	/* Fallthrough to std_return with error = 0  */std_return:	if ((*vpp || (error != 0 && dm_event_sent != 0)) &&	    DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,			dir_vp, DM_RIGHT_NULL,			*vpp ? vp:NULL,			DM_RIGHT_NULL, name, NULL,			mode, error, 0);	}	return error; abort_return:	cancel_flags |= XFS_TRANS_ABORT;	/* FALLTHROUGH */ error_return:	if (tp != NULL)		xfs_trans_cancel(tp, cancel_flags);	XFS_QM_DQRELE(mp, udqp);	XFS_QM_DQRELE(mp, gdqp);	if (unlock_dp_on_error)		xfs_iunlock(dp, XFS_ILOCK_EXCL);	goto std_return; abort_rele:	/*	 * Wait until after the current transaction is aborted to	 * release the inode.  This prevents recursive transactions	 * and deadlocks from xfs_inactive.	 */	cancel_flags |= XFS_TRANS_ABORT;	xfs_trans_cancel(tp, cancel_flags);	IRELE(ip);	XFS_QM_DQRELE(mp, udqp);	XFS_QM_DQRELE(mp, gdqp);	goto std_return;}#ifdef DEBUG/* * Some counters to see if (and how often) we are hitting some deadlock * prevention code paths. */int xfs_rm_locks;int xfs_rm_lock_delays;int xfs_rm_attempts;#endif/* * The following routine will lock the inodes associated with the * directory and the named entry in the directory. The locks are * acquired in increasing inode number. * * If the entry is "..", then only the directory is locked. The * vnode ref count will still include that from the .. entry in * this case. * * There is a deadlock we need to worry about. If the locked directory is * in the AIL, it might be blocking up the log. The next inode we lock * could be already locked by another thread waiting for log space (e.g * a permanent log reservation with a long running transaction (see * xfs_itruncate_finish)). To solve this, we must check if the directory * is in the ail and use lock_nowait. If we can't lock, we need to * drop the inode lock on the directory and try again. xfs_iunlock will * potentially push the tail if we were holding up the log. 
*/
STATIC int
xfs_lock_dir_and_entry(
	xfs_inode_t	*dp,
	xfs_inode_t	*ip)	/* inode of entry 'name' */
{
	int		attempts;
	xfs_ino_t	e_inum;
	xfs_inode_t	*ips[2];
	xfs_log_item_t	*lp;

#ifdef DEBUG
	xfs_rm_locks++;
#endif
	attempts = 0;

again:
	/* Take the directory lock first; PARENT gives lockdep the right class. */
	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);

	e_inum = ip->i_ino;

	ITRACE(ip);

	/*
	 * We want to lock in increasing inum. Since we've already
	 * acquired the lock on the directory, we may need to release
	 * it if the inum of the entry turns out to be less.
	 */
	if (e_inum > dp->i_ino) {
		/*
		 * We are already in the right order, so just
		 * lock on the inode of the entry.
		 * We need to use nowait if dp is in the AIL.
		 */
		lp = (xfs_log_item_t *)dp->i_itemp;
		if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
			if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				attempts++;
#ifdef DEBUG
				xfs_rm_attempts++;
#endif
				/*
				 * Unlock dp and try again.
				 * xfs_iunlock will try to push the tail
				 * if the inode is in the AIL.
				 */
				xfs_iunlock(dp, XFS_ILOCK_EXCL);

				if ((attempts % 5) == 0) {
					delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
					xfs_rm_lock_delays++;
#endif
				}
				goto again;
			}
		} else {
			/* Directory is not in the AIL; safe to block. */
			xfs_ilock(ip, XFS_ILOCK_EXCL);
		}
	} else if (e_inum < dp->i_ino) {
		/*
		 * Wrong order: drop the directory lock and take both
		 * locks in ascending-inum order via xfs_lock_inodes().
		 */
		xfs_iunlock(dp, XFS_ILOCK_EXCL);

		ips[0] = ip;
		ips[1] = dp;
		xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);
	}
	/* else	 e_inum == dp->i_ino */
	/*     This can happen if we're asked to lock /x/..
	 *     the entry is "..", which is also the parent directory.
	 */

	/* Always succeeds; the only exits above loop back to 'again'. */
	return 0;
}

#ifdef	DEBUG
/* Retry/contention counters for the deadlock-avoidance paths below. */
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
int xfs_lock_delays;
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with
 * a different value
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;

	return lock_mode;
}

/*
 * The following routine will lock n inodes in exclusive mode.
 * We assume the caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock
 * is in the AIL and we start waiting for another inode that is locked
 * by a thread in a long running transaction (such as truncate). This can
 * result in deadlock since the long running trans might need to wait
 * for the inode we just locked in order to push the tail and free space
 * in the log.
 */
void
xfs_lock_inodes(
	xfs_inode_t	**ips,		/* inodes to lock, in i_ino order */
	int		inodes,		/* number of entries in ips[] */
	int		first_locked,	/* nonzero if ips[0] is already held */
	uint		lock_mode)
{
	int		attempts = 0, i, j, try_lock;
	xfs_log_item_t	*lp;

	ASSERT(ips && (inodes >= 2)); /* we need at least two */

	if (first_locked) {
		try_lock = 1;
		i = 1;
	} else {
		try_lock = 0;
		i = 0;
	}

again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i-1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes
		 * are not in the AIL.
		 * If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = (xfs_log_item_t *)ips[j]->i_itemp;
				if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
					try_lock++;
				}
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (try_lock) {
			/* try_lock must be 0 if i is 0. */
			/*
			 * try_lock means we have an inode locked
			 * that is in the AIL.
			 */
			ASSERT(i != 0);
			if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) {
				attempts++;

				/*
				 * Unlock all previous guys and try again.
				 * xfs_iunlock will try to push the tail
				 * if the inode is in the AIL.
				 */
				for(j = i - 1; j >= 0; j--) {
					/*
					 * Check to see if we've already
					 * unlocked this one.
					 * Not the first one going back,
					 * and the inode ptr is the same.
					 */
					if ((j != (i - 1)) && ips[j] ==
								ips[j+1])
						continue;

					xfs_iunlock(ips[j], lock_mode);
				}

				if ((attempts % 5) == 0) {
					delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
					xfs_lock_delays++;
#endif
				}
				/* Restart from the first inode. */
				i = 0;
				try_lock = 0;
				goto again;
			}
		} else {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
		}
	}

#ifdef DEBUG
	/* Bucket the retry count so contention frequency is observable. */
	if (attempts) {
		if (attempts < 5) xfs_small_retries++;
		else if (attempts < 100) xfs_middle_retries++;
		else xfs_lots_retries++;
	} else {
		xfs_locked_n++;
	}
#endif
}

#ifdef	DEBUG
/* Records the __LINE__ of the most recent error bail-out in xfs_remove(). */
#define	REMOVE_DEBUG_TRACE(x)	{remove_which_error_return = (x);}
int remove_which_error_return = 0;
#else /* ! DEBUG */
#define	REMOVE_DEBUG_TRACE(x)
#endif	/* !
DEBUG */intxfs_remove(	xfs_inode_t             *dp,	bhv_vname_t		*dentry){	bhv_vnode_t		*dir_vp = XFS_ITOV(dp);	char			*name = VNAME(dentry);	xfs_mount_t		*mp = dp->i_mount;	xfs_inode_t             *ip;	xfs_trans_t             *tp = NULL;	int                     error = 0;	xfs_bmap_free_t         free_list;	xfs_fsblock_t           first_block;	int			cancel_flags;	int			committed;	int			dm_di_mode = 0;	int			link_zero;	uint			resblks;	int			namelen;	vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address);	if (XFS_FORCED_SHUTDOWN(mp))		return XFS_ERROR(EIO);	namelen = VNAMELEN(dentry);	if (!xfs_get_dir_entry(dentry, &ip)) {	        dm_di_mode = ip->i_d.di_mode;		IRELE(ip);	}	if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) {		error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dir_vp,					DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,					name, NULL, dm_di_mode, 0, 0);		if (error)			return error;	}	/* From this point on, return through std_return */	ip = NULL;	/*	 * We need to get a reference to ip before we get our log	 * reservation. The reason for this is that we cannot call	 * xfs_iget for an inode for which we do not have a reference	 * once we've acquired a log reservation. This is because the	 * inode we are trying to get might be in xfs_inactive going	 * for a log reservation. Since we'll have to wait for the	 * inactive code to complete before returning from xfs_iget,	 * we need to make sure that we don't have log space reserved	 * when we call xfs_iget.  Instead we get an unlocked reference	 * to the inode before getting our log reservation.	 
*/	error = xfs_get_dir_entry(dentry, &ip);	if (error) {		REMOVE_DEBUG_TRACE(__LINE__);		goto std_return;	}	dm_di_mode = ip->i_d.di_mode;	vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address);	ITRACE(ip);	error = XFS_QM_DQATTACH(mp, dp, 0);	if (!error && dp != ip)		error = XFS_QM_DQATTACH(mp, ip, 0);	if (error) {		REMOVE_DEBUG_TRACE(__LINE__);		IRELE(ip);		goto std_return;	}	tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;	/*	 * We try to get the real space reservation first,	 * allowing for directory btree deletion(s) implying	 * possible bmap insert(s).  If we can't get the space	 * reservation then we use 0 instead, and avoid the bmap	 * btree insert(s) in the directory code by, if the bmap	 * insert tries to happen, instead trimming the LAST	 * block from the directory.	 */	resblks = XFS_REMOVE_SPACE_RES(mp);	error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,			XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT);	if (error == ENOSPC) {		resblks = 0;		error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,				XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT);	}	if (error) {		ASSERT(error != ENOSPC);		REMOVE_DEBUG_TRACE(__LINE__);		xfs_trans_cancel(tp, 0);		IRELE(ip);		return error;	}	error = xfs_lock_dir_and_entry(dp, ip);	if (error) {		REMOVE_DEBUG_TRACE(__LINE__);		xfs_trans_cancel(tp, cancel_flags);		IRELE(ip);		goto std_return;	}	/*	 * At this point, we've gotten both the directory and the entry	 * inodes locked.	 */	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);	if (dp != ip) {		/*		 * Increment vnode ref count only in this case since		 * there's an extra vnode reference in the case where		 * dp == ip.		 */		IHOLD(dp);		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);	}	/*	 * Entry must exist since we did a lookup in xfs_lock_dir_and_entry.	 
*/	XFS_BMAP_INIT(&free_list, &first_block);	error = xfs_dir_removename(tp, dp, name, namelen, ip->i_ino,					&first_block, &free_list, 0);	if (error) {		ASSERT(error != ENOENT);		REMOVE_DEBUG_TRACE(__LINE__);		goto error1;	}	xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);	dp->i_gen++;	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);	error = xfs_droplink(tp, ip);	if (error) {		REMOVE_DEBUG_TRACE(__LINE__);		goto error1;	}	/* Determine if this is the last link while	 * we are in the transaction.	 */	link_zero = (ip)->i_d.di_nlink==0;	/*	 * Take an extra ref on the inode so that it doesn't	 * go to xfs_inactive() from within the commit.	 */	IHOLD(ip);	/*	 * If this is a synchronous mount, make sure that the	 * remove transaction goes to disk before returning to	 * the user.	 */	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {		xfs_trans_set_sync(tp);	}	error = xfs_bmap_finish(&tp, &free_list, &committed);	if (error) {		REMOVE_DEBUG_TRACE(__LINE__);		goto error_rele;	}	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);	if (error) {		IRELE(ip);		goto std_return;	}	/*	 * Before we drop our extra reference to the inode, purge it	 * from the refcache if it is there.  By waiting until afterwards	 * to do the IRELE, we ensure that we won't go inactive in the	 * xfs_refcache_purge_ip routine (although that would be OK).	 */	xfs_refcache_purge_ip(ip);	/*	 * If we are using filestreams, kill the stream association.	 * If the file is still open it may get a new one but that	 * will get killed on last close in xfs_close() so we don't	 * have 

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -