xfs_iget.c

来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 994 行 · 第 1/2 页

C
994
字号
/*
 * Do the setup for the various locks within the incore inode.
 */
void
xfs_inode_lock_init(
	xfs_inode_t	*ip,
	vnode_t		*vp)
{
	/* i_lock allows equal-priority readers/writers; both mrlocks use
	 * barrier mode.  vp->v_number tags the locks for lock diagnostics. */
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", (long)vp->v_number);
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number);
	init_waitqueue_head(&ip->i_ipin_wait);
	atomic_set(&ip->i_pincount, 0);
	/* i_flock starts at 1 (unlocked); it serializes inode flushing. */
	init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number);
}

/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*ip;

	ih = XFS_IHASH(mp, ino);
	read_lock(&ih->ih_lock);
	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
		if (ip->i_ino == ino) {
			/*
			 * If we find it and tp matches, return it.
			 * Otherwise break from the loop and return
			 * NULL.
			 */
			if (ip->i_transp == tp) {
				read_unlock(&ih->ih_lock);
				return (ip);
			}
			break;
		}
	}
	read_unlock(&ih->ih_lock);
	return (NULL);
}

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	vnode_t	*vp = XFS_ITOV(ip);

	vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address);

	xfs_iunlock(ip, lock_flags);

	VN_RELE(vp);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(xfs_inode_t	*ip,
	     uint		lock_flags)
{
	vnode_t		*vp = XFS_ITOV(ip);
	struct inode	*inode = LINVFS_GET_IP(vp);

	vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);

	/* A brand-new Linux inode may still be in the I_NEW state; finish
	 * its VFS-level initialization before releasing our reference. */
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	VN_RELE(vp);
}

/*
 * This routine embodies the part of the reclaim code that pulls
 * the inode from the inode hash table and the mount structure's
 * inode list.
 * This should only be called from xfs_reclaim().
 */
void
xfs_ireclaim(xfs_inode_t *ip)
{
	vnode_t		*vp;

	/*
	 * Remove from old hash list and mount list.
	 */
	XFS_STATS_INC(xs_ig_reclaims);

	xfs_iextract(ip);

	/*
	 * Here we do a spurious inode lock in order to coordinate with
	 * xfs_sync().  This is because xfs_sync() references the inodes
	 * in the mount list without taking references on the corresponding
	 * vnodes.  We make that OK here by ensuring that we wait until
	 * the inode is unlocked in xfs_sync() before we go ahead and
	 * free it.  We get both the regular lock and the io lock because
	 * the xfs_sync() code may need to drop the regular one but will
	 * still hold the io lock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any. An inode may escape
	 * xfs_inactive and get here via vn_alloc->vn_reclaim path.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);

	/*
	 * Pull our behavior descriptor from the vnode chain.
	 */
	vp = XFS_ITOV_NULL(ip);
	if (vp) {
		vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
	}

	/*
	 * Free all memory associated with the inode.
	 * Note: the locks taken above are intentionally not dropped first;
	 * the whole inode (locks included) is torn down here.
	 */
	xfs_idestroy(ip);
}

/*
 * This routine removes an about-to-be-destroyed inode from
 * all of the lists in which it is located with the exception
 * of the behavior chain.
 */
void
xfs_iextract(
	xfs_inode_t	*ip)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*iq;
	xfs_mount_t	*mp;
	xfs_chash_t	*ch;
	xfs_chashlist_t *chl, *chm;
	SPLDECL(s);

	/* Unlink from the inode hash chain (doubly linked via i_prevp,
	 * a pointer to the previous element's i_next slot). */
	ih = ip->i_hash;
	write_lock(&ih->ih_lock);
	if ((iq = ip->i_next)) {
		iq->i_prevp = ip->i_prevp;
	}
	*ip->i_prevp = iq;
	write_unlock(&ih->ih_lock);

	/*
	 * Remove from cluster hash list
	 *   1) delete the chashlist if this is the last inode on the chashlist
	 *   2) unchain from list of inodes
	 *   3) point chashlist->chl_ip to 'chl_next' if to this inode.
	 */
	mp = ip->i_mount;
	ch = XFS_CHASH(mp, ip->i_blkno);
	s = mutex_spinlock(&ch->ch_lock);

	if (ip->i_cnext == ip) {
		/* Last inode on chashlist */
		ASSERT(ip->i_cnext == ip && ip->i_cprev == ip);
		ASSERT(ip->i_chash != NULL);
		chm=NULL;
		/* Find and free the chashlist entry for this cluster block,
		 * unlinking it from the chain (chm tracks the predecessor). */
		for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
			if (chl->chl_blkno == ip->i_blkno) {
				if (chm == NULL) {
					/* first item on the list */
					ch->ch_list = chl->chl_next;
				} else {
					chm->chl_next = chl->chl_next;
				}
				kmem_zone_free(xfs_chashlist_zone, chl);
				break;
			} else {
				ASSERT(chl->chl_ip != ip);
				chm = chl;
			}
		}
		/* The entry must have existed; dying here beats corrupting
		 * the cluster hash silently. */
		ASSERT_ALWAYS(chl != NULL);
	} else {
		/* delete one inode from a non-empty list */
		iq = ip->i_cnext;
		iq->i_cprev = ip->i_cprev;
		ip->i_cprev->i_cnext = iq;
		if (ip->i_chash->chl_ip == ip) {
			ip->i_chash->chl_ip = iq;
		}
		/* Poison the stale pointers with the caller's address so a
		 * use-after-extract shows who extracted the inode. */
		ip->i_chash = __return_address;
		ip->i_cprev = __return_address;
		ip->i_cnext = __return_address;
	}
	mutex_spinunlock(&ch->ch_lock, s);

	/*
	 * Remove from mount's inode list.
	 */
	XFS_MOUNT_ILOCK(mp);
	ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL));
	iq = ip->i_mnext;
	iq->i_mprev = ip->i_mprev;
	ip->i_mprev->i_mnext = iq;

	/*
	 * Fix up the head pointer if it points to the inode being deleted.
	 */
	if (mp->m_inodes == ip) {
		if (ip == iq) {
			/* ip was the only inode on the mount list. */
			mp->m_inodes = NULL;
		} else {
			mp->m_inodes = iq;
		}
	}

	/* Deal with the deleted inodes list */
	list_del_init(&ip->i_reclaim);

	mp->m_ireclaims++;
	XFS_MOUNT_IUNLOCK(mp);
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards to bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	/* B-tree format with extents not yet in core: reading the map will
	 * modify the incore extent list, so take the lock exclusively. */
	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(xfs_inode_t	*ip,
	  uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

	/* IO lock is always taken before the inode lock (lock ordering). */
	if (lock_flags & XFS_IOLOCK_EXCL) {
		mrupdate(&ip->i_iolock);
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		mraccess(&ip->i_iolock);
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		mrupdate(&ip->i_lock);
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		mraccess(&ip->i_lock);
	}
	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 *
 */
int
xfs_ilock_nowait(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	int	iolocked;
	int	ilocked;

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

	iolocked = 0;
	if (lock_flags & XFS_IOLOCK_EXCL) {
		iolocked = mrtryupdate(&ip->i_iolock);
		if (!iolocked) {
			return 0;
		}
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		iolocked = mrtryaccess(&ip->i_iolock);
		if (!iolocked) {
			return 0;
		}
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		ilocked = mrtryupdate(&ip->i_lock);
		if (!ilocked) {
			/* All-or-nothing: back out the IO lock on failure. */
			if (iolocked) {
				mrunlock(&ip->i_iolock);
			}
			return 0;
		}
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		ilocked = mrtryaccess(&ip->i_lock);
		if (!ilocked) {
			if (iolocked) {
				mrunlock(&ip->i_iolock);
			}
			return 0;
		}
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;
}

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 *
 */
void
xfs_iunlock(xfs_inode_t	*ip,
	    uint	lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
		       (ismrlocked(&ip->i_iolock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
		       (ismrlocked(&ip->i_iolock, MR_UPDATE)));
		mrunlock(&ip->i_iolock);
	}

	if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
		       (ismrlocked(&ip->i_lock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
		       (ismrlocked(&ip->i_lock, MR_UPDATE)));
		mrunlock(&ip->i_lock);

		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
		     ip->i_itemp != NULL) {
			xfs_trans_unlocked_item(ip->i_mount,
						(xfs_log_item_t*)(ip->i_itemp));
		}
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
		mrdemote(&ip->i_lock);
	}
	if (lock_flags & XFS_IOLOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
		mrdemote(&ip->i_iolock);
	}
}

/*
 * The following three routines simply manage the i_flock
 * semaphore embedded in the inode.  This semaphore synchronizes
 * processes attempting to flush the in-core inode back to disk.
 */
void
xfs_iflock(xfs_inode_t *ip)
{
	psema(&(ip->i_flock), PINOD|PLTWAIT);
}

int
xfs_iflock_nowait(xfs_inode_t *ip)
{
	/* Non-blocking: returns non-zero only if the flush lock was taken. */
	return (cpsema(&(ip->i_flock)));
}

void
xfs_ifunlock(xfs_inode_t *ip)
{
	/* Must currently be held (semaphore value <= 0) before releasing. */
	ASSERT(valusema(&(ip->i_flock)) <= 0);
	vsema(&(ip->i_flock));
}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?