vfs_subr.c

From "OSKIT source code for component-based operating system development" · C code · 2,733 lines total · page 1 of 5

C
2,733
Font size
			/*
			 * NOTE(review): this is the tail of a function that
			 * begins before this excerpt.  It appears to handle
			 * aliased device vnodes (VALIASED / spechash_slock);
			 * confirm against the full file.
			 */
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	/* Take over the identity of the new vnode, then discard it. */
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it.  If the vnode lock bit is set the
 * vnode is being eliminated in vgone.  In that case, we can not
 * grab the vnode, so the process is awakened when the transition is
 * completed, and an error returned to indicate that the vnode is no
 * longer usable (possibly having been changed to a new file system type).
 *
 * If LK_INTERLOCK is set in flags, the caller already holds
 * vp->v_interlock; otherwise it is taken here.  Returns 0 on success,
 * EBUSY/ENOENT when the vnode is being cleaned, or the vn_lock() error.
 */
int
vget(vp, flags)
	struct vnode *vp;
	int flags;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0)
		simple_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		if (flags & LK_NOWAIT) {
			return EBUSY;
		}
		vp->v_flag |= VXWANT;
		/* PNORELOCK: ltsleep drops the interlock and does not retake it. */
		ltsleep((caddr_t)vp, PINOD|PNORELOCK,
		    "vget", 0, &vp->v_interlock);
		return (ENOENT);
	}
	/*
	 * A usecount of zero means the vnode sits on a free list
	 * (hold list if it has buffer/page references); pull it off
	 * before making it active again.
	 */
	if (vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		if (vp->v_holdcnt > 0)
			TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		else
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vget", vp);
		panic("vget: usecount overflow, vp %p", vp);
	}
#endif
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK))) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active.  We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			simple_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (vp->v_usecount > 0) {
				simple_unlock(&vp->v_interlock);
				return (error);
			}
			/*
			 * insert at tail of LRU list
			 */
			simple_lock(&vnode_free_list_slock);
			if (vp->v_holdcnt > 0)
				TAILQ_INSERT_TAIL(&vnode_hold_list, vp,
				    v_freelist);
			else
				TAILQ_INSERT_TAIL(&vnode_free_list, vp,
				    v_freelist);
			simple_unlock(&vnode_free_list_slock);
			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * vput(), just unlock and vrele()
 *
 * The vnode must be locked on entry; the reference is dropped and,
 * if it was the last one, the vnode is put on the tail of the LRU
 * free list and VOP_INACTIVE() is called (which unlocks it).
 */
void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX: should be passed in by caller */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: ref cnt");
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	vp->v_flag &= ~VTEXT;
	simple_unlock(&vp->v_interlock);
	/* Vnode is still locked here; VOP_INACTIVE is expected to unlock it. */
	VOP_INACTIVE(vp, p);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 *
 * Unlike vput(), the vnode is unlocked on entry, so the exclusive
 * lock must be acquired before VOP_INACTIVE() can be called.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX: should be passed in by caller */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	simple_lock(&vp->v_interlock);
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt vp %p", vp);
	}
#endif
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	vp->v_flag &= ~VTEXT;
	/*
	 * vn_lock consumes the interlock (LK_INTERLOCK).  If the lock
	 * cannot be obtained, VOP_INACTIVE is silently skipped.
	 */
	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0)
		VOP_INACTIVE(vp, p);
}
#endif /*OSKIT*/

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	struct vnode *vp;
{

	/*
	 * If it is on the freelist and the hold count is currently
	 * zero, move it to the hold list.  The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone.  If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */
	simple_lock(&vp->v_interlock);
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	vp->v_holdcnt++;
	simple_unlock(&vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt vp %p", vp);
	vp->v_holdcnt--;
	/*
	 * If it is on the holdlist and the hold count drops to
	 * zero, move it to the free list.  The test of the back
	 * pointer and the use reference count of zero is because
	 * it will be removed from a free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone.  If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 * So, the back pointer is explicitly set to `0xdeadb' in
	 * getnewvnode after removing it from a freelist to ensure
	 * that we do not try to move it here.
	 */
	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
	    vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		simple_lock(&vnode_free_list_slock);
		TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
	}
	simple_unlock(&vp->v_interlock);
}

#ifndef OSKIT
/*
 * Vnode reference.
 *
 * Add a reference to an already-active vnode; panics if the vnode is
 * inactive (use vget() to activate a vnode off the free list).
 */
void
vref(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount <= 0)
		panic("vref used where vget required, vp %p", vp);
	vp->v_usecount++;
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		vprint("vref", vp);
		panic("vref: usecount overflow, vp %p", vp);
	}
#endif
	simple_unlock(&vp->v_interlock);
}
#endif /*OSKIT*/
#endif /* DIAGNOSTIC */

#ifndef OSKIT
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 *
 * NOTE(review): the comment above refers to MNT_NOFORCE/MNT_FORCE,
 * but the code below tests SKIPSYSTEM/WRITECLOSE/FORCECLOSE flags —
 * confirm the flag naming against the callers.
 */
#ifdef DEBUG
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX: should be passed in by caller */
	struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		/*
		 * The mount-point list may have changed while the
		 * mntvnode lock was dropped below; restart the scan.
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;

		simple_lock(&vp->v_interlock);
		/*
		 * Skip over a vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device.  For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			simple_lock(&mntvnode_slock);
			continue;
		}
#ifdef DEBUG
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		simple_unlock(&vp->v_interlock);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Called with the vnode interlock held; the interlock is consumed by
 * the VOP_LOCK(LK_DRAIN | LK_INTERLOCK) below.
 */
void
vclean(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount) != 0) {
		/* We have the vnode interlock. */
		vp->v_usecount++;
#ifdef DIAGNOSTIC
		if (vp->v_usecount == 0) {
			vprint("vclean", vp);
			panic("vclean: usecount overflow");
		}
#endif
	}

	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock, vp %p", vp);
	vp->v_flag |= VXLOCK;
	vp->v_flag &= ~VTEXT;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out.  The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK);

	/*
	 * Clean out any cached data associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.  Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim, vp %p", vp);
	if (active) {
		/*
		 * Inline copy of vrele() since VOP_INACTIVE
		 * has already been called.
		 */
		simple_lock(&vp->v_interlock);
		if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
				vprint("vclean: bad ref count", vp);
				panic("vclean: ref cnt");
			}
#endif
			/*
			 * Insert at tail of LRU list.
			 */
			simple_unlock(&vp->v_interlock);
			simple_lock(&vnode_free_list_slock);
#ifdef DIAGNOSTIC
			if (vp->v_vnlock) {
				if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
					vprint("vclean: lock not drained", vp);
			}
			if (vp->v_holdcnt > 0)
				panic("vclean: not clean, vp %p", vp);
#endif
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			simple_unlock(&vnode_free_list_slock);
		} else
			simple_unlock(&vp->v_interlock);
	}

	cache_purge(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	simple_lock(&vp->v_interlock);
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		simple_unlock(&vp->v_interlock);
		wakeup((caddr_t)vp);
	} else
		simple_unlock(&vp->v_interlock);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 *
 * Returns 1 if the vnode was recycled (vgonel() consumed the vnode
 * interlock), 0 if it was in use and nothing was done.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp)
			simple_unlock(inter_lkp);
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX: should be passed in by caller */

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 *
 * NOTE(review): this function is truncated at the end of this excerpt
 * (page 1/5 of the original file); the remainder continues on the
 * following page.
 */
void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		ltsleep((caddr_t)vp, PINOD | PNORELOCK,

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?