
📄 vfs_subr.c

📁 Early FreeBSD implementation
💻 C
📖 Page 1 of 3
		panic("brelvp: NULL");	/*	 * Delete from old vnode list, if on one.	 */	if (bp->b_vnbufs.le_next != NOLIST)		bufremvn(bp);	vp = bp->b_vp;	bp->b_vp = (struct vnode *) 0;	HOLDRELE(vp);}/* * Reassign a buffer from one vnode to another. * Used to assign file specific control information * (indirect blocks) to the vnode to which they belong. */reassignbuf(bp, newvp)	register struct buf *bp;	register struct vnode *newvp;{	register struct buflists *listheadp;	if (newvp == NULL) {		printf("reassignbuf: NULL");		return;	}	/*	 * Delete from old vnode list, if on one.	 */	if (bp->b_vnbufs.le_next != NOLIST)		bufremvn(bp);	/*	 * If dirty, put on list of dirty buffers;	 * otherwise insert onto list of clean buffers.	 */	if (bp->b_flags & B_DELWRI)		listheadp = &newvp->v_dirtyblkhd;	else		listheadp = &newvp->v_cleanblkhd;	bufinsvn(bp, listheadp);}/* * Create a vnode for a block device. * Used for root filesystem, argdev, and swap areas. * Also used for memory file system special devices. */bdevvp(dev, vpp)	dev_t dev;	struct vnode **vpp;{	register struct vnode *vp;	struct vnode *nvp;	int error;	if (dev == NODEV)		return (0);	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);	if (error) {		*vpp = 0;		return (error);	}	vp = nvp;	vp->v_type = VBLK;	if (nvp = checkalias(vp, dev, (struct mount *)0)) {		vput(vp);		vp = nvp;	}	*vpp = vp;	return (0);}/* * Check to see if the new vnode represents a special device * for which we already have a vnode (either because of * bdevvp() or because of a different vnode representing * the same block device). If such an alias exists, deallocate * the existing contents and return the aliased vnode. The * caller is responsible for filling it with its new contents. */struct vnode *checkalias(nvp, nvp_rdev, mp)	register struct vnode *nvp;	dev_t nvp_rdev;	struct mount *mp;{	register struct vnode *vp;	struct vnode **vpp;	if (nvp->v_type != VBLK && nvp->v_type != VCHR)		return (NULLVP);	vpp = &speclisth[SPECHASH(nvp_rdev)];loop:	for (vp = *vpp; vp; vp = vp->v_specnext) {		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)			continue;		/*		 * Alias, but not in use, so flush it out.		 */		if (vp->v_usecount == 0) {			vgone(vp);			goto loop;		}		if (vget(vp, 1))			goto loop;		break;	}	if (vp == NULL || vp->v_tag != VT_NON) {		MALLOC(nvp->v_specinfo, struct specinfo *,			sizeof(struct specinfo), M_VNODE, M_WAITOK);		nvp->v_rdev = nvp_rdev;		nvp->v_hashchain = vpp;		nvp->v_specnext = *vpp;		nvp->v_specflags = 0;		*vpp = nvp;		if (vp != NULL) {			nvp->v_flag |= VALIASED;			vp->v_flag |= VALIASED;			vput(vp);		}		return (NULLVP);	}	VOP_UNLOCK(vp);	vclean(vp, 0);	vp->v_op = nvp->v_op;	vp->v_tag = nvp->v_tag;	nvp->v_type = VNON;	insmntque(vp, mp);	return (vp);}/* * Grab a particular vnode from the free list, increment its * reference count and lock it. The vnode lock bit is set the * vnode is being eliminated in vgone. The process is awakened * when the transition is completed, and an error returned to * indicate that the vnode is no longer usable (possibly having * been changed to a new file system type). */vget(vp, lockflag)	register struct vnode *vp;	int lockflag;{	/*	 * If the vnode is in the process of being cleaned out for	 * another use, we wait for the cleaning to finish and then	 * return failure. Cleaning is determined either by checking	 * that the VXLOCK flag is set, or that the use count is	 * zero with the back pointer set to show that it has been	 * removed from the free list by getnewvnode. 
/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
vget(vp, lockflag)
	register struct vnode *vp;
	int lockflag;
{

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined either by checking
	 * that the VXLOCK flag is set, or that the use count is
	 * zero with the back pointer set to show that it has been
	 * removed from the free list by getnewvnode. The VXLOCK
	 * flag may not have been set yet because vclean is blocked in
	 * the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
	 */
	if ((vp->v_flag & VXLOCK) ||
	    (vp->v_usecount == 0 &&
	     vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);
		return (1);
	}
	if (vp->v_usecount == 0)
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	vp->v_usecount++;
	if (lockflag)
		VOP_LOCK(vp);
	return (0);
}

/*
 * Vnode reference, just increment the count
 */
void
vref(vp)
	struct vnode *vp;
{

	if (vp->v_usecount <= 0)
		panic("vref used where vget required");
	vp->v_usecount++;
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	register struct vnode *vp;
{

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0)
		return;
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0 || vp->v_writecount != 0) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	/*
	 * insert at tail of LRU list
	 */
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	VOP_INACTIVE(vp);
}

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif

vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
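The routines above (vget(), vref(), vput(), vrele(), vhold(), holdrele()) define the reference-counting discipline that the rest of this file assumes. A minimal sketch of the usual pairing, with example_use_vnode() as a hypothetical caller:

/*
 * Illustrative sketch, not part of vfs_subr.c: the usual pairing of
 * the reference-count primitives defined above.
 */
static int
example_use_vnode(struct vnode *vp)
{

	/*
	 * vget() takes the vnode off the free list, bumps v_usecount
	 * and (with lockflag set) locks it; it returns 1 if the vnode
	 * is being torn down by vgone()/vclean().
	 */
	if (vget(vp, 1))
		return (EBUSY);

	/* ... operate on the locked, referenced vnode here ... */

	/*
	 * vput() is VOP_UNLOCK() followed by vrele(); once v_usecount
	 * drops to zero, vrele() runs VOP_INACTIVE() and puts the
	 * vnode on the tail of the LRU free list.
	 */
	vput(vp);
	return (0);
}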
/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	int active;

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		VREF(vp);
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp);
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	/*
	 * Any other processes trying to obtain this lock must first
	 * wait for VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp);
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, IO_NDELAY, NOCRED, NULL);
		VOP_INACTIVE(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);
	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			sleep((caddr_t)vp, PINOD);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
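The listing continues with vgone() on the next page. Before that, note how vclean(), vget() and vgoneall() cooperate on a simple interlock: the cleaner sets VXLOCK, any other thread that finds the flag set records its interest with VXWANT and sleeps, and the wakeup() at the end of vclean() releases the sleepers once v_op has been switched to dead_vnodeop_p. A minimal sketch of the waiting side, with example_wait_for_clean() as a hypothetical caller:

/*
 * Illustrative sketch, not part of vfs_subr.c: how a thread waits out
 * a vnode that is currently being cleaned.
 */
static void
example_wait_for_clean(struct vnode *vp)
{

	while (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		sleep((caddr_t)vp, PINOD);	/* woken by wakeup() in vclean() */
	}
	/*
	 * If the vnode was being purged, it has now been handed over to
	 * the dead filesystem: v_op is dead_vnodeop_p and v_tag is VT_NON.
	 */
}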
