📄 gfs_bio.c

📁 Digital's Unix operating system VAX 4.2 source code
💻 C
📖 Page 1 of 2
		if (bp->b_flags & B_DELWRI) {
			++bufstats.delwrite;
			bwrite(bp);
			return (0);
		}
		++bufstats.realloc;
		if (smp) {	/* we don't move memory around w/smp */
			bp->b_bcount = size;
			return (1);
		}
		return (allocbuf(bp, size));
	}
	bp->b_flags &= ~B_DONE;
	if (bp->b_dev == NODEV) {
		++bufstats.realloc;
		if (smp) {	/* we don't move memory around w/smp */
			bp->b_bcount = size;
			return (1);
		}
		return (allocbuf(bp, size));
	}
	/*
	 * Search cache for any buffers that overlap the one that we
	 * are trying to allocate. Overlapping buffers must be marked
	 * invalid, after being written out if they are dirty (indicated
	 * by B_DELWRI). A disk block must be mapped by at most one buffer
	 * at any point in time. Care must be taken to avoid deadlocking
	 * when two buffers are trying to get the same set of disk blocks.
	 */
	start = bp->b_blkno;
	last = start + btodb(size) - 1;
	gp = bp->b_gp;
	dp = BUFHASH(bp->b_dev, bp->b_blkno, gp);
loop:
	s = splbio();
	smp_lock(&lk_bio, LK_RETRY);
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		if ((ep->b_dev != bp->b_dev) || (ep == bp) ||
		    (ep->state&B_INVAL) || !matchgp(ep, gp))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
			continue;
		if (ep->state&B_BUSY) {
			ep->state |= B_WANTED;
			sleep_unlock((caddr_t)ep, PRIBIO+1, &lk_bio);
			splx(s);
			goto loop;
		}
		if (!matchgid(ep, gp))
			continue;
		notavail(ep);
		if (ep->b_flags & B_DELWRI) {
			smp_unlock(&lk_bio);
			splx(s);
			++bufstats.delwrite;
			bwrite(ep);
			goto loop;
		}
		ep->state |= B_INVAL;
		smp_unlock(&lk_bio);
		splx(s);
		brelse(ep);
		goto loop;
	}
	smp_unlock(&lk_bio);
	splx(s);
	++bufstats.realloc;
	if (smp) {	/* we don't move memory around w/smp */
		bp->b_bcount = size;
		return (1);
	}
	return (allocbuf(bp, size));
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to first search CLEAN list, then the DIRTY list
 * (flushing a DIRTY buffer takes time).
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register int s;

loop:
	s = splbio();
	smp_lock(&lk_bio, LK_RETRY);
	for (dp = &bfreelist[BQ_CLEAN]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		bfreelist_wanted = 1;
		sleep_unlock((caddr_t)dp, PRIBIO+1, &lk_bio);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bp->state = 0;
	/*
	 * Remove the buffer from the free list and push it out if delayed.
	 */
	notavail(bp);
	smp_unlock(&lk_bio);
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_ASYNC;
		bp->b_flags |= B_FORCEWRITE;
		++bufstats.delwrite;
		++bufstats.forcewrite;
		bwrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, bp->b_dev, bp->b_blkno);
	++bufstats.newbuf;
	/*
	 * Clean out the flag word, but leave the B_BUSY bit
	 * (notavail set state)
	 */
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	register int s;

	s = splbio();
	smp_lock(&lk_bio, LK_RETRY);
	while (!(bp->b_flags&B_DONE)) {
		sleep_unlock((caddr_t)bp, PRIBIO, &lk_bio);
		smp_lock(&lk_bio, LK_RETRY);
	}
	smp_unlock(&lk_bio);
	splx(s);
	if (u.u_error == 0)			/* XXX */
		u.u_error = geterror(bp);
}

/*
 * Mark I/O complete on a buffer.
 * If someone should be called, e.g. the pageout
 * daemon, do so.  Otherwise, wake up anyone
 * waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{
	register int s;

	s = splbio();
	smp_lock(&lk_bio, LK_RETRY);
	if (bp->b_flags & B_ERROR)
		++bufstats.biodone_errs;
	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		smp_unlock(&lk_bio);
		splx(s);
		(*bp->b_iodone)(bp);
	} else if (bp->b_flags&B_ASYNC) {
		smp_unlock(&lk_bio);
		splx(s);
		brelse(bp);
	} else {
		bp->state &= ~B_WANTED;
		smp_unlock(&lk_bio);
		splx(s);
		wakeup((caddr_t)bp);
	}
}

flushblocks(gp)
	register struct gnode *gp;
{
	register int ret;

	if (ISLOCAL(gp->g_mp))
		ret = bflush(gp->g_dev, (struct gnode *) 0, 0);
	else
		ret = bflush(NODEV, gp, 0); /* nfs is easy since bp has gp */
}

/*
 * Insure that no part of a specified block is in an incore buffer.
 */
blkflush(dev, blkno, size, gp)
	dev_t dev;
	register daddr_t blkno;
	long size;
	register struct gnode *gp;
{
	register struct buf *ep;
	register struct buf *dp;
	register daddr_t start, last;
	register int flushed = 0;
	register int s;

	++bufstats.blkflush_call;
	if (gp)
		++bufstats.blkflushgp;
	start = blkno;
	last = start + btodb(size) - 1;
	dp = BUFHASH(dev, blkno, gp);
loop:
	s = splbio();
	smp_lock(&lk_bio, LK_RETRY);
	for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
		++bufstats.blkflush_look;
		if ((ep->b_dev != dev) || (ep->state & B_INVAL) ||
		    !matchgp(ep, gp))
			continue;
		/* look for overlap */
		if (ep->b_bcount == 0 || ep->b_blkno > last ||
		    ep->b_blkno + btodb(ep->b_bcount) <= start)
			continue;
		if (ep->state&B_BUSY) {
			++bufstats.blkflush_sleep;
			ep->state |= B_WANTED;
			sleep_unlock((caddr_t)ep, PRIBIO+1, &lk_bio);
			splx(s);
			goto loop;
		}
		if (!matchgid(ep, gp))
			continue;
		if (ep->b_flags & B_DELWRI) {
			++bufstats.blkflush_flush;
			notavail(ep);
			++bufstats.delwrite;
			smp_unlock(&lk_bio);
			splx(s);
			bwrite(ep);
			flushed++;
			goto loop;
		}
	}
	smp_unlock(&lk_bio);
	splx(s);
	return (flushed);
}

/*
 * Make sure all write-behind blocks associated with:
 *	gp, if specified,
 *	else dev (NODEV for all)
 * are flushed out.
 * Note that a non-NULL gp overrides dev;
 * if gp is NULL, and dev is NODEV, then all write behind blocks
 * are flushed.
 *
 * Called from unmount routines, update(), and sfs sync routines.
 *
 * The waitfor argument specifies whether the busy list should be waited
 * on after the dirty list is flushed.
 *
 * We look ONLY at the dirty list, because that's the only place
 * a DELWRI buffer should be.
 *
 * Whenever we find one, we rescan the list from scratch, to avoid
 * races.  This should not really add to the running time, since
 * we are just grabbing the first item on the list.
 *
 * This routine is a merge of the old bflush() and bflushgp().
 */
bflush(dev, gp, waitfor)
	dev_t dev;
	struct gnode *gp;
	int waitfor;
{
	register struct buf *bp;
	register struct buf *flist;
	register int s;
	register int flushed = 0;
	register int n_look = 0;
	register long start_time;
	struct bflush_dirty *bd;
	struct bflush_busy *bb;
	int n_sleep = 0;
	int n_clean = 0;
	int n_dirty = 0;
	int n_empty = 0;
	int n_busy = 0;

	/* account for type of call */
	if (!waitfor) { /* asynchronous call */
		if (gp)
			bd = &bufstats.gp_async;
		else if (dev != NODEV)
			bd = &bufstats.dev_async;
		else
			bd = &bufstats.all_async;
	}
	else { /* synchronous call */
		if (gp) {
			bd = &bufstats.gp_sync;
			bb = &bufstats.gp_busy;
		}
		else if (dev != NODEV) {
			bd = &bufstats.dev_sync;
			bb = &bufstats.dev_busy;
		}
		else {
			bd = &bufstats.all_sync;
			bb = &bufstats.all_busy;
		}
	}
	++bd->call;

	/* if debugging, count the queues */
	if (bflush_debug) {
		s = splbio();
		smp_lock(&lk_bio, LK_RETRY);
		flist = &bfreelist[BQ_CLEAN];
		for (bp = flist->av_forw; bp != flist; bp = bp->av_forw)
			++n_clean;
		flist = &bfreelist[BQ_DIRTY];
		for (bp = flist->av_forw; bp != flist; bp = bp->av_forw)
			++n_dirty;
		flist = &bfreelist[BQ_EMPTY];
		for (bp = flist->av_forw; bp != flist; bp = bp->av_forw)
			++n_empty;
		flist = &bbusylist;
		for (bp = flist->av_forw; bp != flist; bp = bp->av_forw)
			++n_busy;
		smp_unlock(&lk_bio);
		splx(s);
	}

	/* Flush dirty buffers. */
	/* Look only at the dirty queue. */
	/* If we find a buffer that fits the bill, push it and start over. */
	flist = &(bfreelist[BQ_DIRTY]);
loop:
	s = splbio();
	smp_lock(&lk_bio, LK_RETRY);
	for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
		++n_look;
		if ((bp->b_flags & B_DELWRI) == 0) {
			printf("bflush(%x) bp %x not DELWRI\n", dev, bp);
			continue;
		}
		if (gp) { /* gp, if specified, has priority */
			if (!matchgp(bp, gp))
				continue;
		}
		else { /* gp was not specified */
			if (dev != NODEV && dev != bp->b_dev)
				continue;
		}
		/* now, we have a dirty buffer that interests us */
		notavail(bp);
		smp_unlock(&lk_bio);
		splx(s);
		bp->b_flags |= B_ASYNC;
		++bufstats.delwrite;
		bwrite(bp);
		++flushed;
		/* put an upper bound on how many buffers we'll push */
		if (flushed == nbuf) {	/* we've tried long enough! */
			++bd->loop;
			goto done1;
		}
		goto loop;
	}
	smp_unlock(&lk_bio);
	splx(s);
done1:
	if (bflush_debug) {
	mprintf("bflush %d,%d gp 0x%x n %d free %d %d %d %d - %d chk %d wrt %d\n",
	       major(dev), minor(dev), gp, nbuf, n_dirty, n_clean, n_empty,
	       n_busy, nbuf-n_clean-n_dirty-n_empty, n_look, flushed);
	}

	/* account for number of buffers inspected and pushed */
	bd->look += n_look;
	bd->flush += flushed;

	/* If requested, wait for busy buffers. */
	/* Look only at the busy queue. */
	/* If we find a buffer that fits the bill, sleep and start over. */
	if (!waitfor)
		return (flushed);

	start_time = time.tv_sec; /* remember our start time */
	n_look = 0;
	flist = &bbusylist;
	s = splbio();
	if (bflush_debug) {
		smp_lock(&lk_bio, LK_RETRY);
		for (bp = flist->busy_forw; bp != flist; bp = bp->busy_forw) {
			++n_busy;
		}
		if (dev == NODEV)
			printf("bflush busy: %d busy\n", n_busy);
		else
			mprintf("bflush busy: %d busy\n", n_busy);
		smp_unlock(&lk_bio);
	}
loop2:
	smp_lock(&lk_bio, LK_RETRY);
	for (bp = flist->busy_forw; bp != flist; bp = bp->busy_forw) {
		/* put an upper bound on how many times we'll sleep */
		if (n_sleep == nbuf) { /* we've tried long enough! */
			++bb->loop;
			goto done2;
		}
		++n_look;
		/* look only at writes for real devs */
		if (bp->b_flags & B_READ || bp->b_dev == NODEV ||
		    bp->state & B_INVAL)
			continue;
		if (gp) { /* gp, if specified, has priority */
			if (!matchgp(bp, gp))
				continue;
		}
		else { /* gp was not specified */
			if (dev == NODEV) { /* dev was wildcarded */
				/*
				 * special case (i.e. hack):
				 * if NODEV, i.e. coming down, don't sleep
				 * on anything but local buffers.
				 */
				if (major(bp->b_dev) >= nblkdev)
					continue;
			}
			else if (dev != bp->b_dev) /* unique dev specified */
				continue;	   /* but doesn't match */
		}
		/* we have found a busy buffer that interests us */
		/* skip ones that have arrived since we started */
		if (bp->busy_time > start_time)
			continue;
		if (!(bp->b_flags & B_DONE)) {
			++n_sleep;
			bp->state |= B_WANTED;
			sleep_unlock((caddr_t)bp, PRIBIO, &lk_bio);
			goto loop2;
		}
	}
done2:
	smp_unlock(&lk_bio);
	splx(s);
	if (bflush_debug) {
		if (dev == NODEV)
			printf("bflush busy: n %d chk %d sleep %d\n",
			       n_busy, n_look, n_sleep);
		else
			mprintf("bflush busy: n %d chk %d sleep %d\n",
				n_busy, n_look, n_sleep);
	}

	/* account for number of buffers inspected and slept on */
	bb->look += n_look;
	bb->sleep += n_sleep;
	if (n_sleep > flushed)
		++bb->more;
	return (flushed);
}

/*
 * Pick up the device's error number and pass it to the user;
 * if there is an error but the number is 0 set a generalized
 * code.  Actually the latter is always true because devices
 * don't yet return specific errors.
 */
geterror(bp)
	register struct buf *bp;
{
	register int error = 0;

	if (bp->b_flags&B_ERROR)
		if ((error = bp->b_error)==0)
			return (EIO);
	return (error);
}

/*
 * Invalidate in core blocks belonging to closed or umounted filesystem
 *
 * This is not nicely done at all - the buffer ought to be removed from the
 * hash chains & have its dev/blkno fields clobbered, but unfortunately we
 * can't do that here, as it is quite possible that the block is still
 * being used for i/o. Eventually, all disc drivers should be forced to
 * have a close routine, which ought ensure that the queue is empty, then
 * properly flush the queues. Until that happy day, this suffices for
 * correctness.						... kre
 */
binval(dev, gp)
	dev_t dev;
	register struct gnode *gp;
{
	register struct buf *bp;
	register struct bufhd *hp;
	register int s;
#define dp ((struct buf *)hp)

	++bufstats.binval_call;
	s = splbio();
	smp_lock(&lk_bio, LK_RETRY);
	for (hp = bufhash; hp < &bufhash[bufhsz]; hp++) {
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
			++bufstats.binval_look;
			if ((bp->b_dev == dev) || (gp && matchgp(bp, gp))) {
				++bufstats.binval_inval;
				bp->state |= B_INVAL;
			}
		}
	}
	smp_unlock(&lk_bio);
	splx(s);
}

/*
 * Invalidate all buffers that are holding soft references
 * to gnodes (for now this is NFS only).
 */
binvalallgid()
{
	register struct buf *bp;
	register struct bufhd *hp;
	register int s;
#define dp ((struct buf *)hp)

	++bufstats.binvalallgid_call;
	s = splbio();
	smp_lock(&lk_bio, LK_RETRY);
	for (hp = bufhash; hp < &bufhash[bufhsz]; hp++) {
		for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
			if (bp->b_gp)
				bp->state |= B_INVAL;
		}
	}
	smp_unlock(&lk_bio);
	splx(s);
}

/*
 * Invalidate blocks associated with gp which are on the clean list.
 * Make sure all write-behind blocks associated with gp are flushed out.
 * Used only for NFS buffers.
 */
int binvalfree_inval = 0;

binvalfree(gp)
	struct gnode *gp;
{
	register struct buf *bp;
	register struct buf *flist;
	register int s;

	++bufstats.binvalfree_call;
	bflush(NODEV, gp, 0); /* flush delayed write blocks asynchronously */
	/*
	 * The cheap, dirty, fast way to invalidate buffers
	 * is to disassociate the gnode from them.
	 * This leaves the buffers in place in the clean list, however.
	 * A more laborious (historical) method is to search them out
	 * and place them at the head of the clean list for re-use.
	 */
	if (binvalfree_inval) {
		flist = &bfreelist[BQ_CLEAN];
		s = splbio();
		smp_lock(&lk_bio, LK_RETRY);
		for (bp = flist->av_forw; bp != flist; bp = bp->av_forw) {
			++bufstats.binvalfree_look;
			if (!matchgp(bp, gp))
				continue;
			++bufstats.binvalfree_inval;
			bp->state |= B_INVAL;
		}
		smp_unlock(&lk_bio);
		(void) splx(s);
	}
	else
		cacheinval(gp);
}
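
The overlap test shared by getblk() and blkflush() above treats each buffer as the half-open block range [b_blkno, b_blkno + btodb(b_bcount)). Below is a minimal user-space sketch of that test, not part of this source tree; the btodb() here assumes 512-byte disk blocks (DEV_BSIZE == 512, a right shift by 9), which matches the classic VAX convention but is an assumption about this kernel.

/*
 * Sketch only: the overlap predicate from getblk()/blkflush(),
 * lifted into a standalone program.  btodb() assumes 512-byte
 * disk blocks; the kernel's actual macro may differ.
 */
#include <stdio.h>

#define btodb(bytes)	((long)((bytes) >> 9))	/* assumed DEV_BSIZE == 512 */

/* Does [blkno, blkno + btodb(bcount)) overlap [start, last]? */
static int
overlaps(long blkno, long bcount, long start, long last)
{
	if (bcount == 0 || blkno > last ||
	    blkno + btodb(bcount) <= start)
		return (0);
	return (1);
}

int
main()
{
	long start = 100;
	long last = start + btodb(8192) - 1;	/* target maps blocks 100..115 */

	printf("%d\n", overlaps(96, 4096, start, last));	/* 1: 96..103 overlaps */
	printf("%d\n", overlaps(116, 4096, start, last));	/* 0: 116..123 is past last */
	printf("%d\n", overlaps(92, 4096, start, last));	/* 0: 92..99 ends at start */
	return (0);
}

Note that a zero-length buffer never overlaps anything, which is why both kernel loops skip b_bcount == 0 buffers before comparing block numbers.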
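biowait() and biodone() above form a sleep/wakeup handshake: the waiter re-checks B_DONE after every wakeup, and sleep_unlock() atomically releases lk_bio while sleeping. A hedged user-space analogue using POSIX threads is sketched below; pthread_cond_wait() plays the role of sleep_unlock(), pthread_cond_broadcast() plays wakeup(), and the names done and io_completes are invented for illustration.

/*
 * Sketch only: a user-space analogue of the biowait()/biodone()
 * handshake.  The done flag stands in for B_DONE, the mutex for
 * lk_bio; names here are illustrative, not from the kernel.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static int done;			/* stands in for B_DONE */

static void *
io_completes(void *arg)			/* plays biodone() */
{
	pthread_mutex_lock(&lk);
	done = 1;
	pthread_cond_broadcast(&done_cv);	/* plays wakeup() */
	pthread_mutex_unlock(&lk);
	return NULL;
}

int
main()					/* plays biowait() */
{
	pthread_t t;

	pthread_create(&t, NULL, io_completes, NULL);
	pthread_mutex_lock(&lk);
	while (!done)			/* re-check the flag after every wakeup */
		pthread_cond_wait(&done_cv, &lk);	/* plays sleep_unlock() */
	pthread_mutex_unlock(&lk);
	pthread_join(t, NULL);
	printf("I/O complete\n");
	return (0);
}

The while loop, rather than a single check, mirrors the kernel code: a wakeup only means "look again", not "the condition now holds for you".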
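The bflush() comment notes that after pushing a dirty buffer the scan restarts "from scratch, to avoid races": the list lock is dropped around the expensive bwrite(), after which the saved iterator may be stale. The standalone sketch below shows just that restart pattern under invented stand-ins (node, flush_one, the work bound limit); it is an illustration of the control flow, not kernel code.

/*
 * Sketch only: the rescan-from-the-head pattern used by bflush().
 * Where the comments say a lock would be held or dropped, the kernel
 * uses lk_bio; flush_one() stands in for bwrite(), and limit plays
 * the role of the nbuf upper bound.
 */
#include <stdio.h>

struct node {
	struct node *next;
	int dirty;
};

static void
flush_one(struct node *n)		/* stands in for bwrite() */
{
	n->dirty = 0;
}

static int
flush_all(struct node *head, int limit)
{
	struct node *n;
	int flushed = 0;
loop:
	/* list lock would be taken here */
	for (n = head; n != NULL; n = n->next) {
		if (!n->dirty)
			continue;
		/* lock would be dropped here; the list may change under us */
		flush_one(n);
		if (++flushed == limit)	/* bound total work, like nbuf */
			return (flushed);
		goto loop;		/* iterator is stale: rescan from head */
	}
	/* lock would be released here */
	return (flushed);
}

int
main()
{
	struct node c = { 0, 1 }, b = { &c, 0 }, a = { &b, 1 };

	printf("flushed %d\n", flush_all(&a, 10));	/* prints "flushed 2" */
	return (0);
}

As the kernel comment observes, restarting costs little because each pass grabs the first qualifying buffer near the head of the list.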
