
📄 nfs_bio.c

📁 Early FreeBSD implementation
💻 C
📖 Page 1 of 2
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, i don't think it matters
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = nmp->nm_rsize;
	do {
		/*
		 * Check for a valid write lease.
		 * If non-cachable, just do the rpc
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
			do {
				error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
					return (error);
				np->n_brev = np->n_lrev;
			}
		}
		if (np->n_flag & NQNFSNONCACHE)
			return (nfs_writerpc(vp, uio, cred, ioflag));
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp)
			return (EINTR);
		if (bp->b_wcred == NOCRED) {
			crhold(cred);
			bp->b_wcred = cred;
		}
		np->n_flag |= NMODIFIED;
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		}

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR)
				return (EINTR);
			goto again;
		}

		/*
		 * Check for valid write lease and get one as required.
		 * In case getblk() and/or bwrite() delayed us.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
			do {
				error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				return (error);
			}
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				brelse(bp);
				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
					return (error);
				np->n_brev = np->n_lrev;
				goto again;
			}
		}
		if (error = uiomove((char *)bp->b_data + on, n, uio)) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			return (error);
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
#ifndef notdef
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}
#else
		bp->b_validoff = bp->b_dirtyoff;
		bp->b_validend = bp->b_dirtyend;
#endif
		if (ioflag & IO_APPEND)
			bp->b_flags |= B_APPENDWRITE;

		/*
		 * If the lease is non-cachable or IO_SYNC do bwrite().
		 */
		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
			bp->b_proc = p;
			if (error = VOP_BWRITE(bp))
				return (error);
		} else if ((n + on) == biosize &&
			(nmp->nm_flag & NFSMNT_NQNFS) == 0) {
			bp->b_proc = (struct proc *)0;
			bawrite(bp);
		} else
			bdwrite(bp);
	} while (uio->uio_resid > 0 && n > 0);
	return (0);
}
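/*
 * Illustrative sketch, not part of the original file: the dirty-region
 * bookkeeping used in the write path above.  A buffer tracks one dirty
 * byte range [b_dirtyoff, b_dirtyend); a new write of [on, on + n) can
 * only be folded into it if the two ranges touch or overlap, otherwise
 * the old range must be pushed to the server first (the VOP_BWRITE()
 * and "goto again" above).  The helper name is hypothetical.
 */
static int
dirty_range_mergeable(int on, int n, int dirtyoff, int dirtyend)
{

	if (dirtyend > 0 && (on > dirtyend || (on + n) < dirtyoff))
		return (0);	/* disjoint: flush the old range first */
	/*
	 * Otherwise the caller coalesces, exactly as above:
	 * dirtyoff = min(on, dirtyoff); dirtyend = max(on + n, dirtyend);
	 */
	return (1);
}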
/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	register struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	register struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */
nfs_asyncio(bp, cred)
	register struct buf *bp;
	struct ucred *cred;
{
	register int i;

	if (nfs_numasync == 0)
		return (EIO);
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
	    if (nfs_iodwant[i]) {
		if (bp->b_flags & B_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			}
		}
		TAILQ_INSERT_TAIL(&nfs_bufq, bp, b_freelist);
		nfs_iodwant[i] = (struct proc *)0;
		wakeup((caddr_t)&nfs_iodwant[i]);
		return (0);
	    }
	return (EIO);
}
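/*
 * Illustrative sketch, not part of the original file: the
 * NFLUSHINPROG/NFLUSHWANT handshake in nfs_vinvalbuf() above is a
 * classic single-flusher gate built from tsleep()/wakeup().  A userland
 * analogue with POSIX threads might look like this; the flush_gate_*
 * names are hypothetical.
 */
#include <pthread.h>

static pthread_mutex_t gate_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gate_cv = PTHREAD_COND_INITIALIZER;
static int flush_in_progress = 0;	/* plays the role of NFLUSHINPROG */

static void
flush_gate_enter(void)
{

	pthread_mutex_lock(&gate_mtx);
	/* Like the tsleep() loop above: wait while someone else flushes. */
	while (flush_in_progress)
		pthread_cond_wait(&gate_cv, &gate_mtx);
	flush_in_progress = 1;
	pthread_mutex_unlock(&gate_mtx);
}

static void
flush_gate_leave(void)
{

	pthread_mutex_lock(&gate_mtx);
	flush_in_progress = 0;
	/* Like the NFLUSHWANT wakeup(): rouse any waiting flushers. */
	pthread_cond_broadcast(&gate_cv);
	pthread_mutex_unlock(&gate_mtx);
}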
/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, cr, p)
	register struct buf *bp;
	struct cred *cr;
	struct proc *p;
{
	register struct uio *uiop;
	register struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error, diff, len;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more.
	 */
	if (bp->b_flags & B_PHYS)
	    panic("doio phys");
	if (bp->b_flags & B_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = bp->b_blkno * DEV_BSIZE;
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop, cr);
		if (!error) {
		    bp->b_validoff = 0;
		    if (uiop->uio_resid) {
			/*
			 * If len > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet.
			 * Just zero fill the rest of the valid area.
			 */
			diff = bp->b_bcount - uiop->uio_resid;
			len = np->n_size - (bp->b_blkno * DEV_BSIZE
				+ diff);
			if (len > 0) {
			    len = min(len, uiop->uio_resid);
			    bzero((char *)bp->b_data + diff, len);
			    bp->b_validend = diff + len;
			} else
			    bp->b_validend = diff;
		    } else
			bp->b_validend = bp->b_bcount;
		}
		if (p && (vp->v_flag & VTEXT) &&
			(((nmp->nm_flag & NFSMNT_NQNFS) &&
			  np->n_lrev != np->n_brev) ||
			 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
			  np->n_mtime != np->n_vattr.va_mtime.ts_sec))) {
			uprintf("Process killed due to text file modification\n");
			psignal(p, SIGKILL);
			p->p_flag |= P_NOSWAP;
		}
		break;
	    case VLNK:
		uiop->uio_offset = 0;
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, cr);
		break;
	    case VDIR:
		uiop->uio_offset = bp->b_lblkno;
		nfsstats.readdir_bios++;
		if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS)
		    error = nfs_readdirlookrpc(vp, uiop, cr);
		else
		    error = nfs_readdirrpc(vp, uiop, cr);
		/*
		 * Save offset cookie in b_blkno.
		 */
		bp->b_blkno = uiop->uio_offset;
		break;
	    };
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else {
	    io.iov_len = uiop->uio_resid = bp->b_dirtyend
		- bp->b_dirtyoff;
	    uiop->uio_offset = (bp->b_blkno * DEV_BSIZE)
		+ bp->b_dirtyoff;
	    io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
	    uiop->uio_rw = UIO_WRITE;
	    nfsstats.write_bios++;
	    if (bp->b_flags & B_APPENDWRITE)
		error = nfs_writerpc(vp, uiop, cr, IO_APPEND);
	    else
		error = nfs_writerpc(vp, uiop, cr, 0);
	    bp->b_flags &= ~(B_WRITEINPROG | B_APPENDWRITE);

	    /*
	     * For an interrupted write, the buffer is still valid and the
	     * write hasn't been pushed to the server yet, so we can't set
	     * B_ERROR and report the interruption by setting B_EINTR. For
	     * the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt
	     * is essentially a noop.
	     */
	    if (error == EINTR) {
		bp->b_flags &= ~B_INVAL;
		bp->b_flags |= B_DELWRI;

		/*
		 * Since for the B_ASYNC case, nfs_bwrite() has reassigned the
		 * buffer to the clean list, we have to reassign it back to the
		 * dirty one. Ugh.
		 */
		if (bp->b_flags & B_ASYNC)
		    reassignbuf(bp, vp);
		else
		    bp->b_flags |= B_EINTR;
	    } else {
		if (error) {
		    bp->b_flags |= B_ERROR;
		    bp->b_error = np->n_error = error;
		    np->n_flag |= NWRITEERR;
		}
		bp->b_dirtyoff = bp->b_dirtyend = 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bp);
	return (error);
}
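/*
 * Illustrative sketch, not part of the original file: the short-read
 * fixup in the VREG case of nfs_doio() above.  When the server returns
 * fewer bytes than requested but the block still lies below the known
 * file size, the remainder is a hole that must read back as zeros, so
 * the valid region is extended over a zero fill.  All names here are
 * hypothetical.
 */
#include <string.h>

static int
zero_fill_short_read(char *data, int bcount, int resid,
    long block_off, long file_size)
{
	int diff = bcount - resid;			/* bytes the server returned */
	long len = file_size - (block_off + diff);	/* hole bytes below EOF */

	if (len > 0) {
		if (len > resid)
			len = resid;			/* the min() above */
		memset(data + diff, 0, (size_t)len);	/* the bzero() above */
		return (diff + (int)len);		/* new b_validend */
	}
	return (diff);
}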
