nfs_vnodeops.c
                *vpp = makenfsnode(&dr->dr_fhandle, NULL, dvp->v_vfsp,
                    ((struct gnode *)dvp)->g_mp);
                if (*vpp == NULL) {
                        error = u.u_error;
                } else {
                        nfsattr_inval(*vpp);    /* don't believe attributes */
                        /*
                         * This getattr should be removed if it turns
                         * out that it is safe to leave uninitialized
                         * attributes.  For now, an extra getattr is a small
                         * price to pay for correctness. -chet
                         */
                        error = nfs_getattr(*vpp, cred);
                        if (!error && nfs_dnlc) {
                                dnlc_enter(dvp, nm, *vpp, cred);
                        }
                }
        } else {
                *vpp = (struct vnode *)0;
        }
        kmem_free(dr, KM_NFS);
        return (error);
}

int
nfs_rmdir(dvp, nm, cred)
        struct vnode *dvp;
        char *nm;
        struct ucred *cred;
{
        int error;
        enum nfsstat status;
        struct nfsdiropargs da;

        setdiropargs(&da, nm, dvp);
        dnlc_purge_vp(dvp);
        error = rfscall(vtomi(dvp), RFS_RMDIR, xdr_diropargs, (caddr_t)&da,
            xdr_enum, (caddr_t)&status, cred);
        nfsattr_inval(dvp);
        if (!error) {
                error = geterrno(status);
                check_stale_fh(error, dvp);
        }
        return (error);
}

int
nfs_symlink(dvp, lnm, tva, tnm, cred)
        struct vnode *dvp;
        char *lnm;
        struct vattr *tva;
        char *tnm;
        struct ucred *cred;
{
        int error;
        struct nfsslargs args;
        enum nfsstat status;

        setdiropargs(&args.sla_from, lnm, dvp);
        vattr_to_sattr(tva, &args.sla_sa);
        args.sla_tnm = tnm;
        error = rfscall(vtomi(dvp), RFS_SYMLINK, xdr_slargs, (caddr_t)&args,
            xdr_enum, (caddr_t)&status, cred);
        nfsattr_inval(dvp);
        if (!error) {
                error = geterrno(status);
                check_stale_fh(error, dvp);
        }
        return (error);
}
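/*
 * Illustrative sketch only, compiled out: nfs_rmdir() and nfs_symlink()
 * above both rely on setdiropargs() to pack the parent directory's file
 * handle and the component name into the over-the-wire argument structure
 * handed to rfscall().  The field names below are assumptions about
 * struct nfsdiropargs, not code from this file.
 */
#ifdef notdef
static void
example_setdiropargs(da, nm, dvp)
        struct nfsdiropargs *da;
        char *nm;
        struct vnode *dvp;
{
        da->da_fhandle = *vtofh(dvp);   /* parent directory's file handle */
        da->da_name = nm;               /* NUL-terminated component name */
}
#endif /* notdef */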
/*
 * Read directory entries.
 * There are some weird things to look out for here.  The uio_offset
 * field is either 0 or it is the offset returned from a previous
 * readdir.  It is an opaque value used by the server to find the
 * correct directory block to read.  The byte count must be at least
 * vtoblksz(vp) bytes.  The count field is the number of blocks to
 * read on the server.  This is advisory only, the server may return
 * only one block's worth of entries.  Entries may be compressed on
 * the server.
 */
int
nfs_readdir(vp, uiop, cred)
        struct vnode *vp;
        register struct uio *uiop;
        struct ucred *cred;
{
        register int error = 0;
        register struct iovec *iovp;
        register unsigned count;
        register struct rnode *rp = vtor(vp);
        struct nfsrddirargs rda;
        struct nfsrddirres rd;

        nfs_lock(vp);   /* synchronize with any other process */
                        /* that has vp locked before the access() check, */
                        /* where it may be temporarily unlocked */
        if (access(vp, GREAD)) {        /* must be able to read the dir */
                nfs_unlock(vp);
                return;
        }
        nfs_unlock(vp);
        if ((rp->r_flags & REOF) && (vp->g_size == (u_long)uiop->uio_offset)) {
                return;
        }
        iovp = uiop->uio_iov;
        count = iovp->iov_len;
        /*
         * XXX We should do some kind of test for count >= DEV_BSIZE
         */
        if (uiop->uio_iovcnt != 1) {
                u.u_error = EINVAL;
                return;
        }
        count = MIN(count, vtomi(vp)->mi_tsize);
        rda.rda_count = count;
        rda.rda_offset = uiop->uio_offset;
        rda.rda_fh = *vtofh(vp);
        rd.rd_size = count;
        kmem_alloc(rd.rd_entries, struct direct *, (u_int)count, KM_NFS);
        error = rfscall(vtomi(vp), RFS_READDIR, xdr_rddirargs, (caddr_t)&rda,
            xdr_getrddirres, (caddr_t)&rd, cred);
        if (!error) {
                error = geterrno(rd.rd_status);
                check_stale_fh(error, vp);
        }
        if (!error) {
                /*
                 * move dir entries to user land
                 */
                if (rd.rd_size) {
                        error = uiomove((caddr_t)rd.rd_entries,
                            (int)rd.rd_size, UIO_READ, uiop);
                        rda.rda_offset = rd.rd_offset;
                        uiop->uio_offset = rd.rd_offset;
                }
                if (rd.rd_eof) {
                        rp->r_flags |= REOF;
                        /* removed for VMS server compatibility, */
                        /* not sure if it was right anyway */
                        /* vp->g_size = uiop->uio_offset; */
                }
        }
        kmem_free(rd.rd_entries, KM_NFS);
        if (error)
                u.u_error = error;
        return;
}
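/*
 * Illustrative sketch only, compiled out: the comment above nfs_readdir()
 * says uio_offset is an opaque cookie handed back by the server.  A
 * kernel-level caller would therefore loop roughly as below, feeding each
 * returned offset into the next call and stopping once the rnode is marked
 * REOF.  The helper name, the buffer size, and the uio setup details are
 * assumptions, not code from this file.
 */
#ifdef notdef
static int
example_read_whole_dir(vp, cred)
        struct vnode *vp;
        struct ucred *cred;
{
        struct uio uio;
        struct iovec iov;
        char block[8192];               /* must be at least vtoblksz(vp) bytes */
        register struct rnode *rp = vtor(vp);

        uio.uio_offset = 0;             /* 0 means "start of directory" */
        do {
                iov.iov_base = (caddr_t)block;
                iov.iov_len = sizeof (block);
                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_resid = sizeof (block);
                uio.uio_segflg = UIO_SYSSPACE;  /* constant name is an assumption */
                nfs_readdir(vp, &uio, cred);    /* advances uio_offset cookie */
                if (u.u_error)
                        return (u.u_error);
                /* ... consume the struct direct entries now in block ... */
        } while ((rp->r_flags & REOF) == 0);
        return (0);
}
#endif /* notdef */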
/*
 * GFS operation for getting block maps
 */
int
nfs_gbmap(vp, vbn, rw, size, sync)
        register struct vnode *vp;      /* gnode */
        register daddr_t vbn;           /* virtual block */
        int rw, size, sync;             /* ignore for nfs */
{
        daddr_t lbn;

        nfs_bmap(vp, vbn, &lbn);
        return ((int)lbn);
}

/*
 * Convert from file system blocks to device blocks
 */
int
nfs_bmap(vp, bn, bnp)
        struct vnode *vp;       /* file's vnode */
        daddr_t bn;             /* fs block number */
        daddr_t *bnp;           /* RETURN device block number */
{
        int bsize;              /* server's block size in bytes */

        if (bnp) {
                bsize = vtoblksz(vp);
                *bnp = bn * (bsize / DEV_BSIZE);
        }
        return (0);
}

struct buf *async_bufhead;
int async_daemon_count; /* number of nfs_biod() processes available for work */

#include "../h/vm.h"
#include "../h/map.h"
#include "../machine/pte.h"

int async_buf_count;    /* number of buffers on nfs_biod() work list */

int
nfs_strategy(bp)
        register struct buf *bp;
{
        register struct buf *bp1;
        register struct gnode *gp = bp->b_gp;
        register struct rnode *rp = vtor((struct vnode *)gp);

        /*
         * If there was an asynchronous write error on this gnode
         * then we just return the old error code.  This continues
         * until the gnode goes away (zero ref count).  We do this because
         * there can be many procs writing this gnode.
         */
        if (rp->r_error) {
                bp->b_error = rp->r_error;
                bp->b_flags |= B_ERROR;
                iodone(bp);
                return;
        }
        if (bp->b_flags & B_PHYS) {
                register int npte;
                register int n;
                register long a;
                register struct pte *pte, *kpte;
                caddr_t va;
                int o;
                caddr_t saddr;
                struct proc *p;
                unsigned v;

                if (!(bp->b_flags & B_PAGET)) {
                        int user_addr = 0;

                        /*
                         * Buffer's data is in userland, or in some other
                         * currently inaccessible place.  We get a hunk of
                         * kernel address space and map it in.
                         */
                        v = btop(bp->b_un.b_addr);
                        o = (int)bp->b_un.b_addr & PGOFSET;
                        npte = btoc(bp->b_bcount + o);
                        p = bp->b_flags & B_DIRTY ? &proc[2] : bp->b_proc;
                        if (bp->b_flags & B_UAREA)      /* [ dgg001 ] */
                                pte = &p->p_addr[v];
                        else if ((bp->b_flags & B_SMEM) &&
                            ((bp->b_flags & B_DIRTY) == 0))
                                pte = ((struct smem *)p)->sm_ptaddr + v;
                        else {
                                pte = (struct pte *)0;
                                user_addr++;
                        }
                        while ((a = rmalloc(kernelmap,
                            (long)clrnd(npte))) == NULL) {
                                kmapwnt++;
                                sleep((caddr_t)kernelmap, PSWP+4);
                        }
                        kpte = &Usrptmap[a];
                        for (n = npte; n--; kpte++, pte++, v++) {
                                if (user_addr &&
                                    (((int)pte & PGOFSET) <
                                    CLSIZE*sizeof(struct pte) ||
                                    pte->pg_pfnum == 0))
                                        pte = vtopte(p, v);
                                if (pte->pg_pfnum == 0)
                                        panic("nfs zero uentry");
#ifdef mips
                                *(int *)kpte = (*(int *)pte & PG_PFNUM);
#endif /* mips */
#ifdef vax
                                *(int *)kpte = PG_NOACC | (*(int *)pte & PG_PFNUM);
#endif /* vax */
                        }
                        va = (caddr_t)kmxtob(a);
#ifdef mips
                        vmaccess(&Usrptmap[a], va, npte, DO_CACHE);
#endif /* mips */
#ifdef vax
                        vmaccess(&Usrptmap[a], va, npte);
#endif /* vax */
                        saddr = bp->b_un.b_addr;
                        bp->b_un.b_addr = va + o;
                }
                /*
                 * do the io
                 */
                do_bio(bp);
                /*
                 * Release kernel maps
                 */
                if (!(bp->b_flags & B_PAGET)) {
                        bp->b_un.b_addr = saddr;
                        kpte = &Usrptmap[a];
                        for (n = npte; n--; kpte++)
#ifdef mips
                                *(int *)kpte = 0;
#endif /* mips */
#ifdef vax
                                *(int *)kpte = PG_NOACC;
#endif /* vax */
                        rmfree(kernelmap, (long)clrnd(npte), a);
                }
        } else if (async_daemon_count && (bp->b_flags & B_ASYNC) &&
            async_buf_count < async_daemon_count) {
                /*
                 * We never allow more buffers onto async_bufhead than
                 * there are biods waiting to process them.  Since
                 * biods may need to go through nfs_attrcache() (which
                 * locks gp), and since nfs_fsync() holds gp locked for
                 * its duration, if a buffer needed to complete nfs_fsync()
                 * is placed where we cannot guarantee processing
                 * up to iodone(), then it's deadlock time.  This may be
                 * viewed as conservative, but if there is no biod,
                 * then why wait to start I/O?
                 */
                smp_lock(&lk_nfsbiod, LK_RETRY);
                if (!async_daemon_count ||
                    async_buf_count >= async_daemon_count) {
                        smp_unlock(&lk_nfsbiod);
                        do_bio(bp);
                } else {
                        if (async_bufhead) {
                                bp1 = async_bufhead;
                                while (bp1->b_actf) {
                                        bp1 = bp1->b_actf;
                                }
                                bp1->b_actf = bp;
                        } else {
                                async_bufhead = bp;
                        }
                        gref(gp);
                        bp->b_actf = NULL;
                        ++async_buf_count;
                        smp_unlock(&lk_nfsbiod);
                        wakeup_type((caddr_t)&async_bufhead, WAKE_ONE);
                }
        } else {
                do_bio(bp);
        }
}
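/*
 * Illustrative sketch only, compiled out: the queueing rule described in
 * the long comment inside nfs_strategy() reduces to one predicate -- hand
 * a buffer to a biod only when it is asynchronous and an idle biod is
 * guaranteed to pick it up; otherwise the caller does the I/O itself via
 * do_bio().  This helper merely restates that test for clarity; it is not
 * part of the original file.
 */
#ifdef notdef
static int
example_can_queue_to_biod(bp)
        register struct buf *bp;
{
        return (async_daemon_count != 0 &&
            (bp->b_flags & B_ASYNC) &&
            async_buf_count < async_daemon_count);
}
#endif /* notdef */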
int
nfs_biod()
{
        register struct buf *bp;
        register struct gnode *gp;
        register struct proc *p = u.u_procp;

        if (setjmp(&u.u_qsave)) {
                /* Protect counters */
                smp_lock(&lk_nfsbiod, LK_RETRY);
                async_daemon_count--;
                if (async_buf_count > async_daemon_count) {
                        /*
                         * We must follow rules described in
                         * nfs_strategy(), but we lost a race with a
                         * new buffer.  Note there are too many by
                         * at most one.  If this i/o doesn't complete
                         * it will take two signals to get us!
                         */
                        bp = async_bufhead;
                        async_bufhead = bp->b_actf;
                        --async_buf_count;
                        smp_unlock(&lk_nfsbiod);
                        gp = bp->b_gp;
                        do_bio(bp);
                        grele(gp);
                        exit(0);
                }
                smp_unlock(&lk_nfsbiod);
                exit(0);
        }
        for (;;) {
                /* Protect biod buffer list */
                smp_lock(&lk_nfsbiod, LK_RETRY);
                async_daemon_count++;
                while (async_bufhead == NULL) {
                        sleep_unlock((caddr_t)&async_bufhead, PZERO + 1,
                            &lk_nfsbiod);
                        smp_lock(&lk_nfsbiod, LK_RETRY);
                        if (async_bufhead == NULL)
                                biod_has_no_work++;
                        else
                                biod_has_work++;
                }
                async_daemon_count--;
                bp = async_bufhead;
                async_bufhead = bp->b_actf;
                --async_buf_count;
                smp_unlock(&lk_nfsbiod);
                gp = bp->b_gp;
                do_bio(bp);
                grele(gp);
                /* See if this I/O was interrupted */
                if (p->p_cursig) {
                        mprintf("NFS biod (pid %d) exiting on signal %d\n",
                            u.u_procp->p_pid, p->p_cursig);
                        exit(0);
                }
        }
}

int
do_bio(bp)
        register struct buf *bp;
{
        register struct gnode *gp = bp->b_gp;
        register struct rnode *rp = vtor((struct vnode *)gp);

        /*
         * Ref the gnode to handle sync/close races that might
         * drop the gnode ref count to zero
         */
        gref(gp);       /* all NFS buffers hold refs */
        if ((bp->b_flags & B_READ) == B_READ) {
                nfsread(bp, (struct vnode *)(gp), bp->b_un.b_addr,
                    bp->b_blkno * DEV_BSIZE, (int)bp->b_bcount,
                    rp->r_cred, NFS_BLOCKIO);
        } else {
                nfswrite(bp, (struct vnode *)(gp), bp->b_un.b_addr,
                    bp->b_blkno * DEV_BSIZE, bp->b_bcount - bp->b_resid,
                    rp->r_cred, NFS_BLOCKIO);
        }
        grele(gp);      /* all NFS buffers hold refs */
}

int
nfs_badop()
{
        panic("nfs_badop");
}

/*
 * Remote Record-locking requests are passed to the local Lock-Manager daemon
 * to be passed along to the server Lock-Manager daemon.
 */
int
nfs_rlock(gp, ld, cmd, fp)
        struct gnode *gp;
        struct flock *ld;
        int cmd;
        struct file *fp;
{
        register struct rnode *rp = vtor((struct vnode *)gp);
        lockhandle_t lh;
        extern int kernel_locking;      /* Sys-V locking system: 1 = kernel */
                                        /*                       0 = daemon */

        /*
         * ULTRIX supports both kernel based and daemon based
         * region locking; however, only daemon based locking
         * supports NFS file locking.
         * If daemon based locking has not been enabled, then
         * kernel locking is enabled, and NFS lock requests are
         * not permissible.
         *
         * Note: this routine is called when an attempt is made to
         * lock a remote file.
         */
        if (kernel_locking) {
                return (EACCES);
        }
#ifndef lint
        if (sizeof (lh.lh_id) != sizeof (fhandle_t))
                panic("fhandle and lockhandle-id are not the same size!");
#endif
        /*
         * If we are setting a lock, mark the rnode NOCACHE so the buffer
         * cache does not give inconsistent results on locked files shared
         * between clients.  The NOCACHE flag is never turned off as long
         * as the gnode is active because it is hard to figure out when
         * the last lock is gone.
         */
        if (((rp->r_flags & RNOCACHE) == 0) && (ld->l_type != F_UNLCK) &&
            (cmd != F_GETLK)) {
                rp->r_flags |= RNOCACHE;
                binvalfree(gp);
        }
        lh.lh_gp = gp;
        lh.lh_servername = vtomi((struct vnode *)gp)->mi_hostname;
        bcopy((caddr_t)vtofh((struct vnode *)gp), (caddr_t)&lh.lh_id,
            sizeof (fhandle_t));
        return (klm_lockctl(&lh, ld, cmd, fp->f_cred));
}
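/*
 * Illustrative user-level example only, compiled out: a record lock taken
 * with fcntl() on a file that lives on an NFS mount is what ultimately
 * drives nfs_rlock() above, provided daemon based locking is enabled
 * (kernel_locking == 0).  The function name is an assumption; the fcntl
 * interface itself is standard.
 */
#ifdef notdef
#include <fcntl.h>

int
example_lock_first_byte(fd)
        int fd;
{
        struct flock fl;

        fl.l_type = F_WRLCK;            /* exclusive (write) lock */
        fl.l_whence = 0;                /* relative to start of file */
        fl.l_start = 0;
        fl.l_len = 1;                   /* lock just the first byte */
        return (fcntl(fd, F_SETLKW, &fl));      /* blocks until granted */
}
#endif /* notdef */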