📄 vm_page.c
字号:
* * The most general worst case here would be for * a reclaim, a modify and a swapout to occur * all before the single page transfer completes. */ if (dirtycl(pte)#ifdef vax || (c->c_type == CSMEM && dirtysm(sp,v)) /* SHMEM */#endif vax ) { /* * Limit pushes to avoid saturating * pageout device. */ if (pushes > maxpgio / 4) return (0); pushes++; /* * Now carefully make sure that there will * be a header available for the push so that * we will not block waiting for a header in * swap(). The reason this is important is * that we (proc[2]) are the one who cleans * dirty swap headers and we could otherwise * deadlock waiting for ourselves to clean * swap headers. The sleep here on &proc[2] * is actually (effectively) a sleep on both * ourselves and &bswlist, and this is known * to swdone and swap in vm_swp.c. That is, * &proc[2] will be awakened both when dirty * headers show up and also to get the pageout * daemon moving. */loop2:#ifdef vax (void) spl6();#endif vax#ifdef mips (void) splbio();#endif mips if (bclnlist != NULL) { (void) spl0(); cleanup(); goto loop2; } if (bswlist.av_forw == NULL) { bswlist.b_flags |= B_WANTED; sleep((caddr_t)&proc[2], PSWP+2); (void) spl0(); /* * Page disposition may have changed * since process may have exec'ed, * forked, exited or just about * anything else... try this page * frame again, from the top. */ goto top; } (void) spl0(); MLOCK(c); #ifdef vax if (c->c_type != CSMEM) /* SHMEM */ uaccess(rp, Pushmap, &pushutl);#endif vax#ifdef mips ppushutl = (struct user *)PHYS_TO_K0( ptob(rp->p_addr[0].pg_pfnum));#endif mips /* * Now committed to pushing the page... 
*/#ifdef mips flushpte(rp, v, CLSIZE, c->c_type); pte->pg_swapm = 0;#endif mips pte->pg_m = 0; distcl(pte); if (c->c_type == CTEXT) { xp->x_poip++;#ifdef vax distpte(xp, vtotp(rp, v), pte);#endif vax } else if (c->c_type == CSMEM) { /* SHMEM */ sp->sm_poip++;#ifdef vax distsmpte(sp, v, pte, PG_CLEARM);#endif vax } else rp->p_poip++; if(c->c_type == CSMEM){ /* SHMEM */ smpage = ctod(c->c_page); daddr = sp->sm_daddr[smpage/dmtext] + smpage%dmtext; swap(sp, daddr, ptob(c->c_page), ctob(CLSIZE), B_WRITE, B_DIRTY|B_SMEM, swapdev, pte->pg_pfnum); } else { v = kluster(rp, v, pte, B_WRITE, &klsize, klout, (daddr_t)0); if (klsize == 0) panic("pageout klsize"); daddr = vtod(rp, v, &pushutl.u_dmap, &pushutl.u_smap); swap(rp, daddr, ptob(v), klsize * ctob(CLSIZE), B_WRITE, B_DIRTY, swapdev, pte->pg_pfnum); } /* * The cleaning of this page will be * completed later, in cleanup() called * (synchronously) by us (proc[2]). In * the meantime, the page frame is locked * so no havoc can result. */ return (1); /* well, it'll be free soon */ } /* * Decrement the resident set size of the current * text object/process, and put the page in the * free list. Note that we don't give memfree the * pte as its argument, since we don't want to destroy * the pte. If it hasn't already been discarded * it may yet have a chance to be reclaimed from * the free list. */ if (c->c_gone == 0) { if (c->c_type == CTEXT) xp->x_rssize -= CLSIZE; else if (c->c_type == CSMEM) /* SHMEM */ sp->sm_rssize -= CLSIZE; else rp->p_rssize -= CLSIZE; } memfree(pte, CLSIZE, 0); cnt.v_dfree += CLSIZE; return (1); /* freed a page! */#ifdef mips#undef pushutl#endif mips}/* * Process the ``cleaned'' list. * * Scan through the linked list of swap I/O headers * and free the corresponding pages that have been * cleaned by being written back to the paging area. * If the page has been reclaimed during this time, * we do not free the page. 
 * As they are processed,
 * the swap I/O headers are removed from the cleaned
 * list and inserted into the free list.
 *
 * NOTE(review): this runs as proc[2], the pageout daemon, which is
 * the only cleaner of dirty swap headers (see the deadlock comment
 * in the pageout path above).
 */
cleanup()
{
	register struct buf *bp;
	register struct proc *rp;
	register struct text *xp;
	register struct smem *sp;	/* SHMEM */
	register struct cmap *c;
	register struct pte *pte;
	struct pte *upte;
	unsigned pf;
	register int i;
	int s, center;

#ifdef mips
	XPRINTF(XPR_VM,"enter cleanup",0,0,0,0);
#endif mips
	for (;;) {
		/*
		 * Raise ipl so the cleaned list cannot change while
		 * we unlink the next header from it.
		 */
#ifdef vax
		s = spl6();
#endif vax
#ifdef mips
		s = splbio();
#endif mips
		if ((bp = bclnlist) == 0)
			break;
		bclnlist = bp->av_forw;
		splx(s);
		/* The pushed pages are mapped in proc[2]'s own space. */
		pte = vtopte(&proc[2], btop(bp->b_un.b_addr));
		center = 0;
		for (i = 0; i < bp->b_bcount; i += CLSIZE * NBPG) {
			pf = pte->pg_pfnum;
			c = &cmap[pgtocm(pf)];
			MUNLOCK(c);
			if (pf != bp->b_pfcent) {
				/*
				 * Not the center page of the kluster:
				 * free it only if its owner has gone away.
				 */
				if (c->c_gone) {
					memfree(pte, CLSIZE, 0);
					cnt.v_dfree += CLSIZE;
				}
				goto skip;
			}
			center++;
			/*
			 * Drop the owner's pageout-in-progress count and
			 * wake anyone waiting for it to drain.
			 */
			switch (c->c_type) {

			case CSYS:
				panic("cleanup CSYS");

			case CTEXT:
				xp = &text[c->c_ndx];
				xp->x_poip--;
				if (xp->x_poip == 0)
					wakeup((caddr_t)&xp->x_poip);
				break;

			case CDATA:
			case CSTACK:
				rp = &proc[c->c_ndx];
				while (rp->p_flag & SNOVM)
					rp = rp->p_xlink;
				rp->p_poip--;
				if (rp->p_poip == 0)
					wakeup((caddr_t)&rp->p_poip);
				break;

			case CSMEM:			/* SHMEM */
				sp = &smem[c->c_ndx];
				if(--(sp->sm_poip) == 0)
					wakeup((caddr_t)&sp->sm_poip);
				break;
			}
			if (c->c_gone == 0) {
				/* Find the owner's pte for this page. */
				switch (c->c_type) {

				case CTEXT:
					upte = tptopte(xp->x_caddr, c->c_page);
					break;

				case CDATA:
					upte = dptopte(rp, c->c_page);
					break;

				case CSTACK:
					upte = sptopte(rp, c->c_page);
					break;

				case CSMEM:		/* SHMEM */
					upte = sp->sm_ptaddr + c->c_page;
					break;
				}
				/*
				 * Page was reclaimed while the push was in
				 * progress; leave it resident.
				 */
				if (upte->pg_v)
					goto skip;
				if (c->c_type == CTEXT)
					xp->x_rssize -= CLSIZE;
				else if (c->c_type == CSMEM)	/* SHMEM */
					sp->sm_rssize -= CLSIZE;
				else
					rp->p_rssize -= CLSIZE;
			}
			memfree(pte, CLSIZE, 0);
			cnt.v_dfree += CLSIZE;
skip:
			pte += CLSIZE;
		}
		/* Exactly one page per transfer must be the center. */
		if (center != 1)
			panic("cleanup center");
		/* Put the swap header back on the free list. */
		bp->b_flags = 0;
		bp->av_forw = bswlist.av_forw;
		bswlist.av_forw = bp;
		if (bswlist.b_flags & B_WANTED) {
			bswlist.b_flags &= ~B_WANTED;
			wakeup((caddr_t)&bswlist);
		}
	}
	splx(s);
}

/*
 * Kluster locates pages adjacent to the argument pages
 * that are immediately available to include in the pagein/pageout,
 * and given the availability of memory includes them.
 * It knows that the process image is contiguous in chunks;
 * an assumption here is that CLSIZE * KLMAX is a divisor of dmmin,
 * so that by looking at KLMAX chunks of pages, all such will
 * necessarily be mapped swap contiguous.
 */
int noklust;		/* patchable: disables klustering when non-zero */
int klicnt[KLMAX];	/* counts of pagein klusters by size */
int klocnt[KLMAX];	/* counts of pageout klusters by size */

kluster(p, v, pte0, rw, pkl, klsize, bn0)
	register struct proc *p;
	unsigned v;
	struct pte *pte0;
	int rw, *pkl, klsize;
	daddr_t bn0;
{
	int type, cl, clmax;
	int kloff, k, klmax;
	register struct pte *pte;
	int klback, klforw;
	register int i;
	unsigned v0;
	daddr_t bn;
	register struct cmap *c;

#ifdef mips
	XPRINTF(XPR_VM,"enter kluster",0,0,0,0);
#endif mips
	if (rw == B_READ)
		klicnt[0]++;
	else
		klocnt[0]++;
	*pkl = 1;
	/* klsize must be a power of two in (1, KLMAX]. */
	if (noklust || klsize <= 1 || klsize > KLMAX ||
	    (klsize & (klsize - 1)))
		return (v);
	if (rw == B_READ && freemem < CLSIZE * KLMAX)
		return (v);
	/*
	 * Classify v by segment and compute its cluster index
	 * and the segment's cluster count.
	 */
	if (isassv(p, v)) {
		type = CSTACK;
		cl = vtosp(p, v) / CLSIZE;
		clmax = p->p_ssize / CLSIZE;
	} else if (isadsv(p, v)) {
		type = CDATA;
		cl = vtodp(p, v) / CLSIZE;
		clmax = p->p_dsize / CLSIZE;
	} else {
		type = CTEXT;
		cl = vtotp(p, v) / CLSIZE;
		clmax = p->p_textp->x_size / CLSIZE;
	}
	kloff = cl & (klsize - 1);
	pte = pte0;
	bn = bn0;
	/*
	 * Scan backward within the kluster window for eligible
	 * neighbors.  The stack segment grows the other way, so
	 * its ptes step in the opposite direction.
	 */
	for (k = kloff; --k >= 0;) {
		if (type == CSTACK)
			pte += CLSIZE;
		else
			pte -= CLSIZE;
		if (type == CTEXT && rw == B_READ && bn) {
			bn -= CLBYTES / DEV_BSIZE;
			if (mfind(swapdev, bn, p->p_textp->x_gptr))
				break;
		}
		if (!klok(pte, rw))
			break;
	}
	klback = (kloff - k) - 1;
	pte = pte0;
	/* Clip the forward scan at the end of the segment. */
	if ((cl - kloff) + klsize > clmax)
		klmax = clmax - (cl - kloff);
	else
		klmax = klsize;
	bn = bn0;
	for (k = kloff; ++k < klmax;) {
		if (type == CSTACK)
			pte -= CLSIZE;
		else
			pte += CLSIZE;
		if (type == CTEXT && rw == B_READ && bn) {
			bn += (CLBYTES / DEV_BSIZE);
			if (mfind(swapdev, bn, p->p_textp->x_gptr))
				break;
		}
		if (!klok(pte, rw))
			break;
	}
	klforw = (k - kloff) - 1;
	if (klforw + klback == 0)
		return (v);
	/* Back v and pte up to the first page of the kluster. */
	pte = pte0;
	if (type == CSTACK) {
		pte -= klforw * CLSIZE;
		v -= klforw * CLSIZE;
	} else {
		pte -= klback * CLSIZE;
		v -= klback * CLSIZE;
	}
	*pkl = klforw + klback + 1;
	/* Move the statistic from the size-1 bucket to the real size. */
	if (rw == B_READ)
		klicnt[0]--, klicnt[*pkl - 1]++;
	else
		klocnt[0]--, klocnt[*pkl - 1]++;
	v0 = v;
	/*
	 * Prepare each cluster except pte0 itself, which the
	 * caller already set up.
	 */
	for (i = 0; i < *pkl; i++) {
		if (pte == pte0)
			goto cont;
		if (rw == B_WRITE) {
			c = &cmap[pgtocm(pte->pg_pfnum)];
			MLOCK(c);
#ifdef mips
			newptes(p, v, CLSIZE);
			pte->pg_swapm = 0;
#endif mips
			pte->pg_m = 0;
			distcl(pte);
#ifdef vax
			if (type == CTEXT)
				distpte(p->p_textp, vtotp(p, v), pte);
#endif vax
		} else {
			struct pte opte;
			int pf;

			/* Allocate memory for the incoming cluster. */
			opte = *pte;
			if (memall(pte, CLSIZE, p, type, v, V_CACHE) == 0)
				panic("kluster");
			pte->pg_prot = opte.pg_prot;
			pf = pte->pg_pfnum;
			cmap[pgtocm(pf)].c_intrans = 1;
			distcl(pte);
			if (type == CTEXT) {
				p->p_textp->x_rssize += CLSIZE;
#ifdef vax
				distpte(p->p_textp, vtotp(p, v), pte);
#endif vax
			} else
				p->p_rssize += CLSIZE;
			distcl(pte);
		}
cont:
		pte += CLSIZE;
		v += CLSIZE;
	}
	return (v0);
}

/*
 * Decide whether the cluster mapped by pte may join a kluster.
 * For a pageout (B_WRITE) it must be resident, unlocked, not in
 * transit, and dirty; for a pagein it must be an ordinary invalid
 * page (not fill-on-demand, no page frame assigned yet).
 */
klok(pte, rw)
	register struct pte *pte;
	int rw;
{
	register struct cmap *c;

#ifdef mips
	XPRINTF(XPR_VM,"enter klok",0,0,0,0);
#endif mips
	if (rw == B_WRITE) {
		if (pte->pg_fod)
			return (0);
		if (pte->pg_pfnum == 0)
			return (0);
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_lock || c->c_intrans)
			return (0);
		if (!dirtycl(pte))
			return (0);
		return (1);
	} else {
		if (pte->pg_fod)
			return (0);
		if (pte->pg_pfnum)
			return (0);
		return (1);
	}
}

/*
 * Fodkluster locates pages adjacent to the argument pages
 * that are immediately available to include in the pagein,
 * and given the availability of memory includes them.
 * It wants to page in a file system block if it can.
 */
int nofodklust = 0;	/* patchable: disables fod klustering when non-zero */
int fodklcnt[KLMAX];	/* counts of fill-on-demand klusters by size */

fodkluster(p, v0, pte0, pkl, dev, pbn, klmax)
	register struct proc *p;
	unsigned v0;
	struct pte *pte0;
	int *pkl;
	dev_t dev;
	daddr_t *pbn;
	int klmax;
{
	register struct pte *pte;
	register struct fpte *fpte;
	struct cmap *c;
	register daddr_t bn;
	daddr_t bnswap;
	register int klsize;
	int klback, type, i;
	long v, vmin, vmax;

#ifdef mips
	XPRINTF(XPR_VM,"enter fodkluster",0,0,0,0);
#endif mips
	if (nofodklust)
		return (v0);
	fodklcnt[0]++;
	*pkl = 1;
	if (freemem < klmax)
		return (v0);
	/* Establish the valid virtual range for v0's segment. */
	if (isatsv(p, v0)) {
		type = CTEXT;
		vmin = tptov(p, 0);
		vmax = tptov(p, clrnd(p->p_tsize) - CLSIZE);
	} else {
		type = CDATA;
		vmin = dptov(p, 0);
		vmax = dptov(p, clrnd(p->p_dsize) - CLSIZE);
	}
	fpte = (struct fpte *)pte0;
	bn = *pbn;
	v = (long) v0;
	/*
	 * Scan backward while the preceding clusters are still
	 * fill-on-demand and map consecutive file system blocks.
	 */
	for (klsize = 1; klsize < klmax; klsize++) {
		v -= CLSIZE;
		if (v < vmin)
			break;
		fpte -= CLSIZE;
		if (fpte->pg_fod == 0)
			break;
		bn -= btodb(CLBYTES);
#ifdef mips
		if (PG_BLKNO(fpte) != bn)	/* correct block? */
			break;
#endif mips
#ifdef vax
		if (fpte->pg_blkno != bn)
			break;
#endif vax
		if (type == CTEXT) {
			if (mfind(dev, bn, p->p_textp->x_gptr))
				break;
			/*
			 * Flush any previous text page use of this
			 * swap device block.
			 */
			bnswap = vtod(p, v, &u.u_dmap, &u.u_smap);
			c = mfind(swapdev, bnswap, p->p_textp->x_gptr);
			if (c)
				munhash(swapdev, bnswap, p->p_textp->x_gptr);
		}
	}
	klback = klsize - 1;
	fpte = (struct fpte *)pte0;
	bn = *pbn;
	v = v0;
	/* Now scan forward from v0 under the same conditions. */
	for (; klsize < klmax; klsize++) {
		v += CLSIZE;
		if (v > vmax)
			break;
		fpte += CLSIZE;
		if (fpte->pg_fod == 0)
			break;
		bn += btodb(CLBYTES);
#ifdef mips
		if (PG_BLKNO(fpte) != bn)	/* correct block? */
			break;
#endif mips
#ifdef vax
		if (fpte->pg_blkno != bn)
			break;
#endif vax
		if (type == CTEXT) {
			if (mfind(dev, bn, p->p_textp->x_gptr))
				break;
			/*
			 * Flush any previous text page use of this
			 * swap device block.
			 */
			bnswap = vtod(p, v, &u.u_dmap, &u.u_smap);
			c = mfind(swapdev, bnswap, p->p_textp->x_gptr);
			if (c)
				munhash(swapdev, bnswap, p->p_textp->x_gptr);
		}
	}
	if (klsize == 1)
		return (v0);
	/* Rewind to the first cluster of the kluster. */
	pte = pte0;
	pte -= klback * CLSIZE;
	v0 -= klback * CLSIZE;
	*pbn -= klback * btodb(CLBYTES);
	*pkl = klsize;
	/* Move the statistic from the size-1 bucket to the real size. */
	fodklcnt[0]--;
	fodklcnt[klsize - 1]++;
	v = v0;
	/*
	 * Allocate memory for every cluster except pte0 itself,
	 * which the caller handles.
	 */
	for (i = 0; i < klsize; i++) {
		if (pte != pte0) {
			struct pte opte;
			int pf;

			opte = *pte;
			if (memall(pte, CLSIZE, p, type, v, V_CACHE) == 0)
				panic("fodkluster");
			pte->pg_prot = opte.pg_prot;
			pf = pte->pg_pfnum;
#ifdef maybe
			pte->pg_m = 1;
#endif maybe
			cmap[pgtocm(pf)].c_intrans = 1;
#ifdef vax
			distcl(pte);
#endif vax
			if (type == CTEXT) {
				p->p_textp->x_rssize += CLSIZE;
#ifdef vax
				distpte(p->p_textp, vtotp(p, v), pte);
#endif vax
#ifdef mips
				pte->pg_swapm = 1;
#endif mips
			} else {
#ifdef mips
				pte->pg_m = 1;
#endif mips
				p->p_rssize += CLSIZE;
			}
			distcl(pte);
		}
		pte += CLSIZE;
		v += CLSIZE;
	}
	return (v0);
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -