/*
 * vm_pt.c
 */
vgetu(p, palloc, map, newu, oldu)
	register struct proc *p;
	int (*palloc)();
	register struct pte *map;
	register struct user *newu;
	struct user *oldu;
{
	register int i;
	int ret, memall();

#ifdef mips
	XPRINTF(XPR_VM, "enter vgetu", 0, 0, 0, 0);
#endif /* mips */
	if (palloc == memall)
		ret = (*palloc)(p->p_addr, clrnd(UPAGES), p, CSYS,
		    NULL, V_NOOP);
	else
		ret = (*palloc)(p->p_addr, clrnd(UPAGES), p, CSYS);
	if (ret == 0)
		return (0);
	/*
	 * New u. pages are to be accessible in map/newu as well
	 * as in process p's virtual memory.
	 */
	for (i = 0; i < UPAGES; i++) {
		map[i] = p->p_addr[i];
#ifdef mips
		*(int *)(p->p_addr + i) |= PG_KW | PG_V | PG_G | PG_M;
#endif /* mips */
#ifdef vax
		*(int *)(p->p_addr + i) |= PG_URKW | PG_V;
#endif /* vax */
	}
	setredzone(p->p_addr, (caddr_t)0);
#ifdef mips
	vmaccess(map, (caddr_t)newu, UPAGES, DO_CACHE);
#endif /* mips */
#ifdef vax
	vmaccess(map, (caddr_t)newu, UPAGES);
#endif /* vax */
	/*
	 * New u.'s come from forking or inswap.
	 */
	if (oldu) {
#ifdef vax
		bcopy((caddr_t)oldu, (caddr_t)newu, UPAGES * NBPG);
#endif /* vax */
#ifdef mips
		/*
		 * Avoid copying the entire ublock by just doing what is
		 * known to be active.  TODO: this hack knows that if an old
		 * u block is provided, it is indeed the active u and
		 * therefore measuring the current stack depth is the right
		 * thing to do.  Also, if stack depth measurement is to be
		 * done, pattern initialization should take place here.
		 */
		if (oldu != &u)
			panic("vgetu bad upage");
		bcopy((caddr_t)oldu, (caddr_t)newu, sizeof(struct user));
		i = stackdepth();
		bcopy((caddr_t)((int)oldu + UPAGES*NBPG - i),
		    (caddr_t)((int)newu + UPAGES*NBPG - i), i);
#endif /* mips */
		newu->u_procp = p;
	} else {
		swap(p, p->p_swaddr, (caddr_t)0, ctob(UPAGES),
		    B_READ, B_UAREA, swapdev, 0);
		if (
#ifdef vax
		    newu->u_pcb.pcb_ssp != -1 ||
		    newu->u_pcb.pcb_esp != -1 ||
#endif
		    newu->u_tsize != p->p_tsize ||
		    newu->u_dsize != p->p_dsize ||
		    newu->u_ssize != p->p_ssize ||
		    newu->u_procp != p)
			panic("vgetu");
	}
	/*
	 * Initialize the pcb copies of the p0 and p1 region bases and
	 * software page table size from the information in the proc
	 * structure.
	 */
#ifdef vax
	newu->u_pcb.pcb_p0br = p->p_p0br;
	newu->u_pcb.pcb_p1br = initp1br(p->p_p0br + p->p_szpt * NPTEPG);
	newu->u_pcb.pcb_szpt = p->p_szpt;
#endif /* vax */
	return (1);
}

/*
 * Release swap space for a u. area.
 */
vrelswu(p, utl)
	struct proc *p;
	struct user *utl;
{

#ifdef mips
	XPRINTF(XPR_VM, "enter vrelswu", 0, 0, 0, 0);
#endif /* mips */
	rmfree(swapmap, (long)ctod(vusize(p, utl)), p->p_swaddr);
	/* p->p_swaddr = 0; */		/* leave for post-mortems */
}

/*
 * Get swap space for a u. area.
 */
vgetswu(p, utl)
	struct proc *p;
	struct user *utl;
{

#ifdef mips
	XPRINTF(XPR_VM, "enter vgetswu", 0, 0, 0, 0);
#endif /* mips */
	p->p_swaddr = rmalloc(swapmap, (long)ctod(vusize(p, utl)));
	return (p->p_swaddr);
}
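
/*
 * Illustrative sketch: vgetswu() and vrelswu() above pair
 * rmalloc()/rmfree() against swapmap, asking a first-fit resource map
 * for ctod(vusize(p, utl)) contiguous swap blocks.  The fragment below
 * models that allocator in plain C, assuming a zero-terminated array of
 * free (addr, size) runs; struct toymap and toy_rmalloc() are
 * hypothetical names for exposition, not kernel interfaces.
 */
struct toymap {
	long	m_addr;			/* start of a free run */
	long	m_size;			/* length; 0 terminates the map */
};

static long
toy_rmalloc(mp, size)
	register struct toymap *mp;
	long size;
{
	register long addr;

	for (; mp->m_size != 0; mp++)
		if (mp->m_size >= size) {	/* first fit */
			addr = mp->m_addr;
			mp->m_addr += size;	/* carve from the front */
			mp->m_size -= size;
			if (mp->m_size == 0)	/* exact fit: close the gap */
				do
					mp[0] = mp[1];
				while ((mp++)->m_size != 0);
			return (addr);
		}
	return (0);		/* 0 means "no space", as with rmalloc() */
}
/*
 * A matching toy_rmfree() would insert the freed run back into the map
 * and coalesce it with adjacent runs, which is how the blocks released
 * by vrelswu() become reusable.
 */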
/*
 * Release u. area, swapping it out if desired.
 *
 * To fix the "stale u-area" problem, this routine will indicate to
 * [v]memall that the u-area must be put in a temp "u" list until
 * after the context switch.  This will only happen if the u-area in
 * question is currently in context.
 */
vrelu(p, swapu)
	register struct proc *p;
{
	register int i;
	struct pte uu[UPAGES];

#ifdef mips
	XPRINTF(XPR_VM, "enter vrelu", 0, 0, 0, 0);
#endif /* mips */
	if (swapu)
		swap(p, p->p_swaddr, (caddr_t)0, ctob(UPAGES),
		    B_WRITE, B_UAREA, swapdev, 0);
	for (i = 0; i < UPAGES; i++)
		uu[i] = p->p_addr[i];
#ifdef vax
	if (u.u_procp == p)
		(void) vmemfree(uu, -(clrnd(UPAGES)));
	else
		(void) vmemfree(uu, clrnd(UPAGES));
#endif /* vax */
#ifdef mips
	/*
	 * If freeing the user structure and kernel stack
	 * for the current process, have to run a bit longer
	 * using the pages which have already been freed...
	 * block memory allocation from the network by raising ipl.
	 */
#ifdef ultrix
	if (u.u_procp == p) {
		(void) splimp();	/* XXX */
		(void) vmemfree(uu, -(clrnd(UPAGES)));
	} else
		(void) vmemfree(uu, clrnd(UPAGES));
#else /* mips */
	if (p == u.u_procp)
		(void) splimp();	/* XXX */
	(void) vmemfree(uu, clrnd(UPAGES));
#endif /* mips */
#endif /* mips */
}

#ifdef unneeded
int ptforceswap;
#endif

#ifdef vax
/*
 * Expand a page table, assigning new kernel virtual
 * space and copying the page table entries over both
 * in the system map and as necessary in the user page table space.
 */
ptexpand(change, ods, oss, osms)
	register int change;
	size_t ods, oss;
	size_t osms;		/* SHMEM */
{
	register struct pte *p1, *p2;
	register int i;
	register int spages, ss = P1PAGES - u.u_pcb.pcb_p1lr;
	register int kold = btokmx(u.u_pcb.pcb_p0br);
	int knew, tdpages;
	int szpt = u.u_pcb.pcb_szpt;
	int s;

	if (change <= 0 || change % CLSIZE)
		panic("ptexpand");
	/*
	 * Change is the number of new page table pages needed.
	 * Kold is the old index in the kernelmap of the page tables.
	 * Allocate a new kernel map segment of size szpt+change for
	 * the page tables, and the new page table pages in the
	 * middle of this new region.
	 */
top:
#ifdef unneeded
	if (ptforceswap)
		goto bad;
#endif
	if ((knew = rmalloc(kernelmap, (long)(szpt+change))) == 0)
		goto bad;
	spages = ss/NPTEPG;
	tdpages = szpt - spages;
	if (memall(&Usrptmap[knew+tdpages], change, u.u_procp, CSYS,
	    NULL, V_NOOP) == 0) {
		rmfree(kernelmap, (long)(szpt+change), (long)knew);
		goto bad;
	}
	/*
	 * Spages pages of u.+stack page tables go over unchanged.
	 * Tdpages of text+data page table may contain a few stack
	 * pages which need to go in one of the newly allocated pages;
	 * this is a rough cut.
	 */
	kmcopy(knew, kold, tdpages);
	kmcopy(knew+tdpages+change, kold+tdpages, spages);
	/*
	 * Validate and clear the newly allocated page table pages in the
	 * center of the new region of the kernelmap.
	 */
	i = knew + tdpages;
	p1 = &Usrptmap[i];
	p2 = p1 + change;
	while (p1 < p2) {
		/* tptov BELOW WORKS ONLY FOR VAX */
		mapin(p1, tptov(u.u_procp, i), p1->pg_pfnum, 1,
		    (int)(PG_V|PG_KW));
		clearseg(p1->pg_pfnum);
		p1++;
		i++;
	}
#ifdef vax
	mtpr(TBIA, 0);
#endif
	/*
	 * Move the stack and u. pte's which are before the newly
	 * allocated pages into the last of the newly allocated pages.
	 * They are taken from the end of the current p1 region,
	 * and moved to the end of the new p1 region.
	 */
	p1 = u.u_pcb.pcb_p1br + u.u_pcb.pcb_p1lr;
	p2 = initp1br(kmxtob(knew+szpt+change)) + u.u_pcb.pcb_p1lr;
	for (i = kmxtob(kold+szpt) - p1; i != 0; i--)
		*p2++ = *p1++;
	/*
	 * Now switch to the new page tables.
	 */
#ifdef vax
	mtpr(TBIA, 0);	/* paranoid */
#endif
	if (extracpu)
		tbsync();
	s = spl7();	/* conservative */
	lock(LOCK_RQ);
	u.u_procp->p_p0br = kmxtob(knew);
	setp0br(u.u_procp->p_p0br);
	u.u_pcb.pcb_p1br = initp1br(kmxtob(knew+szpt+change));
	setp1br(u.u_pcb.pcb_p1br);
	u.u_pcb.pcb_szpt += change;
	u.u_procp->p_szpt += change;
	u.u_procp->p_addr = uaddr(u.u_procp);
#ifdef vax
	mtpr(TBIA, 0);
#endif
	unlock(LOCK_RQ);
	splx(s);
	/*
	 * Finally, free old kernelmap.
	 */
	if (szpt)
		rmfree(kernelmap, (long)szpt, (long)kold);
	return;
bad:
	/*
	 * Swap out the process so that the unavailable
	 * resource will be allocated upon swapin.
	 *
	 * When resume is executed for the process,
	 * here is where it will resume.
	 */
	resume(pcbb(u.u_procp));
	if (savectx(&u.u_ssave)) {
		splx(s);
		return;
	}
	if (swapout(u.u_procp, ods, oss, osms) == 0) {
		/*
		 * No space to swap... it is inconvenient to try
		 * to exit, so just wait a bit and hope something
		 * turns up.  Could deadlock here.
		 *
		 * SOMEDAY REFLECT ERROR BACK THROUGH expand TO CALLERS
		 * (grow, sbreak) SO CAN'T DEADLOCK HERE.
		 */
		sleep((caddr_t)&lbolt, PRIBIO);
		goto top;
	}
	/*
	 * Set SSWAP bit, so that when process is swapped back in
	 * swapin will set u.u_pcb.pcb_sswap to u_sswap and force a
	 * return from the savectx() above.
	 */
	u.u_procp->p_flag |= SSWAP | SMASTER;
	(void) spl6();
	lock(LOCK_RQ);
	swtch();
	/* no return */
}
#endif /* vax */
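
/*
 * Illustrative sketch of the reallocation shape ptexpand() uses above:
 * the text+data page-table pages keep their offsets at the front of the
 * new region, the u.+stack pages slide to the very end, and the change
 * newly allocated pages sit zeroed between them.  A plain heap array
 * stands in for the kernelmap; toy_grow_middle() and its parameters are
 * hypothetical, and calloc()'s zero fill plays the part of clearseg().
 */
static int *
toy_grow_middle(old, tdpages, spages, change)
	int *old;
	int tdpages, spages, change;
{
	char *calloc();
	int *new;

	new = (int *)calloc((unsigned)(tdpages + change + spages),
	    sizeof(int));
	if (new == 0)
		return (0);	/* caller backs off, as via bad: above */
	/* prefix stays put, cf. kmcopy(knew, kold, tdpages) */
	bcopy((caddr_t)old, (caddr_t)new, tdpages * (int)sizeof(int));
	/* suffix moves past the new hole, cf. kmcopy(knew+tdpages+change,
	   kold+tdpages, spages) */
	bcopy((caddr_t)(old + tdpages), (caddr_t)(new + tdpages + change),
	    spages * (int)sizeof(int));
	free((char *)old);	/* old region freed last, cf. rmfree() */
	return (new);
}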
#ifdef mips
/*
 * Expand a page table, assigning new kernel virtual
 * space and copying the page table entries over both
 * in the system map and as necessary in the user page table space.
 */
ptexpand(change, ods, oss, region)
	register int change;
	size_t ods, oss;
	int region;
{
	register struct pte *p1, *p2;
	register int i;
	register int kold;
	int knew;
	int szpt;
	int s;

	XPRINTF(XPR_VM, "enter ptexpand", 0, 0, 0, 0);
	if (change <= 0 || change % CLSIZE)
		panic("ptexpand");
	/*
	 * Change is the number of new page table pages needed.
	 * Kold is the old index in the kernelmap of the page tables.
	 * Allocate a new kernel map segment of size szpt+change for
	 * the page tables.
	 */
top:
#ifdef unneeded
	if (ptforceswap)
		goto bad;
#endif /* unneeded */
	if (region == 0) {
		szpt = u.u_procp->p_datapt;
		kold = btokmx(u.u_procp->p_databr);
	} else {
		szpt = u.u_procp->p_stakpt;
		kold = btokmx(u.u_procp->p_stakbr);
	}
	if ((knew = rmalloc(kernelmap, (long)(szpt+change))) == 0)
		goto bad;
	if (region == 0) {
		if (memall(&Usrptmap[knew+szpt], change, u.u_procp, CSYS,
		    NULL, V_NOOP) == 0) {
			rmfree(kernelmap, (long)(szpt+change), (long)knew);
			goto bad;
		}
	} else {
		if (memall(&Usrptmap[knew], change, u.u_procp, CSYS,
		    NULL, V_NOOP) == 0) {
			rmfree(kernelmap, (long)(szpt+change), (long)knew);
			goto bad;
		}
	}
	/*
	 * Copy over stack and data page tables.
	 */
	if (region == 0)
		kmcopy(knew, kold, szpt);
	else
		kmcopy(knew+change, kold, szpt);
	/*
	 * Validate and clear the newly allocated page table pages in
	 * the new region of the kernelmap.
	 */
	if (region == 0)
		i = knew + szpt;
	else
		i = knew;
	p1 = &Usrptmap[i];
	p2 = p1 + change;
	while (p1 < p2) {
		mapin(btop(kmxtob(i)), p1->pg_pfnum, (int)(PG_V|PG_KW));
#ifdef USE_IDLE
		/* don't bother if it's already done */
		if (cmap[pgtocm(p1->pg_pfnum)].c_zero) {
			extern int v_zero_pt_hits;

			v_zero_pt_hits++;
			cmap[pgtocm(p1->pg_pfnum)].c_zero = 0;
		} else {
			extern int v_zero_pt_misses;

			v_zero_pt_misses++;
			clearseg(p1->pg_pfnum);
		}
#else
		clearseg(p1->pg_pfnum);
#endif /* USE_IDLE */
		p1++;
		i++;
	}
	/*
	 * Now switch to the new page tables.
	 */
	s = splhigh();	/* conservative */
	if (region == 0) {
		u.u_procp->p_databr = kmxtob(knew);
		u.u_procp->p_datapt += change;
	} else {
		u.u_procp->p_stakpt += change;
		u.u_procp->p_stakbr = kmxtob(knew);
	}
	splx(s);
	/*
	 * Finally, free old kernelmap.
	 */
	if (szpt)
		rmfree(kernelmap, (long)szpt, (long)kold);
	return;
bad:
	/*
	 * Swap out the process so that the unavailable
	 * resource will be allocated upon swapin.
	 *
	 * When resume is executed for the process,
	 * here is where it will resume.
	 */
	save();
	if (setjmp(&u.u_ssave))
		return;
	if (swapout(u.u_procp, ods, oss, u.u_osmsize) == 0) {
		/*
		 * No space to swap... it is inconvenient to try
		 * to exit, so just wait a bit and hope something
		 * turns up.  Could deadlock here.
		 *
		 * SOMEDAY REFLECT ERROR BACK THROUGH expand TO CALLERS
		 * (grow, sbreak) SO CAN'T DEADLOCK HERE.
		 */
		sleep((caddr_t)&lbolt, PRIBIO);
		goto top;
	}
	/*
	 * Set SSWAP bit, so that when process is swapped back in
	 * swapin will set u.u_pcb.pcb_sswap to u_sswap and force a
	 * return from the setjmp() above.
	 */
	u.u_procp->p_flag |= SSWAP;
	swtch();
	/* no return */
}
#endif /* mips */
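
/*
 * Illustrative sketch of the control flow on the bad: path in both
 * ptexpand() versions: capture a resume context, swap the process out,
 * and let swapin allocate the missing resource before forcing a return
 * through the saved context.  Standard setjmp()/longjmp() stand in for
 * the kernel's save()/setjmp(&u.u_ssave)/SSWAP machinery (analogous,
 * not identical); the toy_* names are hypothetical.
 */
#include <setjmp.h>

static jmp_buf toy_resume_ctx;

static int
toy_alloc_or_swap(tryalloc, swapself)
	int (*tryalloc)();
	void (*swapself)();
{
	if ((*tryalloc)() != 0)
		return (1);	/* resource was available outright */
	if (setjmp(toy_resume_ctx))
		return (0);	/* resumed after "swapin" did the work */
	/*
	 * swapself() is expected to longjmp(toy_resume_ctx, 1) from
	 * within this frame, the moral equivalent of SSWAP forcing the
	 * return from the setjmp() above.
	 */
	(*swapself)();
	return (0);		/* not reached */
}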
kmcopy(to, from, count)
	register int to;
	int from;
	register int count;
{
	register struct pte *tp = &Usrptmap[to];
	register struct pte *fp = &Usrptmap[from];

#ifdef mips
	XPRINTF(XPR_VM, "enter kmcopy", 0, 0, 0, 0);
#endif /* mips */
	while (count != 0) {
#ifdef vax
		mapin(tp, tptov(u.u_procp, to), fp->pg_pfnum, 1,
		    (int)(*((int *)fp) & (PG_V|PG_PROT)));
#endif /* vax */
#ifdef mips
		mapin(btop(kmxtob(to)), fp->pg_pfnum,
		    (int)(*((int *)fp) & (PG_V|PG_PROT)));
#endif /* mips */
		tp++;
		fp++;
		to++;
		count--;
	}
}
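
/*
 * Illustrative sketch of the masking kmcopy() applies above: only the
 * valid bit and protection field of each source pte survive into the
 * new mapping, while the page frame number is supplied separately from
 * fp->pg_pfnum.  The bit layouts below are hypothetical stand-ins; the
 * real PG_V/PG_PROT values are machine dependent (see pte.h).
 */
#define	TOY_PG_V	0x40000000	/* hypothetical valid bit */
#define	TOY_PG_PROT	0x3c000000	/* hypothetical protection field */
#define	TOY_PG_PFNUM	0x000fffff	/* hypothetical frame-number field */

static int
toy_pte_inherit(opte, pfnum)
	int opte, pfnum;
{
	/* keep V+prot from the old pte, install the new frame number */
	return ((opte & (TOY_PG_V | TOY_PG_PROT)) |
	    (pfnum & TOY_PG_PFNUM));
}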