📄 pt_machdep.c
字号:
	/*
	 * Tail of distsmpte() -- the opening of this function lies above
	 * this excerpt; only its per-process loop epilogue is visible here.
	 */
	if (unlock_text) /* Did this function take lk_text? */
		smp_unlock(&lk_text);
	/* now for next proc in linked list */
	if ((p = p->p_sm[i].sm_link) == NULL)
		break;
	if (p->p_sm == (struct p_sm *) NULL) {
		/*
		 * NOTE(review): panic string says "distmpte" -- looks like
		 * a typo for "distsmpte"; left as-is (runtime string).
		 */
		panic("distmpte: p_sm2");
	}
	/* re-locate this SMS slot in the next proc's shared-memory table */
	for (i = 0; i < sminfo.smseg; i++)
		if (p->p_sm[i].sm_p == sp)
			break;
	if (i >= sminfo.smseg)
		panic("distsmpte #2");
	}
}
#endif distsmpte

#ifndef dirtysm
/*
 * DIRTYSM -- checks for dirty ptes in process space
 *
 * Walks the list of processes attached to shared-memory segment "sp"
 * and, for each, tests (via dirtycl) the PTE cluster at offset "smp"
 * within the segment in that process's page table.
 *
 * Returns 1 as soon as a dirty cluster is found, 0 otherwise
 * (including the case where no process is attached).
 */
dirtysm(sp, smp)
	register struct smem *sp;	/* pointer to smem structure */
	register size_t smp;		/* offset into SMS */
{
	register struct proc *p;	/* Proc pointer */
	register struct pte *pte;	/* pointer to Proc's pte */
	register int i;

	XPRINTF(XPR_VM,"enter dirtysm",0,0,0,0);

	/* SMS offset must be on cluster boundary */
	if (smp % CLSIZE)
		panic("dirtysm: smp");

	/* if the SMS is currently not attached to any process then return */
	if (sp->sm_ccount == 0)
		return (0);

	/* find SMS of first attached proc */
	p = sp->sm_caddr;
	if (p->p_sm == (struct p_sm *) NULL) {
		panic("dirtysm: p_sm1");
	}
	for (i = 0; i < sminfo.smseg; i++)
		if (p->p_sm[i].sm_p == sp)
			break;
	if (i >= sminfo.smseg)
		panic("dirtysm: no SMS");

	/*
	 * follow attached proc list, looking for associated dirty
	 * PTEs. If one is found, then return (1), else return (0)
	 */
	while (p) {
		/* pte for the cluster: p0 base + segment address + offset */
		pte = p->p_p0br + p->p_sm[i].sm_saddr + smp;
		if (dirtycl(pte))
			return(1);
		/* now for next proc in linked list */
		if ((p = p->p_sm[i].sm_link) == NULL)
			break;
		if (p->p_sm == (struct p_sm *) NULL) {
			panic("dirtysm: p_sm2");
		}
		/* find next proc's SMS */
		for (i = 0; i < sminfo.smseg; i++)
			if (p->p_sm[i].sm_p == sp)
				break;
		if (i >= sminfo.smseg)
			panic("dirtysm: no SMS #2");
	}
	return (0);
}
#endif dirtysm

/*
 * Release page tables of process p. If the process is in context, then
 * "vmemfree" is notified that the released page frames should be placed
 * on the "ucmap" free list until after context switch.
*/
vrelpt(p)
	register struct proc *p;
{
	register int a;

	/* nothing to release if the process owns no page tables */
	if (p->p_szpt == 0)
		return;
	a = btokmx(p->p_p0br);
	/*
	 * A negated count asks vmemfree to defer the frames (ucmap list)
	 * when we are freeing our own, currently in-context, page tables.
	 */
	if (u.u_procp == p)
		(void) vmemfree(&Usrptmap[a],-(p->p_szpt));
	else
		(void) vmemfree(&Usrptmap[a], p->p_szpt);
	rmfree(kernelmap, (long)p->p_szpt, (long)a);
}

/*
 * Field-exchange helpers used only by vpasspt below:
 * Xu  -- swap an int field of the two pcbs (up/uq)
 * Xup -- swap a pte-pointer field of the two pcbs
 * Xp  -- swap an int field of the two procs (p/q)
 * Xpp -- swap a pte-pointer field of the two procs
 */
#define Xu(a) {register int t; \
	t = up->u_pcb.a; up->u_pcb.a = uq->u_pcb.a; uq->u_pcb.a = t;}
#define Xup(a) {register struct pte *tp; \
	tp = up->u_pcb.a; up->u_pcb.a = uq->u_pcb.a; uq->u_pcb.a = tp;}
#define Xp(a) {register int t; t = p->a; p->a = q->a; q->a = t;}
#define Xpp(a) {register struct pte *tp; tp = p->a; p->a = q->a; q->a = tp;}

/*
 * Pass the page tables of process p to process q.
 * Used during vfork(). P and q are not symmetric;
 * p is the giver and q the receiver; after calling vpasspt
 * p will be ``cleaned out''. Thus before vfork() we call vpasspt
 * with the child as q and give it our resources; after vfork() we
 * call vpasspt with the child as p to steal our resources back.
 * We are cognizant of whether we are p or q because we have to
 * be careful to keep our u. area and restore the other u. area from
 * umap after we temporarily put our u. area in both p and q's page tables.
 */
vpasspt(p, q, up, uq, umap)
	register struct proc *p, *q;
	register struct user *up, *uq;
	struct pte *umap;
{
#define P0PAGES P1PAGES
	int old_p0length, old_p1length;
	unsigned old_p1start;

	XPRINTF(XPR_VM,"enter vpasspt",0,0,0,0);
	if (up != &u) {
		/* Force p==parent and q==child. */
		register caddr_t x;

		x = (caddr_t)p; p = q; q = (struct proc *)x;
		x = (caddr_t)up; up = uq; uq = (struct user *)x;
	}
	/*
	 * Double map the parent's u. area and fork window into the
	 * child's u. area and fork window. A copy of the child's
	 * u. area map remains in umap. This code is executed by
	 * the parent while the child is not running.
	 */
	bcopy(Forkmap, q->p_addr - FORKPAGES, HIGHPAGES*sizeof(struct pte));
	/* Switch the page tables.
*/
	/* remember the old region extents before the swap below */
	old_p0length = u.u_pcb.pcb_p0lr;
	old_p1start = P0PAGES + u.u_pcb.pcb_p1lr;
	old_p1length = P1PAGES - u.u_pcb.pcb_p1lr;
	/* exchange pcb fields, then proc fields, between parent and child */
	Xu(pcb_szpt); Xu(pcb_p0lr); Xu(pcb_p1lr);
	Xup(pcb_p0br); Xup(pcb_p1br);
	Xpp(p_p0br); Xp(p_szpt); Xpp(p_addr);
	/* load the freshly swapped region registers into the hardware */
	mtpr(P0BR, u.u_pcb.pcb_p0br);
	mtpr(P1BR, u.u_pcb.pcb_p1br);
	mtpr(P0LR, u.u_pcb.pcb_p0lr &~ AST_CLR);
	mtpr(P1LR, u.u_pcb.pcb_p1lr);
	/* Invalidate address translations to our former self. */
	newptes(u.u_procp, LOWPAGES, old_p0length);
	newptes(u.u_procp, old_p1start, old_p1length);
	/*
	 * Restore the child's u. area. Use only page frame numbers
	 * from umap, as protection values in umap are not the
	 * "natural" protections for u. area pages.
	 */
	{
		register int i;

		for (i = 0; i < UPAGES; i++)
			q->p_addr[i].pg_pfnum = umap[i].pg_pfnum;
	}
}

/*
 * Get u area for process p. If a old u area is given,
 * then copy the new area from the old, else
 * swap in as specified in the proc structure.
 *
 * Since argument map/newu is potentially shared
 * when an old u. is provided we have to be careful not
 * to block after beginning to use them in this case.
 * (This is not true when called from swapin() with no old u.)
 */
vgetu(p, palloc, map, newu, oldu)
	register struct proc *p;
	int (*palloc)();		/* allocation routine, called below */
	register struct pte *map;
	register struct user *newu;
	struct user *oldu;
{
	register int i;
	int ret, memall();	/* NOTE(review): "ret" is unused in the
				 * visible body -- confirm before removing */

	XPRINTF(XPR_VM,"enter vgetu",0,0,0,0);
	/* allocate the u. area page frames; fail if the allocator does */
	if ((*palloc)(p->p_addr, clrnd(UPAGES), p, CSYS, NULL,V_NOOP) == 0)
		return(0);
	/*
	 * New u. pages are to be accessible in map/newu as well
	 * as in process p's virtual memory.
	 */
	for (i = 0; i < UPAGES; i++) {
		*(int *)(map+i) =
		    *(int *)(p->p_addr+i) & PG_PFNUM | PG_V | PG_M | PG_KW;
		*(int *)(p->p_addr + i) |= PG_URKW | PG_V | PG_M;
	}
	Sysmap[btop((long)(p->p_pcb)&0x7fffffff)] = *p->p_addr;
	setredzone(p->p_addr, (caddr_t)0);
	vmaccess(map, (caddr_t)newu, UPAGES);
	/*
	 * New u.'s come from forking or inswap.
*/
	if (oldu) {
		/* forking: clone the u. area from the old one */
		bcopy((caddr_t)oldu, (caddr_t)newu, UPAGES * NBPG);
		newu->u_procp = p;
	} else {
		/* inswap: read the u. area back from the swap device */
		swap(p, *(p->p_smap->dm_ptdaddr), (caddr_t)0, ctob(UPAGES),
		    B_READ, B_UAREA, swapdev, 0);
		/* sanity-check that the swapped-in u. matches this proc */
		if ( newu->u_pcb.pcb_ssp != -1 ||
		    newu->u_tsize != p->p_tsize ||
		    newu->u_dsize != p->p_dsize ||
		    newu->u_ssize != p->p_ssize ||
		    newu->u_procp != p)
			panic("vgetu");
	}
	/*
	 * Initialize the pcb copies of the p0 and p1 region bases and
	 * software page table size from the information in the proc structure.
	 */
	newu->u_pcb.pcb_p0br = p->p_p0br;
	newu->u_pcb.pcb_p1br = initp1br(p->p_p0br + p->p_szpt * NPTEPG);
	newu->u_pcb.pcb_szpt = p->p_szpt;
	return (1);
}

/*
 * Release u. area, swapping it out if desired.
 *
 * To fix the "stale u-area" problem, this routine will indicate to
 * [v]memall that the u-area must be put in a temp "u" list until
 * after the context switch. This will only happen if the u-area in
 * question is currently in context.
 */
vrelu(p, swapu)
	register struct proc *p;
{
	register int i;
	struct pte uu[UPAGES];

	XPRINTF(XPR_VM,"enter vrelu",0,0,0,0);
	if (swapu)
		swap(p, *(p->p_smap->dm_ptdaddr), (caddr_t)0, ctob(UPAGES),
		    B_WRITE, B_UAREA, swapdev, 0);
	/* copy the u. ptes aside before freeing the frames they map */
	for (i = 0; i < UPAGES; i++)
		uu[i] = p->p_addr[i];
	/*
	 * Negated count defers the frames until after context switch
	 * when releasing our own, currently in-context, u. area.
	 */
	if (u.u_procp == p)
		(void) vmemfree(uu, -(clrnd(UPAGES)));
	else
		(void) vmemfree(uu, clrnd(UPAGES));
}

#ifdef unneeded
int ptforceswap;
#endif

/*
 * Expand a page table, assigning new kernel virtual
 * space and copying the page table entries over both
 * in the system map and as necessary in the user page table space.
 */
/*ARGSUSED*/
ptexpand(change, region)
	register int change;
	int region;	/* this arg only used in MIPS version */
{
	register int kold = btokmx(u.u_pcb.pcb_p0br);
	register int knew;	/* Must leave a register for p1br below */
	int spages, tdpages;
	int ss = P1PAGES - u.u_pcb.pcb_p1lr;	/* stack-region page count */
	int szpt = u.u_pcb.pcb_szpt;		/* current page-table size */
	int s;
	struct cpudata *pcpu;	/* NOTE(review): unused in visible body */

	/* growth must be a positive whole number of clusters */
	if (change <= 0 || change % CLSIZE)
		panic("ptexpand");
	/*
	 * Change is the number of new page table pages needed.
 * Kold is the old index in the kernelmap of the page tables.
 * Allocate a new kernel map segment of size szpt+change for
 * the page tables, and the new page table pages in the
 * middle of this new region.
 */
top:
#ifdef unneeded
	if (ptforceswap)
		goto bad;
#endif
	if ((knew=rmalloc(kernelmap, (long)(szpt+change))) == 0)
		goto bad;
	/* split old table size: stack pages vs. text+data pages */
	spages = ss/NPTEPG;
	tdpages = szpt - spages;
	/* get the brand-new page-table pages; undo kernelmap on failure */
	if (memall(&Usrptmap[knew+tdpages], change, u.u_procp, CSYS,
	    NULL, V_NOOP) == 0) {
		rmfree(kernelmap, (long)(szpt+change), (long)knew);
		goto bad;
	}
	/* Quiesce the vector processor if necessary */
	VPSYNC ();
	/*
	 * Spages pages of u.+stack page tables go over unchanged.
	 * Tdpages of text+data page table may contain a few stack
	 * pages which need to go in one of the newly allocated pages;
	 * this is a rough cut.
	 */
	kmcopy(knew, kold, tdpages);
	kmcopy(knew+tdpages+change, kold+tdpages, spages);
	{ /* This block localizes register usage. */
		register struct pte *p1, *p2;
		register int i;

		/*
		 * Validate and clear the newly allocated page table pages in the
		 * center of the new region of the kernelmap.
		 */
		for (i = knew + tdpages, p1 = &Usrptmap[i], p2 = p1 + change;
		    p1 < p2; p1++, i++) {
			Hard(p1) &= ~PG_PROT;
			Hard(p1) |= (int)(PG_V | PG_KW | PG_M);
			mtpr(TBIS, kmxtob(i));
			bzero(kmxtob(i), NBPG);
		}
		/*
		 * Move the stack and u. pte's which are before the newly
		 * allocated pages into the last of the newly allocated pages.
		 * They are taken from the end of the current p1 region,
		 * and moved to the end of the new p1 region.
		 */
		p1 = u.u_pcb.pcb_p1br + u.u_pcb.pcb_p1lr;
		p2 = initp1br(kmxtob(knew+szpt+change)) + u.u_pcb.pcb_p1lr;
		s = splimp();
		smp_lock(&lk_text, LK_RETRY);
		for (i = kmxtob(kold+szpt) - p1; i != 0; i--)
			*p2++ = *p1++;
	}
	{ /* This block localizes register usage. */
		register struct pte *p1br;	/* MANDATORY REGISTER (see below) */
		register struct proc *p = u.u_procp;

		/*
		 * Now switch to the new page tables.
 This change must be
		 * synchronized with other processes sharing the text, with any
		 * process removing one of this process' reclaimable pages from
		 * the free list, and perhaps someday with pageout.
		 */
		p->p_p0br = kmxtob(knew);
		p->p_szpt += change;
		p->p_addr = uaddr(p);
		tbsync();
		smp_unlock(&lk_text);
		(void) splx(s);
		setp0br(p->p_p0br);
		/*
		 * Here the process remaps the stack on which it is running.
		 * Use a register to avoid confusion.
		 */
		p1br = initp1br(kmxtob(knew+szpt+change));
		setp1br(p1br);
		mtpr(TBIA, 0);
	}
	u.u_pcb.pcb_szpt += change;
	/*
	 * Finally, free old kernelmap.
	 */
	if (szpt)
		rmfree(kernelmap, (long)szpt, (long)kold);
	return;
bad:
	/*
	 * Swap out the process so that the unavailable
	 * resource will be allocated upon swapin.
	 */
	SET_P_VM(u.u_procp, SSWAP);
	do {
		swapself++;
		sleep((caddr_t)&swapself, PSWP);
	} while (u.u_procp->p_vm & SSWAP);
}

/*
 * This is a vax only routine. Will not exist in MIPS
 *
 * Zeroes (via blkclr) the ptes lying between the end of the
 * text+data region and the start of the stack+u. region.
 */
vax_noaccess(p)
register struct proc *p;
{
	register struct pte *t_begin, *t_end;

	/* end of gap: top of page table minus stack and u. area pages */
	t_end = p->p_p0br+(p->p_szpt*NPTEPG) - (p->p_ssize+HIGHPAGES);
	/* start of gap: just past text and data */
	t_begin = p->p_p0br+p->p_tsize+p->p_dsize;
	blkclr((caddr_t)t_begin, (t_end-t_begin)*sizeof(struct pte));
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -