pmap.c
/*
 * MMU management.
 */

/*
 * Change contexts.  We need the old context number as well as the new
 * one.  If the context is changing, we must write all user windows
 * first, lest an interrupt cause them to be written to the (other)
 * user whose context we set here.
 */
#define	CHANGE_CONTEXTS(old, new) \
	if ((old) != (new)) { \
		write_user_windows(); \
		setcontext(new); \
	}

/*
 * Allocate an MMU entry (i.e., a PMEG).
 * If necessary, steal one from someone else.
 * Put it on the tail of the given queue
 * (which is either the LRU list or the locked list).
 * The locked list is not actually ordered, but this is easiest.
 * Also put it on the given (new) pmap's chain,
 * enter its pmeg number into that pmap's segmap,
 * and store the pmeg's new virtual segment number (me->me_vseg).
 *
 * This routine is large and complicated, but it must be fast
 * since it implements the dynamic allocation of MMU entries.
 */
struct mmuentry *
me_alloc(mh, newpm, newvseg)
	register struct mmuhd *mh;
	register struct pmap *newpm;
	register int newvseg;
{
	register struct mmuentry *me;
	register struct pmap *pm;
	register int i, va, pa, *pte, tpte;
	int ctx;

	/* try free list first */
	if ((me = me_freelist) != NULL) {
		me_freelist = me->me_next;
#ifdef DEBUG
		if (me->me_pmap != NULL)
			panic("me_alloc: freelist entry has pmap");
		if (pmapdebug & PDB_MMU_ALLOC)
			printf("me_alloc: got pmeg %x\n", me->me_pmeg);
#endif
		insque(me, mh->mh_prev);	/* onto end of queue */

		/* onto pmap chain; pmap is already locked, if needed */
		me->me_pmforw = NULL;
		me->me_pmback = newpm->pm_mmuback;
		*newpm->pm_mmuback = me;
		newpm->pm_mmuback = &me->me_pmforw;

		/* into pmap segment table, with backpointers */
		newpm->pm_segmap[newvseg] = me->me_pmeg;
		me->me_pmap = newpm;
		me->me_vseg = newvseg;

		return (me);
	}

	/* no luck, take head of LRU list */
	if ((me = me_lru.mh_next) == (struct mmuentry *)&me_lru)
		panic("me_alloc: all pmegs gone");
	pm = me->me_pmap;
#ifdef DEBUG
	if (pm == NULL)
		panic("me_alloc: LRU entry has no pmap");
	if (pm == kernel_pmap)
		panic("me_alloc: stealing from kernel");
	pte = pm->pm_pte[me->me_vseg];
	if (pte == NULL)
		panic("me_alloc: LRU entry's pmap has no ptes");
	if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
		printf("me_alloc: stealing pmeg %x from pmap %x\n",
		    me->me_pmeg, pm);
#endif
	/*
	 * Remove from LRU list, and insert at end of new list
	 * (probably the LRU list again, but so what?).
	 */
	remque(me);
	insque(me, mh->mh_prev);

	/*
	 * The PMEG must be mapped into some context so that we can
	 * read its PTEs.  Use its current context if it has one;
	 * if not, and since context 0 is reserved for the kernel,
	 * the simplest method is to switch to 0 and map the PMEG
	 * to virtual address 0---which, being a user space address,
	 * is by definition not in use.
	 *
	 * XXX for ncpus>1 must use per-cpu VA?
	 * XXX do not have to flush cache immediately
	 */
	ctx = getcontext();
	if (pm->pm_ctx) {
		CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
#ifdef notdef
		if (vactype != VAC_NONE)
#endif
			cache_flush_segment(me->me_vseg);
		va = VSTOVA(me->me_vseg);
	} else {
		CHANGE_CONTEXTS(ctx, 0);
		setsegmap(0, me->me_pmeg);
		/*
		 * No cache flush needed: it happened earlier when
		 * the old context was taken.
		 */
		va = 0;
	}

	/*
	 * Record reference and modify bits for each page,
	 * and copy PTEs into kernel memory so that they can
	 * be reloaded later.
	 */
	i = NPTESG;
	do {
		tpte = getpte(va);
		if (tpte & PG_V) {
			pa = ptoa(HWTOSW(tpte & PG_PFNUM));
			if (managed(pa))
				pvhead(pa)->pv_flags |= MR(tpte);
		}
		*pte++ = tpte & ~(PG_U|PG_M);
		va += NBPG;
	} while (--i > 0);

	/* update segment tables */
	simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
	if (pm->pm_ctx)
		setsegmap(VSTOVA(me->me_vseg), seginval);
	pm->pm_segmap[me->me_vseg] = seginval;

	/* off old pmap chain */
	if ((*me->me_pmback = me->me_pmforw) != NULL) {
		me->me_pmforw->me_pmback = me->me_pmback;
		me->me_pmforw = NULL;
	} else
		pm->pm_mmuback = me->me_pmback;
	simple_unlock(&pm->pm_lock);
	setcontext(ctx);	/* done with old context */

	/* onto new pmap chain; new pmap is already locked, if needed */
	/* me->me_pmforw = NULL; */	/* done earlier */
	me->me_pmback = newpm->pm_mmuback;
	*newpm->pm_mmuback = me;
	newpm->pm_mmuback = &me->me_pmforw;

	/* into new segment table, with backpointers */
	newpm->pm_segmap[newvseg] = me->me_pmeg;
	me->me_pmap = newpm;
	me->me_vseg = newvseg;

	return (me);
}
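/*
 * The per-pmap chain manipulated above is a singly linked list that
 * keeps a back pointer to the link word itself: me_pmforw points at
 * the next entry, me_pmback points at whatever word points at this
 * entry (the list head or the previous entry's me_pmforw), and
 * pm_mmuback points at the last entry's me_pmforw.  The following
 * standalone sketch of that tail-insert/unlink idiom is for
 * illustration only; the `node' and `tailq' names do not appear in
 * this driver.
 */
#if 0
struct node {
	struct node *forw;		/* next entry, NULL at the tail */
	struct node **back;		/* the word that points at us */
};

struct tailq {
	struct node *head;		/* first entry, NULL if empty */
	struct node **tailp;		/* &head, or &last->forw */
};

static void
tailq_append(q, n)
	register struct tailq *q;
	register struct node *n;
{

	n->forw = NULL;
	n->back = q->tailp;		/* remember who points at us */
	*q->tailp = n;			/* old tail link now points here */
	q->tailp = &n->forw;		/* we are the new tail */
}

static void
tailq_unlink(q, n)
	register struct tailq *q;
	register struct node *n;
{

	if ((*n->back = n->forw) != NULL)
		n->forw->back = n->back;	/* splice successor back */
	else
		q->tailp = n->back;		/* we were the tail */
}
#endif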
/*
 * Free an MMU entry.
 *
 * Assumes the corresponding pmap is already locked.
 * Does NOT flush cache, but does record ref and mod bits.
 * The rest of each PTE is discarded.
 * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
 * a context) or to 0 (if not).  Caller must also update
 * pm->pm_segmap and (possibly) the hardware.
 */
void
me_free(pm, pmeg)
	register struct pmap *pm;
	register u_int pmeg;
{
	register struct mmuentry *me = &mmuentry[pmeg];
	register int i, va, pa, tpte;

#ifdef DEBUG
	if (pmapdebug & PDB_MMU_ALLOC)
		printf("me_free: freeing pmeg %x from pmap %x\n",
		    me->me_pmeg, pm);
	if (me->me_pmeg != pmeg)
		panic("me_free: wrong mmuentry");
	if (pm != me->me_pmap)
		panic("me_free: pm != me_pmap");
#endif

	/* just like me_alloc, but no cache flush, and context already set */
	if (pm->pm_ctx)
		va = VSTOVA(me->me_vseg);
	else {
		setsegmap(0, me->me_pmeg);
		va = 0;
	}
	i = NPTESG;
	do {
		tpte = getpte(va);
		if (tpte & PG_V) {
			pa = ptoa(HWTOSW(tpte & PG_PFNUM));
			if (managed(pa))
				pvhead(pa)->pv_flags |= MR(tpte);
		}
		va += NBPG;
	} while (--i > 0);

	/* take mmu entry off pmap chain */
	if ((*me->me_pmback = me->me_pmforw) != NULL)
		me->me_pmforw->me_pmback = me->me_pmback;
	else
		pm->pm_mmuback = me->me_pmback;

	/* ... and remove from segment map */
	pm->pm_segmap[me->me_vseg] = seginval;

	/* off LRU or lock chain */
	remque(me);

	/* no associated pmap; on free list */
	me->me_pmap = NULL;
	me->me_next = me_freelist;
	me_freelist = me;
}

/*
 * `Page in' (load or inspect) an MMU entry; called on page faults.
 * Returns 1 if we reloaded the segment, -1 if the segment was
 * already loaded and the page was marked valid (in which case the
 * fault must be a bus error or something), or 0 (segment loaded but
 * PTE not valid, or segment not loaded at all).
 */
int
mmu_pagein(pm, va, bits)
	register struct pmap *pm;
	register int va, bits;
{
	register int *pte;
	register struct mmuentry *me;
	register int vseg = VA_VSEG(va), pmeg, i, s;

	/* return 0 if we have no PTEs to load */
	if ((pte = pm->pm_pte[vseg]) == NULL)
		return (0);

	/* return -1 if the fault is `hard', 0 if not */
	if (pm->pm_segmap[vseg] != seginval)
		return (bits && (getpte(va) & bits) == bits ? -1 : 0);

	/* reload segment: write PTEs into a new LRU entry */
	va = VA_ROUNDDOWNTOSEG(va);
	s = splpmap();		/* paranoid */
	pmeg = me_alloc(&me_lru, pm, vseg)->me_pmeg;
	setsegmap(va, pmeg);
	i = NPTESG;
	do {
		setpte(va, *pte++);
		va += NBPG;
	} while (--i > 0);
	splx(s);
	return (1);
}
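/*
 * A minimal sketch of how a fault handler might act on the return
 * value of mmu_pagein() above: 1 means the segment was just reloaded
 * and the faulting access can simply be retried, -1 means the mapping
 * was already loaded and valid so the fault is a genuine error, and 0
 * means the normal VM fault path must handle it.  The function below
 * is illustrative only and is not the driver's actual trap code.
 */
#if 0
int
pagein_example(pm, va, bits)
	register struct pmap *pm;
	register int va, bits;
{

	switch (mmu_pagein(pm, va, bits)) {
	case 1:
		return (0);	/* segment reloaded: retry the access */
	case -1:
		return (-1);	/* already valid: treat as a hard fault */
	default:
		return (1);	/* hand the fault to the VM system */
	}
}
#endif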
/*
 * Allocate a context.  If necessary, steal one from someone else.
 * Changes hardware context number and loads segment map.
 *
 * This routine is only ever called from locore.s just after it has
 * saved away the previous process, so there are no active user windows.
 */
void
ctx_alloc(pm)
	register struct pmap *pm;
{
	register union ctxinfo *c;
	register int cnum, i, va;
	register pmeg_t *segp;

#ifdef DEBUG
	if (pm->pm_ctx)
		panic("ctx_alloc pm_ctx");
	if (pmapdebug & PDB_CTX_ALLOC)
		printf("ctx_alloc(%x)\n", pm);
#endif
	if ((c = ctx_freelist) != NULL) {
		ctx_freelist = c->c_nextfree;
		cnum = c - ctxinfo;
		setcontext(cnum);
	} else {
		if ((ctx_kick += ctx_kickdir) >= ncontext) {
			ctx_kick = ncontext - 1;
			ctx_kickdir = -1;
		} else if (ctx_kick < 1) {
			ctx_kick = 1;
			ctx_kickdir = 1;
		}
		c = &ctxinfo[cnum = ctx_kick];
#ifdef DEBUG
		if (c->c_pmap == NULL)
			panic("ctx_alloc cu_pmap");
		if (pmapdebug & (PDB_CTX_ALLOC | PDB_CTX_STEAL))
			printf("ctx_alloc: steal context %x from %x\n",
			    cnum, c->c_pmap);
#endif
		c->c_pmap->pm_ctx = NULL;
		setcontext(cnum);
#ifdef notdef
		if (vactype != VAC_NONE)
#endif
			cache_flush_context();
	}
	c->c_pmap = pm;
	pm->pm_ctx = c;
	pm->pm_ctxnum = cnum;

	/*
	 * XXX loop below makes 3584 iterations ... could reduce
	 * by remembering valid ranges per context: two ranges
	 * should suffice (for text/data/bss and for stack).
	 */
	segp = pm->pm_rsegmap;
	for (va = 0, i = NUSEG; --i >= 0; va += NBPSG)
		setsegmap(va, *segp++);
}

/*
 * Give away a context.  Flushes cache and sets current context to 0.
 */
void
ctx_free(pm)
	struct pmap *pm;
{
	register union ctxinfo *c;
	register int newc, oldc;

	if ((c = pm->pm_ctx) == NULL)
		panic("ctx_free");
	pm->pm_ctx = NULL;
	oldc = getcontext();
	if (vactype != VAC_NONE) {
		newc = pm->pm_ctxnum;
		CHANGE_CONTEXTS(oldc, newc);
		cache_flush_context();
		setcontext(0);
	} else {
		CHANGE_CONTEXTS(oldc, 0);
	}
	c->c_nextfree = ctx_freelist;
	ctx_freelist = c;
}

/*----------------------------------------------------------------*/

/*
 * pvlist functions.
 */

/*
 * Walk the given pv list, and for each PTE, set or clear some bits
 * (e.g., PG_W or PG_NC).
 *
 * As a special case, this never clears PG_W on `pager' pages.
 * These, being kernel addresses, are always in hardware and have
 * a context.
 *
 * This routine flushes the cache for any page whose PTE changes,
 * as long as the process has a context; this is overly conservative.
 * It also copies ref and mod bits to the pvlist, on the theory that
 * this might save work later.  (XXX should test this theory)
 */
void
pv_changepte(pv0, bis, bic)
	register struct pvlist *pv0;
	register int bis, bic;
{
	register int *pte;
	register struct pvlist *pv;
	register struct pmap *pm;
	register int va, vseg, pmeg, i, flags;
	int ctx, s;

	write_user_windows();		/* paranoid? */
	s = splpmap();			/* paranoid? */
	if (pv0->pv_pmap == NULL) {
		splx(s);
		return;
	}
	ctx = getcontext();
	flags = pv0->pv_flags;
	for (pv = pv0; pv != NULL; pv = pv->pv_next) {
		pm = pv->pv_pmap;
		if (pm == NULL)
			panic("pv_changepte 1");
		va = pv->pv_va;
		vseg = VA_VSEG(va);
		pte = pm->pm_pte[vseg];
		if ((pmeg = pm->pm_segmap[vseg]) != seginval) {
			register int tpte;

			/* in hardware: fix hardware copy */
			if (pm->pm_ctx) {
				extern vm_offset_t pager_sva, pager_eva;

				/*
				 * Bizarreness: we never clear PG_W on
				 * pager pages, nor PG_NC on DVMA pages.
				 */
				if (bic == PG_W &&
				    va >= pager_sva && va < pager_eva)
					continue;
				if (bic == PG_NC &&
				    va >= DVMA_BASE && va < DVMA_END)
					continue;
				setcontext(pm->pm_ctxnum);
				/* XXX should flush only when necessary */
#ifdef notdef
				if (vactype != VAC_NONE)
#endif
					cache_flush_page(va);
			} else {
				/* XXX per-cpu va? */
				setcontext(0);
				setsegmap(0, pmeg);
				va = VA_VPG(va) * NBPG;
			}
			tpte = getpte(va);
			if (tpte & PG_V)
				flags |= (tpte >> PG_M_SHIFT) &
				    (PV_MOD|PV_REF);
			tpte = (tpte | bis) & ~bic;
			setpte(va, tpte);
			if (pte != NULL)	/* update software copy */
				pte[VA_VPG(va)] = tpte;
		} else {
			/* not in hardware: just fix software copy */
			if (pte == NULL)
				panic("pv_changepte 2");
			pte += VA_VPG(va);
			*pte = (*pte | bis) & ~bic;
		}
	}
	pv0->pv_flags = flags;
	setcontext(ctx);
	splx(s);
}
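/*
 * Two typical ways pv_changepte() is used, sketched for illustration:
 * write-protecting every mapping of a managed page (clear PG_W), or
 * marking every mapping uncacheable (set PG_NC) when a harmful cache
 * alias is detected.  The example_* function names are illustrative
 * only; the driver's real callers are not part of this excerpt.
 */
#if 0
void
example_write_protect(pa)
	register int pa;
{

	if (managed(pa))
		pv_changepte(pvhead(pa), 0, PG_W);	/* clear write enable */
}

void
example_set_uncached(pa)
	register int pa;
{

	if (managed(pa))
		pv_changepte(pvhead(pa), PG_NC, 0);	/* set no-cache bit */
}
#endif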
/*
 * Sync ref and mod bits in pvlist (turns off same in hardware PTEs).
 * Returns the new flags.
 *
 * This is just like pv_changepte, but we never add or remove bits,
 * hence never need to adjust software copies.
 */
int
pv_syncflags(pv0)
	register struct pvlist *pv0;
{
	register struct pvlist *pv;
	register struct pmap *pm;
	register int tpte, va, vseg, pmeg, i, flags;
	int ctx, s;

	write_user_windows();		/* paranoid? */
	s = splpmap();			/* paranoid? */
	if (pv0->pv_pmap == NULL) {	/* paranoid */
		splx(s);
		return (0);
	}
	ctx = getcontext();
	flags = pv0->pv_flags;
	for (pv = pv0; pv != NULL; pv = pv->pv_next) {
		pm = pv->pv_pmap;
		va = pv->pv_va;
		vseg = VA_VSEG(va);
		if ((pmeg = pm->pm_segmap[vseg]) == seginval)
			continue;
		if (pm->pm_ctx) {
			setcontext(pm->pm_ctxnum);
			/* XXX should flush only when necessary */
#ifdef notdef
			if (vactype != VAC_NONE)
#endif
				cache_flush_page(va);
		} else {
			/* XXX per-cpu va? */
			setcontext(0);
			setsegmap(0, pmeg);
			va = VA_VPG(va) * NBPG;
		}
		tpte = getpte(va);
		if (tpte & (PG_M|PG_U) && tpte & PG_V) {
			flags |= (tpte >> PG_M_SHIFT) &
			    (PV_MOD|PV_REF);
			tpte &= ~(PG_M|PG_U);
			setpte(va, tpte);
		}
	}
	pv0->pv_flags = flags;
	setcontext(ctx);
	splx(s);
	return (flags);
}
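/*
 * A sketch of how the flags returned by pv_syncflags() might be
 * consulted, e.g. to answer ``has this managed page been modified?''.
 * This illustrates the interface only; it is not the driver's actual
 * pmap_is_modified() code, and the example_* name is made up.
 */
#if 0
int
example_is_modified(pa)
	register int pa;
{

	if (!managed(pa))
		return (0);
	return ((pv_syncflags(pvhead(pa)) & PV_MOD) != 0);
}
#endif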