pmap.c
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register struct pmap *pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t pa, va;
	register pt_entry_t *pte;
	register pv_entry_t pv, npv;
	register int ix;
	pmap_t ptpmap;
	int *pde, s, bits;
	boolean_t firstpage = TRUE;
	boolean_t flushcache = FALSE;
#ifdef DEBUG
	pt_entry_t opte;

	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)", pmap, sva, eva);
	if (eva >= USRSTACK && eva <= UPT_MAX_ADDRESS)
		nullop();
#endif

	if (pmap == NULL)
		return;

#ifdef DEBUG
	remove_stats.calls++;
#endif
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Weed out invalid mappings.
		 * Note: we assume that the page directory table is
		 * always allocated, and in kernel virtual.
		 */
		if (!pmap_pde_v(pmap_pde(pmap, va)))
			continue;

		pte = pmap_pte(pmap, va);
		if (pte == 0)
			continue;
		pa = pmap_pte_pa(pte);
		if (pa == 0)
			continue;
#ifdef DEBUG
		opte = *pte;
		remove_stats.removes++;
#endif
		/*
		 * Update statistics
		 */
		if (pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		/*
		 * Invalidate the PTEs.
		 * XXX: should cluster them up and invalidate as many
		 * as possible at once.
		 */
#ifdef DEBUG
		if (pmapdebug & PDB_REMOVE)
			printf("remove: inv %x ptes at %x(%x) ",
			       i386pagesperpage, pte, *(int *)pte);
#endif
		bits = ix = 0;
		do {
			bits |= *(int *)pte & (PG_U|PG_M);
			*(int *)pte++ = 0;
			/*TBIS(va + ix * I386_PAGE_SIZE);*/
		} while (++ix != i386pagesperpage);

		if (pmap == &curproc->p_vmspace->vm_pmap)
			pmap_activate(pmap, (struct pcb *)curproc->p_addr);
		/* are we current address space or kernel? */
		/*if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
		    || pmap == kernel_pmap)
			load_cr3(curpcb->pcb_ptd);*/
		tlbflush();

#ifdef needednotdone
reduce wiring count on page table pages as references drop
#endif

		/*
		 * Remove from the PV table (raise IPL since we
		 * may be called at interrupt time).
		 */
		if (pa < vm_first_phys || pa >= vm_last_phys)
			continue;
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * If it is the first entry on the list, it is actually
		 * in the header and we must copy the following entry up
		 * to the header.  Otherwise we must search the list for
		 * the entry.  In either case we free the now unused entry.
		 */
		if (pmap == pv->pv_pmap && va == pv->pv_va) {
			npv = pv->pv_next;
			if (npv) {
				*pv = *npv;
				free((caddr_t)npv, M_VMPVENT);
			} else
				pv->pv_pmap = NULL;
#ifdef DEBUG
			remove_stats.pvfirst++;
#endif
		} else {
			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
#ifdef DEBUG
				remove_stats.pvsearch++;
#endif
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					break;
				pv = npv;
			}
#ifdef DEBUG
			if (npv == NULL)
				panic("pmap_remove: PA not in pv_tab");
#endif
			pv->pv_next = npv->pv_next;
			free((caddr_t)npv, M_VMPVENT);
			pv = pa_to_pvh(pa);
		}

#ifdef notdef
[tally number of pagetable pages, if sharing of ptpages adjust here]
#endif
		/*
		 * Update saved attributes for managed page
		 */
		pmap_attributes[pa_index(pa)] |= bits;
		splx(s);
	}
#ifdef notdef
[cache and tlb flushing, if needed]
#endif
}
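/*
 * Illustrative sketch, not part of the original pmap.c: pmap_remove()
 * above assumes its callers hand it a range already rounded to page
 * boundaries.  A hypothetical caller could round the range itself with
 * the i386_trunc_page()/i386_round_page() macros (assumed to come from
 * machine/param.h) before calling in, e.g.:
 */
static void
example_unmap_range(pmap, start, end)
	pmap_t pmap;
	vm_offset_t start, end;
{
	/* round outward to whole pages, then remove every mapping in range */
	start = i386_trunc_page(start);
	end = i386_round_page(end);
	pmap_remove(pmap, start, end);
}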
/*
 * Routine:	pmap_remove_all
 * Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 */
void
pmap_remove_all(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove_all(%x)", pa);
	/*pmap_pvdump(pa);*/
#endif
	/*
	 * Not one of ours
	 */
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Do it the easy way for now
	 */
	while (pv->pv_pmap != NULL) {
#ifdef DEBUG
		if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
		    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
			panic("pmap_remove_all: bad mapping");
#endif
		pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
	}
	splx(s);
}

/*
 * Routine:	pmap_copy_on_write
 * Function:
 *		Remove write privileges from all
 *		physical maps for this physical page.
 */
void
pmap_copy_on_write(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_copy_on_write(%x)", pa);
#endif
	pmap_changebit(pa, PG_RO, TRUE);
}

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register pt_entry_t *pte;
	register vm_offset_t va;
	register int ix;
	int i386prot;
	boolean_t firstpage = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Page table page is not allocated.
		 * Skip it, we don't want to force allocation
		 * of unnecessary PTE pages just to set the protection.
		 */
		if (!pmap_pde_v(pmap_pde(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= i386_trunc_pdr((vm_offset_t)-1))
				break;
			va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
			continue;
		} else
			pte = pmap_pte(pmap, va);

		/*
		 * Page not valid.  Again, skip it.
		 * Should we do this?  Or set protection anyway?
		 */
		if (!pmap_pte_v(pte))
			continue;

		ix = 0;
		i386prot = pte_prot(pmap, prot);
		if (va < UPT_MAX_ADDRESS)
			i386prot |= 2 /*PG_u*/;
		do {
			/* clear VAC here if PG_RO? */
			pmap_pte_set_prot(pte++, i386prot);
			/*TBIS(va + ix * I386_PAGE_SIZE);*/
		} while (++ix != i386pagesperpage);
	}
out:
	if (pmap == &curproc->p_vmspace->vm_pmap)
		pmap_activate(pmap, (struct pcb *)curproc->p_addr);
}
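/*
 * Illustrative sketch, not part of the original pmap.c: pmap_protect()
 * above falls back to pmap_remove() when read permission is being revoked
 * and returns immediately when write permission is kept.  A hypothetical
 * caller that wants an existing user range downgraded to read-only would
 * therefore invoke it as follows (VM_PROT_* constants as used above):
 */
static void
example_make_readonly(pmap, sva, eva)
	pmap_t pmap;
	vm_offset_t sva, eva;
{
	/* keep read/execute, drop write; the PTE protection bits are rewritten */
	pmap_protect(pmap, sva, eva, VM_PROT_READ|VM_PROT_EXECUTE);
}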
/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int npte, ix;
	vm_offset_t opa;
	boolean_t cacheable = TRUE;
	boolean_t checkpv = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)",
		       pmap, va, pa, prot, wired);
	if (!pmap_isvalidphys(pa))
		panic("invalid phys");
#endif
	if (pmap == NULL)
		return;

	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");
	/* also, should not muck with PTD va! */

#ifdef DEBUG
	if (pmap == kernel_pmap)
		enter_stats.kernel++;
	else
		enter_stats.user++;
#endif

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (!pmap_pde_v(pmap_pde(pmap, va))) {
		pg("ptdi %x", pmap->pm_pdir[PTDPTDI]);
	}

	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
#endif

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
#ifdef DEBUG
		enter_stats.pwchange++;
#endif
		/*
		 * Wiring change, just update stats.
		 * We don't worry about wiring PT pages as they remain
		 * resident as long as there are valid mappings in them.
		 * Hence, if a user page is wired, the PT page will be also.
		 */
		if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
#ifdef DEBUG
			if (pmapdebug & PDB_ENTER)
				pg("enter: wiring change -> %x ", wired);
#endif
			if (wired)
				pmap->pm_stats.wired_count++;
			else
				pmap->pm_stats.wired_count--;
#ifdef DEBUG
			enter_stats.wchange++;
#endif
		}
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: removing old mapping %x pa %x ", va, opa);
#endif
		pmap_remove(pmap, va, va + PAGE_SIZE);
#ifdef DEBUG
		enter_stats.mchange++;
#endif
	}

	/*
	 * Enter on the PV list if part of our managed memory
	 * Note that we raise IPL while manipulating pv_table
	 * since pmap_enter can be called at interrupt time.
	 */
	if (pa >= vm_first_phys && pa < vm_last_phys) {
		register pv_entry_t pv, npv;
		int s;

#ifdef DEBUG
		enter_stats.managed++;
#endif
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: pv at %x: %x/%x/%x ",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
#ifdef DEBUG
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
			pv->pv_flags = 0;
		}
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		else {
			/*printf("second time: ");*/
#ifdef DEBUG
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					panic("pmap_enter: already in pv_tab");
#endif
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		}
		/* drop IPL on both the first-entry and the list-append path */
		splx(s);
	}
	/*
	 * Assumption: if it is not part of our managed memory
	 * then it must be device memory which may be volatile.
	 */
	if (pmap_initialized) {
		checkpv = cacheable = FALSE;
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * I386 pages in a MACH page.
	 */
	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
	npte |= (*(int *)pte & (PG_M|PG_U));
	if (wired)
		npte |= PG_W;

	if (va < UPT_MIN_ADDRESS)
		npte |= PG_u;
	else if (va < UPT_MAX_ADDRESS)
		npte |= PG_u | PG_RW;

#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: new pte value %x ", npte);
#endif
	ix = 0;
	do {
		*(int *)pte++ = npte;
		/*TBIS(va);*/
		npte += I386_PAGE_SIZE;
		va += I386_PAGE_SIZE;
	} while (++ix != i386pagesperpage);
	pte--;
#ifdef DEBUGx
cache, tlb flushes
#endif
	/*pads(pmap);*/
	/*load_cr3(((struct pcb *)curproc->p_addr)->pcb_ptd);*/
	tlbflush();
}
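/*
 * Illustrative sketch, not part of the original pmap.c: pmap_enter() is
 * the one routine that must install the mapping immediately.  A
 * hypothetical caller wiring a single kernel page at va to the physical
 * page pa might look like this; kernel_pmap and the VM_PROT_* constants
 * are those already used above.
 */
static void
example_enter_wired(va, pa)
	vm_offset_t va, pa;
{
	/* read/write kernel mapping, wired so the PTE cannot be reclaimed */
	pmap_enter(kernel_pmap, va, pa, VM_PROT_READ|VM_PROT_WRITE, TRUE);
}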
/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(phys, prot)
	vm_offset_t phys;
	vm_prot_t prot;
{
	switch (prot) {
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pmap_copy_on_write(phys);
		break;
	case VM_PROT_ALL:
		break;
	default:
		pmap_remove_all(phys);
		break;
	}
}

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);
#ifdef DEBUG
	/*
	 * Page table page is not allocated.
	 * Should this ever happen?  Ignore it for now,
	 * we don't want to force allocation of unnecessary PTE pages.
	 */
	if (!pmap_pde_v(pmap_pde(pmap, va))) {
		if (pmapdebug & PDB_PARANOIA)
			pg("pmap_change_wiring: invalid PDE for %x ", va);
		return;
	}
	/*
	 * Page not valid.  Should this ever happen?
	 * Just continue and change wiring anyway.
	 */
	if (!pmap_pte_v(pte)) {
		if (pmapdebug & PDB_PARANOIA)
			pg("pmap_change_wiring: invalid PTE for %x ", va);
	}
#endif
	if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
		if (wired)
			pmap->pm_stats.wired_count++;
		else
			pmap->pm_stats.wired_count--;
	}
	/*
	 * Wiring is not a hardware characteristic so there is no need
	 * to invalidate TLB.
	 */
	ix = 0;
	do {
		pmap_pte_set_w(pte++, wired);
	} while (++ix != i386pagesperpage);
}

/*
 * Routine:	pmap_pte
 * Function:
 *	Extract the page table entry associated
 *	with the given map/virtual_address pair.
 * [ what about induced faults -wfj]
 */
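/*
 * Illustrative sketch, not part of the original pmap.c: the pmap_pte()
 * lookup described above operates on the i386 two-level paging scheme,
 * where a 32-bit virtual address splits into a 10-bit page-directory
 * index, a 10-bit page-table index and a 12-bit byte offset.  The
 * hypothetical helper below only demonstrates that decomposition.
 */
static void
example_va_split(va, pdi, pti, off)
	vm_offset_t va;
	unsigned *pdi, *pti, *off;
{
	*pdi = (va >> 22) & 0x3ff;	/* index into the page directory */
	*pti = (va >> 12) & 0x3ff;	/* index into the page table */
	*off = va & 0xfff;		/* byte offset within the 4K page */
}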