📄 pmap.c
字号:
/*
 * pmap_pte:
 *
 *	Return a pointer to the page table entry mapping virtual address
 *	`va' in `pmap', or 0 if the containing page directory entry is
 *	not valid.  For the current or kernel address space the PTE is
 *	reached through the recursive page-directory mapping (vtopte);
 *	for any other pmap the alternate slot (APTDpde) is first pointed
 *	at that pmap's page directory and the TLB is flushed, then the
 *	alternate recursive window (avtopte) is used.
 */
struct pte *pmap_pte(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{

#ifdef DEBUGx
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pte(%x, %x) ->\n", pmap, va);
#endif
	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {

		/* are we current address space or kernel? */
		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
		    || pmap == kernel_pmap)
			return ((struct pte *) vtopte(va));

		/* otherwise, we are alternate address space */
		else {
			/*
			 * Retarget the alternate PTD slot at this pmap
			 * (if it is not already) and flush stale
			 * translations before using the window.
			 */
			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
			    != APTDpde.pd_pfnum) {
				APTDpde = pmap->pm_pdir[PTDPTDI];
				tlbflush();
			}
			return((struct pte *) avtopte(va));
		}
	}
	return(0);
}

/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 *	Returns 0 when the page directory entry is invalid or the
 *	PTE word itself is zero.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUGx
	if (pmapdebug & PDB_FOLLOW)
		pg("pmap_extract(%x, %x) -> ", pmap, va);
#endif
	pa = 0;
	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
		/*
		 * NOTE(review): the raw PTE word is read without a
		 * separate validity check; a PTE that is zero yields
		 * pa == 0 and is treated as "no translation" below.
		 */
		pa = *(int *) pmap_pte(pmap, va);
	}
	if (pa)
		/* combine the frame number with the offset within the page */
		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
#ifdef DEBUGx
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return(pa);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 * (On this port it is a no-op apart from optional debug output.)
 */
void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.  [This update includes
 * forcing updates of any address map caching.]
 *
 * Generally used to insure that a thread about
 * to run will see a semantically correct world.
 */
void pmap_update()
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()");
#endif
	/* flush all cached translations (full TLB flush on i386) */
	tlbflush();
}

/*
 * Routine:	pmap_collect
 * Function:
 *	Garbage collects the physical map system for
 *	pages which are no longer used.
 *	Success need not be guaranteed -- that is, there
 *	may well be pages which are not referenced, but
 *	others may be collected.
 * Usage:
 *	Called by the pageout daemon when pages are scarce.
 *	[ needs to be written -wfj ]
 *
 *	NOTE(review): currently unimplemented -- after rejecting
 *	non-kernel pmaps the function simply returns; the locals
 *	below are placeholders for the unwritten body.
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{
	register vm_offset_t pa;
	register pv_entry_t pv;
	register int *pte;
	vm_offset_t kpa;
	int s;

#ifdef DEBUG
	int *pde;
	int opmapdebug;
	printf("pmap_collect(%x) ", pmap);
#endif
	if (pmap != kernel_pmap)
		return;
}

/*
 * pmap_activate:
 *
 *	Make `pmap' the active address space for the process whose
 *	pcb is `pcbp' (delegates to the PMAP_ACTIVATE macro).
 *
 * [ macro again?, should I force kstack into user map here? -wfj ]
 */
void
pmap_activate(pmap, pcbp)
	register pmap_t pmap;
	struct pcb *pcbp;
{
	/* `x' is referenced only by the commented-out debug loop below */
	int x;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
		pg("pmap_activate(%x, %x) ", pmap, pcbp);
#endif
	PMAP_ACTIVATE(pmap, pcbp);
/*printf("pde ");for(x=0x3f6; x < 0x3fA; x++) printf("%x ", pmap->pm_pdir[x]);*/
/*pads(pmap);*/
/*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
}

/*
 * pmap_zero_page zeros the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bzero to clear its contents, one machine dependent page
 * at a time.
 */
void
pmap_zero_page(phys)
	register vm_offset_t phys;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)", phys);
#endif
	/* convert byte address to machine page number, then clear each
	 * machine page making up the VM page */
	phys >>= PG_SHIFT;
	ix = 0;
	do {
		clearseg(phys++);
	} while (++ix != i386pagesperpage);
}

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
void
pmap_copy_page(src, dst)
	register vm_offset_t src, dst;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)", src, dst);
#endif
	/* convert byte addresses to machine page numbers, then copy each
	 * machine page making up the VM page */
	src >>= PG_SHIFT;
	dst >>= PG_SHIFT;
	ix = 0;
	do {
		physcopyseg(src++, dst++);
	} while (++ix != i386pagesperpage);
}

/*
 * Routine:	pmap_pageable
 * Function:
 *	Make the specified pages (by pmap, offset)
 *	pageable (or not) as requested.
 *
 *	A page which is not pageable may not take
 *	a fault; therefore, its page table entry
 *	must remain valid for the duration.
 *
 *	This routine is merely advisory; pmap_enter
 *	will specify that these pages are to be wired
 *	down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t pmap;
	vm_offset_t sva, eva;
	boolean_t pageable;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)",
		       pmap, sva, eva, pageable);
#endif
	/*
	 * If we are making a PT page pageable then all valid
	 * mappings must be gone from that page.  Hence it should
	 * be all zeros and there is no need to clean it.
	 * Assumptions:
	 *	- we are called with only one page at a time
	 *	- PT pages have only one pv_table entry
	 */
	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
		register pv_entry_t pv;
		register vm_offset_t pa;

#ifdef DEBUG
		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
			printf("pmap_pageable(%x, %x, %x, %x)",
			       pmap, sva, eva, pageable);
#endif
		/*if (!pmap_pde_v(pmap_pde(pmap, sva))) return;*/
		/* bail out unless a PTE actually exists for sva */
		if(pmap_pte(pmap, sva) == 0)
			return;
		pa = pmap_pte_pa(pmap_pte(pmap, sva));
		/* ignore pages outside managed physical memory */
		if (pa < vm_first_phys || pa >= vm_last_phys)
			return;
		pv = pa_to_pvh(pa);
		/*if (!ispt(pv->pv_va)) return;*/
#ifdef DEBUG
		/* sanity: a PT page should have exactly one mapping, at sva */
		if (pv->pv_va != sva || pv->pv_next) {
			pg("pmap_pageable: bad PT page va %x next %x\n",
			   pv->pv_va, pv->pv_next);
			return;
		}
#endif
		/*
		 * Mark it unmodified to avoid pageout
		 */
		pmap_clear_modify(pa);
#ifdef needsomethinglikethis
		if (pmapdebug & PDB_PTPAGE)
			pg("pmap_pageable: PT page %x(%x) unmodified\n",
			   sva, *(int *)pmap_pte(pmap, sva));
		if (pmapdebug & PDB_WIRING)
			pmap_check_wiring("pageable", sva);
#endif
	}
}

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)", pa);
#endif
	pmap_changebit(pa, PG_M, FALSE);
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void pmap_clear_reference(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)", pa);
#endif
	pmap_changebit(pa, PG_U, FALSE);
}

/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_U);
		printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_U));
}

/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_M);
		printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_M));
}

/*
 * pmap_phys_address:
 *
 *	Convert a (machine independent) physical page number into the
 *	corresponding physical byte address.
 */
vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return(i386_ptob(ppn));
}

/*
 * Miscellaneous support routines follow
 */

/*
 * i386_protection_init:
 *
 *	Fill in protection_codes[], indexed by the 3-bit VM_PROT_*
 *	combination: no access maps to 0, every readable and/or
 *	executable-only combination maps to PG_RO, and every writable
 *	combination maps to PG_RW (the i386 page tables provide no
 *	separate execute permission).
 */
i386_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = 0;
			break;
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = PG_RO;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}

/*
 * pmap_testbit:
 *
 *	Return TRUE if attribute bit `bit' (e.g. PG_M or PG_U) is
 *	recorded for physical page `pa', either in the saved
 *	pmap_attributes[] array or in any live PTE currently mapping
 *	the page.  Runs at splimp while walking the pv chain.
 */
static
boolean_t
pmap_testbit(pa, bit)
	register vm_offset_t pa;
	int bit;
{
	register pv_entry_t pv;
	register int *pte, ix;
	int s;

	/* unmanaged physical pages carry no attributes */
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return(FALSE);

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Check saved info first
	 */
	if (pmap_attributes[pa_index(pa)] & bit) {
		splx(s);
		return(TRUE);
	}
	/*
	 * Not found, check current mappings returning
	 * immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
			ix = 0;
			/* scan every machine page making up the VM page */
			do {
				if (*pte++ & bit) {
					splx(s);
					return(TRUE);
				}
			} while (++ix != i386pagesperpage);
		}
	}
	splx(s);
	return(FALSE);
}

/*
 * pmap_changebit:
 *
 *	Set (`setem' TRUE) or clear (`setem' FALSE) PTE bit `bit' in
 *	every current mapping of physical page `pa'; when clearing,
 *	the saved attribute array entry is cleared as well.  Runs at
 *	splimp to keep the pv chain stable while it is walked.
 */
pmap_changebit(pa, bit, setem)
	register vm_offset_t pa;
	int bit;
	boolean_t setem;
{
	register pv_entry_t pv;
	register int *pte, npte, ix;
	vm_offset_t va;
	int s;
	boolean_t firstpage = TRUE;

#ifdef DEBUG
	if (pmapdebug & PDB_BITS)
		printf("pmap_changebit(%x, %x, %s)",
		       pa, bit, setem ? "set" : "clear");
#endif
	/* unmanaged physical pages carry no attributes */
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Clear saved attributes (modify, reference)
	 */
	if (!setem)
		pmap_attributes[pa_index(pa)] &= ~bit;
	/*
	 * Loop over all current mappings setting/clearing as appropos
	 * If setting RO do we need to clear the VAC?
	 */
	if (pv->pv_pmap != NULL) {
#ifdef DEBUG
		int toflush = 0;
#endif
		for (; pv; pv = pv->pv_next) {
#ifdef DEBUG
			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
#endif
			va = pv->pv_va;

			/*
			 * XXX don't write protect pager mappings
			 */
			if (bit == PG_RO) {
				extern vm_offset_t pager_sva, pager_eva;

				if (va >= pager_sva && va < pager_eva)
					continue;
			}

			pte = (int *) pmap_pte(pv->pv_pmap, va);
			ix = 0;
			/* update each machine page making up the VM page,
			 * writing the PTE only when it actually changes */
			do {
				if (setem)
					npte = *pte | bit;
				else
					npte = *pte & ~bit;
				if (*pte != npte) {
					*pte = npte;
					/*TBIS(va);*/
				}
				va += I386_PAGE_SIZE;
				pte++;
			} while (++ix != i386pagesperpage);

			/* reactivate the current process's pmap so the
			 * hardware sees the modified entries */
			if (pv->pv_pmap == &curproc->p_vmspace->vm_pmap)
				pmap_activate(pv->pv_pmap,
				    (struct pcb *)curproc->p_addr);
		}
#ifdef somethinglikethis
		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
				DCIA();
			else if (toflush == 2)
				DCIS();
			else
				DCIU();
		}
#endif
	}
	splx(s);
}

#ifdef DEBUG
/*
 * Debug: print the pv (physical->virtual) chain for page `pa'.
 */
pmap_pvdump(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	printf("pa %x", pa);
	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
		printf(" -> pmap %x, va %x, flags %x",
		       pv->pv_pmap, pv->pv_va, pv->pv_flags);
		pads(pv->pv_pmap);
	}
	printf(" ");
}

#ifdef notyet
/*
 * Debug: count the live PTEs in the kernel PT page mapped at `va'
 * and compare against the wired count recorded in pt_map.
 */
pmap_check_wiring(str, va)
	char *str;
	vm_offset_t va;
{
	vm_map_entry_t entry;
	register int count, *pte;

	va = trunc_page(va);
	if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
		return;

	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
		pg("wired_check: entry for %x not found\n", va);
		return;
	}
	count = 0;
	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
		if (*pte)
			count++;
	if (entry->wired_count != count)
		pg("*%s*: %x: w%d/a%d\n",
		   str, va, entry->wired_count, count);
}
#endif

/* print address
space of pmap*/pads(pm) pmap_t pm; { unsigned va, i, j; struct pte *ptep; if(pm == kernel_pmap) return; for (i = 0; i < 1024; i++) if(pm->pm_pdir[i].pd_v) for (j = 0; j < 1024 ; j++) { va = (i<<22)+(j<<12); if (pm == kernel_pmap && va < 0xfe000000) continue; if (pm != kernel_pmap && va > UPT_MAX_ADDRESS) continue; ptep = pmap_pte(pm, va); if(pmap_pte_v(ptep)) printf("%x:%x ", va, *(int *)ptep); } ; }#endif
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -