📄 pmap.c
	simple_unlock(&pmap->pm_obj.vmobjlock);

	if (old_ldt != NULL)
		uvm_km_free(kernel_map, (vaddr_t)old_ldt, len);
}
#endif /* USER_LDT */

/*
 * pmap_activate: activate a process' pmap (fill in %cr3 and LDT info)
 *
 * => called from cpu_switch()
 * => if proc is the curproc, then load it into the MMU
 */

void
pmap_activate(p)
	struct proc *p;
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct pmap *pmap = p->p_vmspace->vm_map.pmap;

	pcb->pcb_pmap = pmap;
#ifndef OSKIT
	pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
#endif
	pcb->pcb_cr3 = pmap->pm_pdirpa;
	if (p == curproc)
		lcr3(pcb->pcb_cr3);
#ifndef OSKIT
	if (pcb == curpcb)
		lldt(pcb->pcb_ldt_sel);
#endif
}

/*
 * pmap_deactivate: deactivate a process' pmap
 *
 * => XXX: what should this do, if anything?
 */

void
pmap_deactivate(p)
	struct proc *p;
{
}

/*
 * end of lifecycle functions
 */

/*
 * some misc. functions
 */

/*
 * pmap_extract: extract a PA for the given VA
 */

boolean_t
pmap_extract(pmap, va, pap)
	struct pmap *pmap;
	vaddr_t va;
	paddr_t *pap;
{
	pt_entry_t *ptes, pte;
	pd_entry_t pde;

	if (__predict_true((pde = pmap->pm_pdir[pdei(va)]) != 0)) {
#ifdef LARGEPAGES
		if (pde & PG_PS) {
			if (pap != NULL)
				*pap = (pde & PG_LGFRAME) |
				    (va & ~PG_LGFRAME);
			return (TRUE);
		}
#endif
		ptes = pmap_map_ptes(pmap);
		pte = ptes[i386_btop(va)];
		pmap_unmap_ptes(pmap);
		if (__predict_true((pte & PG_V) != 0)) {
			if (pap != NULL)
				*pap = (pte & PG_FRAME) | (va & ~PG_FRAME);
			return (TRUE);
		}
	}
	return (FALSE);
}
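/*
 * Illustrative sketch, not part of the original pmap.c: how pmap_extract()
 * above decomposes a virtual address.  It assumes the conventional i386
 * 4KB-page layout, where pdei(va) selects the page-directory slot from the
 * top 10 bits and i386_btop(va) is a page-number shift; the explicit shift
 * amounts and the helper name below are assumptions for illustration only.
 */
#if 0	/* example only, relies on the surrounding pmap.c context */
static void
example_va_decompose(vaddr_t va)
{
	u_int pd_idx = va >> 22;		/* page-directory index (pdei) */
	u_int pt_idx = (va >> 12) & 0x3ff;	/* PTE slot within that PTP */
	u_int pg_off = va & 0xfff;		/* byte offset within the page */

	printf("va 0x%lx -> pde %u, pte %u, offset 0x%x\n",
	    (u_long)va, pd_idx, pt_idx, pg_off);
}
#endif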
/*
 * vtophys: virtual address to physical address.  For use by
 * machine-dependent code only.
 */

paddr_t
vtophys(va)
	vaddr_t va;
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == TRUE)
		return (pa);
	return (0);
}

/*
 * pmap_virtual_space: used during bootup [pmap_steal_memory] to
 * determine the bounds of the kernel virtual address space.
 */

void
pmap_virtual_space(startp, endp)
	vaddr_t *startp;
	vaddr_t *endp;
{
	*startp = virtual_avail;
	*endp = virtual_end;
}

/*
 * pmap_map: map a range of PAs into kvm
 *
 * => used during crash dump
 * => XXX: pmap_map() should be phased out?
 */

vaddr_t
pmap_map(va, spa, epa, prot)
	vaddr_t va;
	paddr_t spa, epa;
	vm_prot_t prot;
{
	while (spa < epa) {
		pmap_enter(pmap_kernel(), va, spa, prot, 0);
		va += PAGE_SIZE;
		spa += PAGE_SIZE;
	}
	return va;
}

/*
 * pmap_zero_page: zero a page
 */

void
pmap_zero_page(pa)
	paddr_t pa;
{
	simple_lock(&pmap_zero_page_lock);
	*zero_pte = (pa & PG_FRAME) | PG_V | PG_RW;	/* map in */
	pmap_update_pg((vaddr_t)zerop);			/* flush TLB */
	memset(zerop, 0, PAGE_SIZE);			/* zero */
	simple_unlock(&pmap_zero_page_lock);
}

/*
 * pmap_zero_page_uncached: the same, except uncached.  Returns
 * TRUE if the page was zero'd, FALSE if we aborted for some
 * reason.
 */

boolean_t
pmap_zero_page_uncached(pa)
	paddr_t pa;
{
	boolean_t rv = TRUE;
#ifndef OSKIT
	int i, *ptr;
#endif

	simple_lock(&pmap_zero_page_lock);
	*zero_pte = (pa & PG_FRAME) | PG_V | PG_RW |	/* map in */
	    ((cpu_class != CPUCLASS_386) ? PG_N : 0);
	pmap_update_pg((vaddr_t)zerop);			/* flush TLB */
#ifndef OSKIT
	for (i = 0, ptr = (int *) zerop; i < PAGE_SIZE / sizeof(int); i++) {
		if (sched_whichqs != 0) {
			/*
			 * A process has become ready.  Abort now,
			 * so we don't keep it waiting while we
			 * do slow memory access to finish this
			 * page.
			 */
			rv = FALSE;
			break;
		}
		*ptr++ = 0;
	}
#endif
	simple_unlock(&pmap_zero_page_lock);

	return (rv);
}

/*
 * pmap_copy_page: copy a page
 */

void
pmap_copy_page(srcpa, dstpa)
	paddr_t srcpa, dstpa;
{
	simple_lock(&pmap_copy_page_lock);
#ifdef DIAGNOSTIC
	if (*csrc_pte || *cdst_pte)
		panic("pmap_copy_page: lock botch");
#endif

	*csrc_pte = (srcpa & PG_FRAME) | PG_V | PG_RW;
	*cdst_pte = (dstpa & PG_FRAME) | PG_V | PG_RW;
	memcpy(cdstp, csrcp, PAGE_SIZE);
	*csrc_pte = *cdst_pte = 0;			/* zap! */
	pmap_update_2pg((vaddr_t)csrcp, (vaddr_t)cdstp);
	simple_unlock(&pmap_copy_page_lock);
}
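/*
 * Illustrative sketch, not part of the original pmap.c: the reserved
 * single-PTE "scratch window" pattern shared by pmap_zero_page(),
 * pmap_zero_page_uncached() and pmap_copy_page() above: install the frame
 * in a PTE set aside at bootstrap, flush that one TLB entry, and operate
 * on the page through the window.  pmap_copy_page() additionally zaps its
 * PTEs when done, while pmap_zero_page() leaves the mapping until reuse.
 * The names scratch_pte/scratch_va are hypothetical stand-ins for
 * zero_pte/zerop, csrc_pte/csrcp and friends.
 */
#if 0	/* example only, relies on the surrounding pmap.c context */
static void
example_scratch_window(pt_entry_t *scratch_pte, void *scratch_va, paddr_t pa)
{
	*scratch_pte = (pa & PG_FRAME) | PG_V | PG_RW;	/* map the frame in */
	pmap_update_pg((vaddr_t)scratch_va);		/* flush stale TLB entry */
	memset(scratch_va, 0, PAGE_SIZE);		/* work through the window */
	*scratch_pte = 0;				/* optional: zap, as pmap_copy_page() does */
}
#endif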
/*
 * p m a p   r e m o v e   f u n c t i o n s
 *
 * functions that remove mappings
 */

/*
 * pmap_remove_ptes: remove PTEs from a PTP
 *
 * => must have proper locking on pmap_master_lock
 * => caller must hold pmap's lock
 * => PTP must be mapped into KVA
 * => PTP should be null if pmap == pmap_kernel()
 */

static void
pmap_remove_ptes(pmap, pmap_rr, ptp, ptpva, startva, endva, flags)
	struct pmap *pmap;
	struct pmap_remove_record *pmap_rr;
	struct vm_page *ptp;
	vaddr_t ptpva;
	vaddr_t startva, endva;
	int flags;
{
	struct pv_entry *pv_tofree = NULL;	/* list of pv_entrys to free */
	struct pv_entry *pve;
	pt_entry_t *pte = (pt_entry_t *) ptpva;
	pt_entry_t opte;
	int bank, off;

	/*
	 * note that ptpva points to the PTE that maps startva.   this may
	 * or may not be the first PTE in the PTP.
	 *
	 * we loop through the PTP while there are still PTEs to look at
	 * and the wire_count is greater than 1 (because we use the wire_count
	 * to keep track of the number of real PTEs in the PTP).
	 */

	for (/*null*/; startva < endva && (ptp == NULL || ptp->wire_count > 1)
			     ; pte++, startva += PAGE_SIZE) {
		if (!pmap_valid_entry(*pte))
			continue;		/* VA not mapped */
		if ((flags & PMAP_REMOVE_SKIPWIRED) && (*pte & PG_W)) {
			continue;
		}

		opte = *pte;			/* save the old PTE */
		*pte = 0;			/* zap! */
		if (opte & PG_W)
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		if (pmap_rr) {		/* worried about tlb flushing? */
			if (opte & PG_G) {
				/* PG_G requires this */
				pmap_update_pg(startva);
			} else {
				if (pmap_rr->prr_npages < PMAP_RR_MAX) {
					pmap_rr->prr_vas[pmap_rr->prr_npages++]
						= startva;
				} else {
					if (pmap_rr->prr_npages == PMAP_RR_MAX)
						/* signal an overflow */
						pmap_rr->prr_npages++;
				}
			}
		}

		if (ptp)
			ptp->wire_count--;	/* dropping a PTE */

		/*
		 * if we are not on a pv_head list we are done.
		 */

		if ((opte & PG_PVLIST) == 0) {
#ifdef DIAGNOSTIC
			if (vm_physseg_find(i386_btop(opte & PG_FRAME), &off)
			    != -1)
				panic("pmap_remove_ptes: managed page without "
				      "PG_PVLIST for 0x%lx", startva);
#endif
			continue;
		}

		bank = vm_physseg_find(i386_btop(opte & PG_FRAME), &off);
#ifdef DIAGNOSTIC
		if (bank == -1)
			panic("pmap_remove_ptes: unmanaged page marked "
			      "PG_PVLIST, va = 0x%lx, pa = 0x%lx", startva,
			      (u_long)(opte & PG_FRAME));
#endif

		/* sync R/M bits */
		simple_lock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock);
		vm_physmem[bank].pmseg.attrs[off] |= (opte & (PG_U|PG_M));
		pve = pmap_remove_pv(&vm_physmem[bank].pmseg.pvhead[off], pmap,
				     startva);
		simple_unlock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock);

		if (pve) {
			pve->pv_next = pv_tofree;
			pv_tofree = pve;
		}

		/* end of "for" loop: time for next pte */
	}
	if (pv_tofree)
		pmap_free_pvs(pmap, pv_tofree);
}

/*
 * pmap_remove_pte: remove a single PTE from a PTP
 *
 * => must have proper locking on pmap_master_lock
 * => caller must hold pmap's lock
 * => PTP must be mapped into KVA
 * => PTP should be null if pmap == pmap_kernel()
 * => returns true if we removed a mapping
 */

static boolean_t
pmap_remove_pte(pmap, ptp, pte, va, flags)
	struct pmap *pmap;
	struct vm_page *ptp;
	pt_entry_t *pte;
	vaddr_t va;
	int flags;
{
	pt_entry_t opte;
	int bank, off;
	struct pv_entry *pve;

	if (!pmap_valid_entry(*pte))
		return(FALSE);		/* VA not mapped */
	if ((flags & PMAP_REMOVE_SKIPWIRED) && (*pte & PG_W)) {
		return(FALSE);
	}

	opte = *pte;			/* save the old PTE */
	*pte = 0;			/* zap! */

	if (opte & PG_W)
		pmap->pm_stats.wired_count--;
	pmap->pm_stats.resident_count--;

	if (ptp)
		ptp->wire_count--;	/* dropping a PTE */

	if (pmap_is_curpmap(pmap))
		pmap_update_pg(va);	/* flush TLB */

	/*
	 * if we are not on a pv_head list we are done.
	 */

	if ((opte & PG_PVLIST) == 0) {
#ifdef DIAGNOSTIC
		if (vm_physseg_find(i386_btop(opte & PG_FRAME), &off) != -1)
			panic("pmap_remove_pte: managed page without "
			      "PG_PVLIST for 0x%lx", va);
#endif
		return(TRUE);
	}

	bank = vm_physseg_find(i386_btop(opte & PG_FRAME), &off);
#ifdef DIAGNOSTIC
	if (bank == -1)
		panic("pmap_remove_pte: unmanaged page marked "
		      "PG_PVLIST, va = 0x%lx, pa = 0x%lx", va,
		      (u_long)(opte & PG_FRAME));
#endif

	/* sync R/M bits */
	simple_lock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock);
	vm_physmem[bank].pmseg.attrs[off] |= (opte & (PG_U|PG_M));
	pve = pmap_remove_pv(&vm_physmem[bank].pmseg.pvhead[off], pmap, va);
	simple_unlock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock);

	if (pve)
		pmap_free_pv(pmap, pve);
	return(TRUE);
}

/*
 * pmap_remove: top level mapping removal function
 *
 * => caller should not be holding any pmap locks
 */

void
pmap_remove(pmap, sva, eva)
	struct pmap *pmap;
	vaddr_t sva, eva;
{
	pmap_do_remove(pmap, sva, eva, PMAP_REMOVE_ALL);
}
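/*
 * Illustrative sketch, not part of the original pmap.c: from a caller's
 * point of view, pmap_remove() above just takes a page-aligned [sva, eva)
 * range; the PMAP_REMOVE_ALL / PMAP_REMOVE_SKIPWIRED distinction is an
 * internal detail of pmap_do_remove().  Function and parameter names below
 * are hypothetical.
 */
#if 0	/* example only, relies on the surrounding pmap.c context */
static void
example_unmap_range(struct pmap *pm, vaddr_t va, vsize_t len)
{
	/* len is assumed to already be a multiple of PAGE_SIZE */
	pmap_remove(pm, va, va + len);
}
#endif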
/*
 * pmap_do_remove: mapping removal guts
 *
 * => caller should not be holding any pmap locks
 */

static void
pmap_do_remove(pmap, sva, eva, flags)
	struct pmap *pmap;
	vaddr_t sva, eva;
	int flags;
{
	pt_entry_t *ptes;
	boolean_t result;
	paddr_t ptppa;
	vaddr_t blkendva;
	struct vm_page *ptp;
	struct pmap_remove_record pmap_rr, *prr;

	/*
	 * we lock in the pmap => pv_head direction
	 */

	PMAP_MAP_TO_HEAD_LOCK();
	ptes = pmap_map_ptes(pmap);	/* locks pmap */

	/*
	 * removing one page?  take shortcut function.
	 */

	if (sva + PAGE_SIZE == eva) {

		if (pmap_valid_entry(pmap->pm_pdir[pdei(sva)])) {

			/* PA of the PTP */
			ptppa = pmap->pm_pdir[pdei(sva)] & PG_FRAME;

			/* get PTP if non-kernel mapping */

			if (pmap == pmap_kernel()) {
				/* we never free kernel PTPs */
				ptp = NULL;
			} else {
				if (pmap->pm_ptphint &&
				    VM_PAGE_TO_PHYS(pmap->pm_ptphint) ==
				    ptppa) {
					ptp = pmap->pm_ptphint;
				} else {
					ptp = PHYS_TO_VM_PAGE(ptppa);
#ifdef DIAGNOSTIC
					if (ptp == NULL)
						panic("pmap_remove: unmanaged "
						      "PTP detected");
#endif
				}
			}

			/* do it! */
			result = pmap_remove_pte(pmap, ptp,
			    &ptes[i386_btop(sva)], sva, flags);

			/*
			 * if mapping removed and the PTP is no longer
			 * being used, free it!
			 */

			if (result && ptp && ptp->wire_count <= 1) {
				pmap->pm_pdir[pdei(sva)] = 0;	/* zap! */
#if defined(I386_CPU)
				/* already dumped whole TLB on i386 */
				if (cpu_class != CPUCLASS_386)
#endif
				{
					pmap_update_pg(((vaddr_t) ptes) +
					    ptp->offset);
				}
				pmap->pm_stats.resident_count--;
				if (pmap->pm_ptphint == ptp)
					pmap->pm_ptphint =
					    pmap->pm_obj.memq.tqh_first;
				ptp->wire_count = 0;
				uvm_pagefree(ptp);
			}