pmap.c
	vm_offset_t	start;
	vm_offset_t	end;
	int		prot;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return(virt);
}

/*
 * Create and return a physical map.
 *
 * If the size specified for the map
 * is zero, the map is an actual physical
 * map, and may be referenced by the
 * hardware.
 *
 * If the size specified is non-zero,
 * the map will be used in software only, and
 * is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t	size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return(NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
#endif
	/*
	 * No need to allocate page table space yet but we do need a
	 * valid segment table.  Initially, we point everyone at the
	 * "null" segment table.  On the first pmap_enter, a real
	 * segment table will be allocated.
	 */
	pmap->pm_stab = Segtabzero;
	pmap->pm_stpa = Segtabzeropa;
#if defined(LUNA2)
	if (mmutype == MMU_68040)
		pmap->pm_stfree = protostfree;
#endif
	pmap->pm_stchanged = TRUE;
	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_release(%x)\n", pmap);
#endif
#ifdef notdef /* DIAGNOSTIC */
	/* count would be 0 from pmap_destroy... */
	simple_lock(&pmap->pm_lock);
	if (pmap->pm_count != 1)
		panic("pmap_release count");
#endif
	if (pmap->pm_ptab)
		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
				 LUNA_MAX_PTSIZE);
	if (pmap->pm_stab != Segtabzero)
		kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, LUNA_STSIZE);
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t	pmap;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}
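/*
 * Illustrative sketch, not part of the original file: how the
 * reference-counted lifecycle implemented by pmap_create(),
 * pmap_reference(), and pmap_destroy() above fits together.
 * The PMAP_EXAMPLE guard is hypothetical, so this is never built.
 */
#ifdef PMAP_EXAMPLE
static void
pmap_lifecycle_sketch()
{
	pmap_t pmap;

	/* Size 0 asks for a real, hardware-visible map. */
	pmap = pmap_create((vm_size_t)0);

	/* A second holder bumps pm_count under pm_lock. */
	pmap_reference(pmap);

	/*
	 * Each pmap_destroy() drops the count; resources are only
	 * released and the pmap freed when it reaches zero.
	 */
	pmap_destroy(pmap);		/* count 2 -> 1, map survives */
	pmap_destroy(pmap);		/* count 1 -> 0, release + free */
}
#endif /* PMAP_EXAMPLE */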
/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	register vm_offset_t sva, eva;
{
	register vm_offset_t nssva;
	register pt_entry_t *pte;
	boolean_t firstpage, needcflush;
	int flags;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
#endif
	if (pmap == NULL)
		return;

#ifdef PMAPSTATS
	remove_stats.calls++;
#endif
	firstpage = TRUE;
	needcflush = FALSE;
	flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
	while (sva < eva) {
		nssva = luna_trunc_seg(sva) + LUNA_SEG_SIZE;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!pmap_ste_v(pmap, sva)) {
			sva = nssva;
			continue;
		}
		/*
		 * Invalidate every valid mapping within this segment.
		 */
		pte = pmap_pte(pmap, sva);
		while (sva < nssva) {
			if (pmap_pte_v(pte)) {
#ifdef HAVEVAC
				if (pmap_aliasmask) {
					/*
					 * Purge kernel side of VAC to ensure
					 * we get the correct state of any
					 * hardware maintained bits.
					 */
					if (firstpage) {
						DCIS();
#ifdef PMAPSTATS
						remove_stats.sflushes++;
#endif
					}
					/*
					 * Remember if we may need to
					 * flush the VAC due to a non-CI
					 * mapping.
					 */
					if (!needcflush && !pmap_pte_ci(pte))
						needcflush = TRUE;
				}
#endif
				pmap_remove_mapping(pmap, sva, pte, flags);
				firstpage = FALSE;
			}
			pte++;
			sva += PAGE_SIZE;
		}
	}
	/*
	 * Didn't do anything, no need for cache flushes
	 */
	if (firstpage)
		return;
#ifdef HAVEVAC
	/*
	 * In a couple of cases, we don't need to worry about flushing
	 * the VAC:
	 *	1. if this is a kernel mapping,
	 *	   we have already done it
	 *	2. if it is a user mapping not for the current process,
	 *	   it won't be there
	 */
	if (pmap_aliasmask &&
	    (pmap == kernel_pmap || pmap != curproc->p_vmspace->vm_map.pmap))
		needcflush = FALSE;
#ifdef DEBUG
	if (pmap_aliasmask && (pmapvacflush & PVF_REMOVE)) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	} else
#endif
	if (needcflush) {
		if (pmap == kernel_pmap) {
			DCIS();
#ifdef PMAPSTATS
			remove_stats.sflushes++;
#endif
		} else {
			DCIU();
#ifdef PMAPSTATS
			remove_stats.uflushes++;
#endif
		}
	}
#endif
}

/*
 * pmap_page_protect:
 *
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t	pa;
	vm_prot_t	prot;
{
	register pv_entry_t pv;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	switch (prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
	case VM_PROT_ALL:
		break;
	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pmap_changebit(pa, PG_RO, TRUE);
		break;
	/* remove_all */
	default:
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
#ifdef DEBUG
			if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
			    pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
				panic("pmap_page_protect: bad mapping");
#endif
			pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
					    PT_ENTRY_NULL,
					    PRM_TFLUSH|PRM_CFLUSH);
		}
		splx(s);
		break;
	}
}
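/*
 * Illustrative sketch, not part of the original file: the
 * segment-at-a-time walk used by pmap_remove() above (and by
 * pmap_protect() below).  Each pass is clamped to the current
 * LUNA_SEG_SIZE segment, segments with no valid segment table
 * entry are skipped wholesale, and only then are individual PTEs
 * visited.  Guarded by the hypothetical PMAP_EXAMPLE symbol.
 */
#ifdef PMAP_EXAMPLE
static void
pmap_segwalk_sketch(pmap, sva, eva)
	pmap_t pmap;
	vm_offset_t sva, eva;
{
	vm_offset_t nssva;
	pt_entry_t *pte;

	while (sva < eva) {
		/*
		 * End of this segment, watching for wraparound to 0
		 * at the top of the address space.
		 */
		nssva = luna_trunc_seg(sva) + LUNA_SEG_SIZE;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/* No segment table entry: skip the whole segment. */
		if (!pmap_ste_v(pmap, sva)) {
			sva = nssva;
			continue;
		}
		/* Visit each PTE within the segment. */
		for (pte = pmap_pte(pmap, sva); sva < nssva;
		     pte++, sva += PAGE_SIZE) {
			if (pmap_pte_v(pte)) {
				/* operate on the valid mapping here */
			}
		}
	}
}
#endif /* PMAP_EXAMPLE */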
/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t	pmap;
	register vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register vm_offset_t nssva;
	register pt_entry_t *pte;
	boolean_t firstpage, needtflush;
	int isro;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

#ifdef PMAPSTATS
	protect_stats.calls++;
#endif
	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	isro = pte_prot(pmap, prot);
	needtflush = active_pmap(pmap);
	firstpage = TRUE;
	while (sva < eva) {
		nssva = luna_trunc_seg(sva) + LUNA_SEG_SIZE;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!pmap_ste_v(pmap, sva)) {
			sva = nssva;
			continue;
		}
		/*
		 * Change protection on mapping if it is valid and doesn't
		 * already have the correct protection.
		 */
		pte = pmap_pte(pmap, sva);
		while (sva < nssva) {
			if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
#ifdef HAVEVAC
				/*
				 * Purge kernel side of VAC to ensure we
				 * get the correct state of any hardware
				 * maintained bits.
				 *
				 * XXX do we need to clear the VAC in
				 * general to reflect the new protection?
				 */
				if (firstpage && pmap_aliasmask)
					DCIS();
#endif
#if defined(LUNA2)
				/*
				 * Clear caches if making RO (see section
				 * "7.3 Cache Coherency" in the manual).
				 */
				if (isro && mmutype == MMU_68040) {
					vm_offset_t pa = pmap_pte_pa(pte);

					DCFP(pa);
					ICPP(pa);
				}
#endif
				pmap_pte_set_prot(pte, isro);
				if (needtflush)
					TBIS(sva);
#ifdef PMAPSTATS
				protect_stats.changed++;
#endif
				firstpage = FALSE;
			}
#ifdef PMAPSTATS
			else if (pmap_pte_v(pte)) {
				if (isro)
					protect_stats.alreadyro++;
				else
					protect_stats.alreadyrw++;
			}
#endif
			pte++;
			sva += PAGE_SIZE;
		}
	}
#if defined(HAVEVAC) && defined(DEBUG)
	if (pmap_aliasmask && (pmapvacflush & PVF_PROTECT)) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
#endif
}

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int npte;
	vm_offset_t opa;
	boolean_t cacheable = TRUE;
	boolean_t checkpv = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
	if (pmap == NULL)
		return;

#ifdef PMAPSTATS
	if (pmap == kernel_pmap)
		enter_stats.kernel++;
	else
		enter_stats.user++;
#endif
	/*
	 * For user mapping, allocate kernel VM resources if necessary.
	 */
	if (pmap->pm_ptab == NULL)
		pmap->pm_ptab = (pt_entry_t *)
			kmem_alloc_wait(pt_map, LUNA_MAX_PTSIZE);

	/*
	 * Segment table entry not valid, we need a new PT page
	 */
	if (!pmap_ste_v(pmap, va))
		pmap_enter_ptpage(pmap, va);

	pa = luna_trunc_page(pa);
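/*
 * Illustrative sketch, not part of the original file: what the
 * protection triage at the top of pmap_protect() above means for a
 * caller.  Stripping read access removes the mappings outright,
 * granting write access is deferred to a later pmap_enter(), and
 * any other request downgrades the range to read-only.  Guarded by
 * the hypothetical PMAP_EXAMPLE symbol.
 */
#ifdef PMAP_EXAMPLE
static void
pmap_protect_sketch(pmap, va)
	pmap_t pmap;
	vm_offset_t va;
{
	/* Downgrade one page to read-only... */
	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_READ);

	/* ...a write request is a no-op, upgrades go via pmap_enter()... */
	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_ALL);

	/* ...and removing read access behaves like pmap_remove(). */
	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_NONE);
}
#endif /* PMAP_EXAMPLE */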