/*
 * Copyright (c) 1992 OMRON Corporation.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: hp300/hp300/pmap.c	8.2 (Berkeley) 11/14/93
 *
 *	@(#)pmap.c	8.2 (Berkeley) 12/6/93
 */

/*
 * LUNA physical map management code taken from:
 * HP9000/300 series physical map management code.
 *
 * Supports:
 *	68030 with on-chip MMU	(LUNA-I)
 *	68040 with on-chip MMU	(LUNA-II)
 *
 * Notes:
 *	Don't even pay lip service to multiprocessor support.
 *
 *	We assume TLB entries don't have process tags (except for the
 *	supervisor/user distinction) so we only invalidate TLB entries
 *	when changing mappings for the current (or kernel) pmap.  This is
 *	technically not true for the 68851 but we flush the TLB on every
 *	context switch, so it effectively winds up that way.
 *
 *	Bitwise and/or operations are significantly faster than bitfield
 *	references so we use them when accessing STE/PTEs in the pmap_pte_*
 *	macros.  Note also that the two are not always equivalent; e.g.:
 *		(*(int *)pte & PG_PROT) [4] != pte->pg_prot [1]
 *	and a couple of routines that deal with protection and wiring take
 *	some shortcuts that assume the and/or definitions.
 *
 *	This implementation will only work for PAGE_SIZE == NBPG
 *	(i.e. 4096 bytes).
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */
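/*
 * Illustrative sketch, not part of the original source: the "[4] != [1]"
 * example from the notes above, spelled out.  For a read-only page the
 * and/or form returns the protection bit still in place (PG_RO == 0x4 in
 * the hp300-style encoding this file inherits), while the old struct pte
 * bitfield form returns the one-bit field shifted down to 1.  This is why
 * the shortcut routines compare against the masked encodings.
 */
#ifdef notdef
	int x;

	x = *(int *)pte & PG_PROT;	/* x == 4: mask leaves bit 2 set */
	x = pte->pg_prot;		/* x == 1: bitfield is extracted */
#endif /* notdef */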
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>

#include <luna68k/luna68k/pte.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/cpu.h>

#ifdef PMAPSTATS
struct {
	int collectscans;
	int collectpages;
	int kpttotal;
	int kptinuse;
	int kptmaxuse;
} kpt_stats;
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int nochange;	/* no change at all */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int pchange;	/* no mapping change, just protection */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
} enter_stats;
struct {
	int calls;
	int removes;
	int pvfirst;
	int pvsearch;
	int ptinvalid;
	int uflushes;
	int sflushes;
} remove_stats;
struct {
	int calls;
	int changed;
	int alreadyro;
	int alreadyrw;
} protect_stats;
struct chgstats {
	int setcalls;
	int sethits;
	int setmiss;
	int clrcalls;
	int clrhits;
	int clrmiss;
} changebit_stats[16];
#endif

#ifdef DEBUG
int debugmap = 0;
int pmapdebug = 0x2000;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_SEGTAB	0x0400
#define PDB_MULTIMAP	0x0800
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

#ifdef HAVEVAC
int pmapvacflush = 0;
#define PVF_ENTER	0x01
#define PVF_REMOVE	0x02
#define PVF_PROTECT	0x04
#define PVF_TOTAL	0x80
#endif

#if defined(LUNA2)
int dowriteback = 1;	/* 68040: enable writeback caching */
int dokwriteback = 1;	/* 68040: enable writeback caching of kernel AS */
#endif

extern vm_offset_t pager_sva, pager_eva;
#endif

/*
 * Get STEs and PTEs for user/kernel address space
 */
#if defined(LUNA2)
#define pmap_ste1(m, v) \
	(&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
/* XXX assumes physically contiguous ST pages (if more than one) */
#define pmap_ste2(m, v) \
	(&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
			- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
#define pmap_ste(m, v) \
	(&((m)->pm_stab[(vm_offset_t)(v) \
			>> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
#define pmap_ste_v(m, v) \
	(mmutype == MMU_68040 \
	 ? ((*(int *)pmap_ste1(m, v) & SG_V) && \
	    (*(int *)pmap_ste2(m, v) & SG_V)) \
	 : (*(int *)pmap_ste(m, v) & SG_V))
#else
#define pmap_ste(m, v)	 (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
#define pmap_ste_v(m, v) (*(int *)pmap_ste(m, v) & SG_V)
#endif
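/*
 * Illustrative sketch, not part of the original source (the helper name
 * is hypothetical): how the macros above compose on the two MMUs.  On the
 * 68040 the segment table is two levels deep, so the level-1 descriptor
 * must be valid before pmap_ste2() may chase it to the level-2 table;
 * pmap_ste_v() encodes exactly that ordering with a short-circuit &&.
 */
#ifdef notdef
static boolean_t
example_seg_valid(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	if (mmutype == MMU_68040) {
		if ((*(int *)pmap_ste1(pmap, va) & SG_V) == 0)
			return (FALSE);	/* level-1 invalid, don't chase it */
		return ((*(int *)pmap_ste2(pmap, va) & SG_V) != 0);
	}
	return ((*(int *)pmap_ste(pmap, va) & SG_V) != 0);
}
#endif /* notdef */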
#define pmap_pte(m, v)	(&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)
#define pmap_pte_w(pte)		(*(int *)(pte) & PG_W)
#define pmap_pte_ci(pte)	(*(int *)(pte) & PG_CI)
#define pmap_pte_m(pte)		(*(int *)(pte) & PG_M)
#define pmap_pte_u(pte)		(*(int *)(pte) & PG_U)
#define pmap_pte_prot(pte)	(*(int *)(pte) & PG_PROT)
#define pmap_pte_v(pte)		(*(int *)(pte) & PG_V)

#define pmap_pte_set_w(pte, v) \
	if (v) *(int *)(pte) |= PG_W; else *(int *)(pte) &= ~PG_W
#define pmap_pte_set_prot(pte, v) \
	if (v) *(int *)(pte) |= PG_PROT; else *(int *)(pte) &= ~PG_PROT
#define pmap_pte_w_chg(pte, nw)		((nw) ^ pmap_pte_w(pte))
#define pmap_pte_prot_chg(pte, np)	((np) ^ pmap_pte_prot(pte))

/*
 * Given a map and a machine independent protection code,
 * convert to a luna protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
int	protection_codes[8];

/*
 * Kernel page table page management.
 */
struct kpt_page {
	struct kpt_page *kpt_next;	/* link on either used or free list */
	vm_offset_t	kpt_va;		/* always valid kernel VA */
	vm_offset_t	kpt_pa;		/* PA of this page (for speed) */
};
struct kpt_page *kpt_free_list, *kpt_used_list;
struct kpt_page *kpt_pages;

/*
 * Kernel segment/page table and page table map.
 * The page table map gives us a level of indirection we need to dynamically
 * expand the page table.  It is essentially a copy of the segment table
 * with PTEs instead of STEs.  All are initialized in locore at boot time.
 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
 * Segtabzero is an empty segment table which all processes share until they
 * reference something.
 */
st_entry_t	*Sysseg;
pt_entry_t	*Sysmap, *Sysptmap;
st_entry_t	*Segtabzero, *Segtabzeropa;
vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES;

struct pmap	kernel_pmap_store;
vm_map_t	pt_map;

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t	vm_first_phys;	/* PA of first managed page */
vm_offset_t	vm_last_phys;	/* PA just past last managed page */
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
char		*pmap_attributes;	/* reference and modify bits */
#ifdef HAVEVAC
int		pmap_aliasmask;	/* separation at which VA aliasing ok */
#endif
#if defined(LUNA2)
int		protostfree;	/* prototype (default) free ST map */
#endif

/*
 * Internal routines
 */
void	pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
boolean_t pmap_testbit	__P((vm_offset_t, int));
void	pmap_changebit	__P((vm_offset_t, int, boolean_t));
void	pmap_enter_ptpage __P((pmap_t, vm_offset_t));
#ifdef DEBUG
void	pmap_pvdump	__P((vm_offset_t));
void	pmap_check_wiring __P((char *, vm_offset_t));
#endif

/* pmap_remove_mapping flags */
#define PRM_TFLUSH	1
#define PRM_CFLUSH	2

/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
 * it cannot be used, and will generate a panic if tried. Note that this
 * memory will never be freed, and in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size)
{
	vm_offset_t val;
	int i;
	extern boolean_t vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");
	size = round_page(size);
	val = virtual_avail;

	virtual_avail = pmap_map(virtual_avail, avail_start,
		avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
	avail_start += size;

	blkclr((caddr_t)val, size);
	return ((void *)val);
}
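/*
 * Illustrative sketch, not part of the original source (the variable name
 * is hypothetical): a typical bootstrap-time caller.  The memory comes
 * back zeroed, mapped, and permanently wired; there is no free routine.
 */
#ifdef notdef
	u_int *early_table;

	/* steal one zeroed, wired page before vm_page_startup() runs */
	early_table = (u_int *)pmap_bootstrap_alloc(NBPG);
#endif /* notdef */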
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	vm_offset_t	addr, addr2;
	vm_size_t	npg, s;
	int		rv;
	extern char kstack[];

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
	/*
	 * Now that kernel map has been allocated, we can mark as
	 * unavailable regions which we have mapped in locore.
	 */
	addr = (vm_offset_t) Sysmap;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, LUNA_MAX_PTSIZE, FALSE);
	/*
	 * If this fails it is probably because the static portion of
	 * the kernel page table isn't big enough and we overran the
	 * page table map.  Need to adjust pmap_size() in luna_init.c.
	 */
	if (addr != (vm_offset_t)Sysmap)
		goto bogons;
	addr = (vm_offset_t) kstack;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, luna_ptob(UPAGES), FALSE);
	if (addr != (vm_offset_t)kstack)
bogons:
		panic("pmap_init: bogons in the VM system!\n");

#ifdef DEBUG
	if (pmapdebug & PDB_INIT) {
		printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
		       Sysseg, Sysmap, Sysptmap);
		printf(" pstart %x, pend %x, vstart %x, vend %x\n",
		       avail_start, avail_end, virtual_avail, virtual_end);
	}
#endif

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * initial segment table, pv_head_table and pmap_attributes.
	 */
	npg = atop(phys_end - phys_start);
	s = (vm_size_t) (LUNA_STSIZE + sizeof(struct pv_entry) * npg + npg);
	s = round_page(s);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	Segtabzero = (st_entry_t *) addr;
	Segtabzeropa = (st_entry_t *) pmap_extract(kernel_pmap, addr);
	addr += LUNA_STSIZE;
	pv_table = (pv_entry_t) addr;
	addr += sizeof(struct pv_entry) * npg;
	pmap_attributes = (char *) addr;
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: %x bytes: npg %x s0 %x(%x) tbl %x atr %x\n",
		       s, npg, Segtabzero, Segtabzeropa,
		       pv_table, pmap_attributes);
#endif

	/*
	 * Allocate physical memory for kernel PT pages and their management.
	 * We need 1 PT page per possible task plus some slop.
	 */
	npg = min(atop(LUNA_MAX_KPTSIZE), maxproc+16);
	s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));

	/*
	 * Verify that space will be allocated in region for which
	 * we already have kernel PT pages.
	 */
	addr = 0;
	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
		panic("pmap_init: kernel PT too small");
	vm_map_remove(kernel_map, addr, addr + s);
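	/*
	 * Note (expository, not in the original source): the vm_map_find()
	 * above was only a probe.  Having verified that the KPT region
	 * falls below Sysmap, the space was released again with
	 * vm_map_remove() and is reallocated for real with kmem_alloc()
	 * below.
	 */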
	/*
	 * Now allocate the space and link the pages together to
	 * form the KPT free list.
	 */
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	s = ptoa(npg);
	addr2 = addr + s;
	kpt_pages = &((struct kpt_page *)addr2)[npg];
	kpt_free_list = (struct kpt_page *) 0;
	do {
		addr2 -= LUNA_PAGE_SIZE;
		(--kpt_pages)->kpt_next = kpt_free_list;
		kpt_free_list = kpt_pages;
		kpt_pages->kpt_va = addr2;
		kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
	} while (addr != addr2);
#ifdef PMAPSTATS
	kpt_stats.kpttotal = atop(s);
#endif
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: KPT: %d pages from %x to %x\n",
		       atop(s), addr, addr + s);
#endif

	/*
	 * Slightly modified version of kmem_suballoc() to get page table
	 * map where we want it.
	 */
	addr = LUNA_PTBASE;
	s = min(LUNA_PTMAXSIZE, maxproc*LUNA_MAX_PTSIZE);
	addr2 = addr + s;
	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
	if (rv != KERN_SUCCESS)
		panic("pmap_init: cannot allocate space for PT map");
	pmap_reference(vm_map_pmap(kernel_map));
	pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
	if (pt_map == NULL)
		panic("pmap_init: cannot create pt_map");
	rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
	if (rv != KERN_SUCCESS)
		panic("pmap_init: cannot map range to pt_map");
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
#endif

#if defined(LUNA2)
	if (mmutype == MMU_68040) {
		protostfree = ~l2tobm(0);
		for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
			protostfree &= ~l2tobm(rv);
	}
#endif

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	vm_first_phys = phys_start;
	vm_last_phys = phys_end;
	pmap_initialized = TRUE;
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t	virt;