
📄 pmap.c

📁 Early FreeBSD implementation
💻 C
📖 Page 1 of 3
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.1 (Berkeley) 6/11/93
 */

/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
 */

/*
 *	Reno i386 version, from Mike Hibler's hp300 version.
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#ifdef NOTDEF
#include <vm/vm_pageout.h>
#include <machine/isa.h>
#endif

/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
#define BSDVM_COMPAT	1

#ifdef DEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
} enter_stats;
struct {
	int calls;
	int removes;
	int pvfirst;
	int pvsearch;
	int ptinvalid;
	int uflushes;
	int sflushes;
} remove_stats;

int debugmap = 0;
int pmapdebug = 0;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_PDRTAB	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

int pmapvacflush = 0;
#define	PVF_ENTER	0x01
#define	PVF_REMOVE	0x02
#define	PVF_PROTECT	0x04
#define	PVF_TOTAL	0x80
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))

#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)

#define pmap_pde_v(pte)		((pte)->pd_v)
#define pmap_pte_w(pte)		((pte)->pg_w)
/* #define pmap_pte_ci(pte)	((pte)->pg_ci) */
#define pmap_pte_m(pte)		((pte)->pg_m)
#define pmap_pte_u(pte)		((pte)->pg_u)
#define pmap_pte_v(pte)		((pte)->pg_v)
#define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
#define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))

/*
 * Given a map and a machine independent protection code,
 * convert to a vax protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
int	protection_codes[8];

struct pmap	kernel_pmap_store;

vm_offset_t    	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t	vm_first_phys;	/* PA of first managed page */
vm_offset_t	vm_last_phys;	/* PA just past last managed page */
int		i386pagesperpage;	/* PAGE_SIZE / I386_PAGE_SIZE */
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
char		*pmap_attributes;	/* reference and modify bits */

boolean_t	pmap_testbit();
void		pmap_clear_modify();

#if BSDVM_COMPAT
#include <sys/msgbuf.h>

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct pte	*CMAP1, *CMAP2, *mmap;
caddr_t		CADDR1, CADDR2, vmmap;
struct pte	*msgbufmap;
struct msgbuf	*msgbufp;
#endif

void pmap_activate __P((pmap_t, struct pcb *));

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, and allocate the system page table.
 *
 *	On the I386 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address 0xFE000000 to the actual
 *	(physical) address starting relative to 0]
 */
struct pte *pmap_pte();
extern vm_offset_t	atdevbase;

void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
#if BSDVM_COMPAT
	vm_offset_t va;
	struct pte *pte;
#endif
	extern vm_offset_t maxmem, physmem;
	extern int IdlePTD;

/* disable pageing in basemem for all machines until this cryptic comment
 * can be explained */
#if 1 ||	defined(ODYSSEUS) || defined(ARGO) || defined(CIRCE)
	firstaddr=0x100000;	/* for some reason, basemem screws up on this machine */
#endif
	printf("ps %x pe %x ", firstaddr, maxmem <<PG_SHIFT);
	avail_start = firstaddr;
	avail_end = maxmem << PG_SHIFT;

	/* XXX: allow for msgbuf */
	avail_end -= i386_round_page(sizeof(struct msgbuf));

	mem_size = physmem << PG_SHIFT;
	virtual_avail = atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

#ifdef notdef
	/*
	 * Create Kernel page directory table and page maps.
	 * [ currently done in locore. i have wild and crazy ideas -wfj ]
	 */
	bzero(firstaddr, 4*NBPG);
	kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
	kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;

	firstaddr += NBPG;
	for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
		x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
			struct pde *pde;
		pde = kernel_pmap->pm_pdir + x;
		*(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
	}
#else
	kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
#endif

	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;

#if BSDVM_COMPAT
	/*
	 * Allocate all the submaps we need
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = pmap_pte(kernel_pmap, va);

	SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1		)
	SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1		)
	SYSMAP(caddr_t		,mmap		,vmmap	   ,1		)
	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1		)

	virtual_avail = va;
#endif
	/**(int *)PTD = 0;
	load_cr3(rcr3());*/
}

pmap_isvalidphys(addr) {
	if (addr < 0xa0000) return (1);
	if (addr >= 0x100000) return (1);
	return(0);
}

/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
 * it cannot be used, and will generate a panic if tried. Note that this
 * memory will never be freed, and in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size) {
	vm_offset_t val;
	int i;
	extern boolean_t vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");

	size = round_page(size);
	val = virtual_avail;

	/* deal with "hole incursion" */
	for (i = 0; i < size; i += PAGE_SIZE) {
		while (!pmap_isvalidphys(avail_start))
			avail_start += PAGE_SIZE;

		virtual_avail = pmap_map(virtual_avail, avail_start,
			avail_start + PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
		avail_start += PAGE_SIZE;
	}

	blkclr ((caddr_t) val, size);
	return ((void *) val);
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t	phys_start, phys_end;
{
	vm_offset_t	addr, addr2;
	vm_size_t	npg, s;
	int		rv;
	extern int KPTphys;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
	/*
	 * Now that kernel map has been allocated, we can mark as
	 * unavailable regions which we have mapped in locore.
	 */
	addr = atdevbase;
	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
			   &addr, (0x100000-0xa0000), FALSE);

	addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, 2*NBPG, FALSE);

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table and pmap_attributes.
	 */
	npg = atop(phys_end - phys_start);
	s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
	s = round_page(s);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	pv_table = (pv_entry_t) addr;
	addr += sizeof(struct pv_entry) * npg;
	pmap_attributes = (char *) addr;
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
		       s, npg, pv_table, pmap_attributes);
#endif

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	vm_first_phys = phys_start;
	vm_last_phys = phys_end;
	pmap_initialized = TRUE;
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t	virt;
	vm_offset_t	start;
	vm_offset_t	end;
	int		prot;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return(virt);
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 *
 * [ just allocate a ptd and mark it uninitialize -- should we track
 *   with a table which process has which ptd? -wfj ]
 */
pmap_t
pmap_create(size)
	vm_size_t	size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return(NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		pg("pmap_pinit(%x)\n", pmap);
#endif
	/*
	 * No need to allocate page table space yet but we do need a
	 * valid page directory table.
	 */
	pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);

	/* wire in kernel global address entries */
	bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
		(KPTDI_LAST-KPTDI_FIRST+1)*4);

	/* install self-referential address mapping entry */
	*(int *)(pmap->pm_pdir+PTDPTDI) =
		(int)pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir) | PG_V | PG_URKW;

	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		pg("pmap_release(%x)\n", pmap);
#endif
#ifdef notdef /* DIAGNOSTIC */
	/* count would be 0 from pmap_destroy... */
	simple_lock(&pmap->pm_lock);
	if (pmap->pm_count != 1)
		panic("pmap_release count");
#endif
	kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t	pmap;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		pg("pmap_reference(%x)", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 *	Remove the given range of addresses from the specified map.
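
The self-referential directory entry that pmap_pinit() installs at PTDPTDI is what the header comment calls the recursive map: one page-directory entry points back at the page directory itself, so every page table (and the directory) appears inside a fixed 4 MB window of kernel virtual addresses, and finding the PTE for a virtual address becomes plain address arithmetic. Below is a minimal, self-contained sketch of that arithmetic only; it is not part of the original file, it assumes 4 KB pages with the i386 10/10/12 address split, and RECURSIVE_SLOT is an illustrative stand-in for PTDPTDI rather than its historical value.

#include <stdio.h>
#include <stdint.h>

#define PG_SHIFT	12			/* 4 KB pages */
#define PD_SHIFT	22			/* one PDE covers 4 MB */
#define RECURSIVE_SLOT	0x3BFu			/* stand-in for PTDPTDI (illustrative value) */

/* Base of the virtual window through which all page tables appear,
 * created by pointing one page-directory entry back at the directory. */
#define PTMAP_BASE	((uint32_t)RECURSIVE_SLOT << PD_SHIFT)

/* Virtual address of the PTE that maps 'va' (what pmap_pte computes). */
static uint32_t
pte_va(uint32_t va)
{
	return PTMAP_BASE + (va >> PG_SHIFT) * 4;	/* 4-byte PTEs on the i386 */
}

/* Virtual address of the PDE for 'va': the recursive slot is indexed twice,
 * so the page directory itself shows up as one page inside the window. */
static uint32_t
pde_va(uint32_t va)
{
	return PTMAP_BASE + ((uint32_t)RECURSIVE_SLOT << PG_SHIFT)
	    + (va >> PD_SHIFT) * 4;
}

int
main(void)
{
	uint32_t va = 0xfe012345;	/* arbitrary example address */

	printf("va %#x: PTE at %#x, PDE at %#x\n",
	    (unsigned)va, (unsigned)pte_va(va), (unsigned)pde_va(va));
	return 0;
}

With a slot of 0x3BF the window starts at 0xEFC00000; in the kernel the corresponding directory entry is made valid with PG_V | PG_URKW, exactly as pmap_pinit() does above.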

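pmap_bootstrap_alloc() in the listing is a one-way bump allocator for early boot: it takes the next physical pages starting at avail_start, skips the ISA hole between 640 KB and 1 MB (pmap_isvalidphys), maps them at virtual_avail, and zeroes them; nothing is ever freed, and calling it after vm_page_startup has run is a panic. The following is a minimal user-space sketch of just the hole-skipping bump allocation, not the kernel routine itself; PAGE_SIZE and the starting avail_start value are assumed for illustration, and the mapping and zeroing steps are omitted.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096u

/* Mirrors pmap_isvalidphys(): usable RAM lies below 640 KB or at/above 1 MB;
 * the ISA hole in between is skipped. */
static int
phys_is_valid(uint32_t pa)
{
	return pa < 0xa0000u || pa >= 0x100000u;
}

static uint32_t phys_next = 0x9d000u;	/* hypothetical avail_start value */

/* Hand out physical pages in order, stepping over the hole.  The real
 * routine additionally maps each stolen page at virtual_avail and zeroes it. */
static uint32_t
steal_page(void)
{
	uint32_t pa;

	while (!phys_is_valid(phys_next))
		phys_next += PAGE_SIZE;
	pa = phys_next;
	phys_next += PAGE_SIZE;
	return pa;
}

int
main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		printf("stole page at %#x\n", (unsigned)steal_page());
	return 0;
}

Running the sketch shows the allocator handing out 0x9d000 through 0x9f000 and then jumping to 0x100000, which is the "hole incursion" case the loop in pmap_bootstrap_alloc() deals with.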