⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 pmap.c

📁 早期freebsd实现
💻 C
📖 第 1 页 / 共 3 页
字号:
/*  * Copyright (c) 1992, 1993 *	The Regents of the University of California.  All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and Ralph Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright *    notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright *    notice, this list of conditions and the following disclaimer in the *    documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software *    must display the following acknowledgement: *	This product includes software developed by the University of *	California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors *    may be used to endorse or promote products derived from this software *    without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* *	@(#)pmap.c	8.4 (Berkeley) 1/26/94 *//* *	Manages physical address maps. * *	In addition to hardware address maps, this *	module is called upon to provide software-use-only *	maps which may or may not be stored in the same *	form as hardware maps.  These pseudo-maps are *	used to store intermediate results from copy *	operations to and from address spaces. * *	Since the information managed by this module is *	also stored by the logical address mapping module, *	this module may throw away valid virtual-to-physical *	mappings at almost any time.  However, invalidations *	of virtual-to-physical mappings must be done as *	requested. * *	In order to cope with hardware architectures which *	make virtual-to-physical map invalidates expensive, *	this module may delay invalidate or reduced protection *	operations until such time as they are actually *	necessary.  This module is given full information as *	to which processors are currently using which maps, *	and to when physical maps must be made correct. */#include <sys/param.h>#include <sys/systm.h>#include <sys/proc.h>#include <sys/malloc.h>#include <sys/user.h>#include <sys/buf.h>#ifdef SYSVSHM#include <sys/shm.h>#endif#include <vm/vm_kern.h>#include <vm/vm_page.h>#include <vm/vm_pageout.h>#include <machine/machConst.h>#include <machine/pte.h>extern vm_page_t vm_page_alloc1 __P((void));extern void vm_page_free1 __P((vm_page_t));/* * For each vm_page_t, there is a list of all currently valid virtual * mappings of that page.  An entry is a pv_entry_t, the list is pv_table. * XXX really should do this as a part of the higher level code. 
*/
/*
 * One entry on a physical page's pv list: records a single virtual
 * mapping of that page, i.e. the pmap that owns the mapping and the
 * virtual address it is mapped at.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
} *pv_entry_t;

pv_entry_t	pv_table;	/* array of entries, one per page */

extern void	pmap_remove_pv();

/* Index of physical address `pa' into pv_table (page units, relative
 * to the first managed physical address). */
#define pa_index(pa)		atop((pa) - first_phys_addr)
/* pv list head for physical address `pa'. */
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])

#ifdef DEBUG
/* Event counters for pmap_enter(); read from a debugger. */
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
	int cachehit;	/* new entry forced valid entry out */
} enter_stats;
/* Event counters for pmap_remove(); read from a debugger. */
struct {
	int calls;
	int removes;
	int flushes;
	int pidflushes;	/* HW pid stolen */
	int pvfirst;
	int pvsearch;
} remove_stats;

int pmapdebug;		/* bitmask selecting which debug printfs fire */
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_PVENTRY	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_TLBPID	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000
#endif /* DEBUG */

struct pmap	kernel_pmap_store;

vm_offset_t    	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
int		pmaxpagesperpage;	/* PAGE_SIZE / NBPG */
#ifdef ATTR
char		*pmap_attributes;	/* reference and modify bits */
#endif
struct segtab
*free_segtab;		/* free list kept locally */u_int		tlbpid_gen = 1;		/* TLB PID generation count */int		tlbpid_cnt = 2;		/* next available TLB PID */pt_entry_t	*Sysmap;		/* kernel pte table */u_int		Sysmapsize;		/* number of pte's in Sysmap *//* *	Bootstrap the system enough to run with virtual memory. *	firstaddr is the first unused kseg0 address (not page aligned). */voidpmap_bootstrap(firstaddr)	vm_offset_t firstaddr;{	register int i;	vm_offset_t start = firstaddr;	extern int maxmem, physmem;#define	valloc(name, type, num) \	    (name) = (type *)firstaddr; firstaddr = (vm_offset_t)((name)+(num))	/*	 * Allocate a PTE table for the kernel.	 * The '1024' comes from PAGER_MAP_SIZE in vm_pager_init().	 * This should be kept in sync.	 * We also reserve space for kmem_alloc_pageable() for vm_fork().	 */	Sysmapsize = (VM_KMEM_SIZE + VM_MBUF_SIZE + VM_PHYS_SIZE +		nbuf * MAXBSIZE + 16 * NCARGS) / NBPG + 1024 + 256;#ifdef SYSVSHM	Sysmapsize += shminfo.shmall;#endif	valloc(Sysmap, pt_entry_t, Sysmapsize);#ifdef ATTR	valloc(pmap_attributes, char, physmem);#endif	/*	 * Allocate memory for pv_table.	 * This will allocate more entries than we really need.	 * We could do this in pmap_init when we know the actual	 * phys_start and phys_end but its better to use kseg0 addresses	 * rather than kernel virtual addresses mapped through the TLB.	 */	i = maxmem - pmax_btop(MACH_CACHED_TO_PHYS(firstaddr));	valloc(pv_table, struct pv_entry, i);	/*	 * Clear allocated memory.	 */	firstaddr = pmax_round_page(firstaddr);	bzero((caddr_t)start, firstaddr - start);	avail_start = MACH_CACHED_TO_PHYS(firstaddr);	avail_end = pmax_ptob(maxmem);	mem_size = avail_end - avail_start;	virtual_avail = VM_MIN_KERNEL_ADDRESS;	virtual_end = VM_MIN_KERNEL_ADDRESS + Sysmapsize * NBPG;	/* XXX need to decide how to set cnt.v_page_size */	pmaxpagesperpage = 1;	simple_lock_init(&kernel_pmap_store.pm_lock);	kernel_pmap_store.pm_count = 1;}/* * Bootstrap memory allocator. 
 * This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
 * it cannot be used, and will generate a panic if tried. Note that this
 * memory will never be freed, and in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size)
	int size;
{
	vm_offset_t val;
	extern boolean_t vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");

	/* Steal the pages from the front of the available physical
	 * memory and hand back a cached (kseg0-style) address for them. */
	val = MACH_PHYS_TO_CACHED(avail_start);
	size = round_page(size);
	avail_start += size;

	blkclr((caddr_t)val, size);
	return ((void *)val);
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{

	/* Nothing to do on this port beyond the optional debug trace. */
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_INIT))
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return (NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	register int i;
	int s;
	extern struct vmspace vmspace0;
	extern struct user *proc0paddr;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
#endif
	simple_lock_init(&pmap->pm_lock);
	pmap->pm_count = 1;
	if (free_segtab) {
		/* Reuse a segment table from the local free list; the
		 * first word of each free segtab links to the next. */
		s = splimp();
		pmap->pm_segtab = free_segtab;
		free_segtab = *(struct segtab **)free_segtab;
		pmap->pm_segtab->seg_tab[0] = NULL;
		splx(s);
	} else {
		register struct segtab *stp;
		vm_page_t mem;

		/*
		 * Free list empty: allocate a fresh zeroed page, keep
		 * the first segtab in it for this pmap and chain the
		 * remaining ones onto the free list.
		 */
		mem = vm_page_alloc1();
		pmap_zero_page(VM_PAGE_TO_PHYS(mem));
		pmap->pm_segtab = stp = (struct segtab *)
			MACH_PHYS_TO_CACHED(VM_PAGE_TO_PHYS(mem));
		i = pmaxpagesperpage * (NBPG / sizeof(struct segtab));
		s = splimp();
		while (--i != 0) {
			stp++;
			*(struct segtab **)stp = free_segtab;
			free_segtab = stp;
		}
		splx(s);
	}
#ifdef DIAGNOSTIC
	for (i = 0; i < PMAP_SEGTABSIZE; i++)
		if (pmap->pm_segtab->seg_tab[i] != 0)
			panic("pmap_pinit: pm_segtab != 0");
#endif
	if (pmap == &vmspace0.vm_pmap) {
		/*
		 * The initial process has already been allocated a TLBPID
		 * in mach_init().
		 */
		pmap->pm_tlbpid = 1;
		pmap->pm_tlbgen = tlbpid_gen;
		proc0paddr->u_pcb.pcb_segtab = (void *)pmap->pm_segtab;
	} else {
		/* tlbgen 0 never matches tlbpid_gen (starts at 1), so a
		 * real TLB PID will be assigned on first activation. */
		pmap->pm_tlbpid = 0;
		pmap->pm_tlbgen = 0;
	}
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
*/voidpmap_destroy(pmap)	register pmap_t pmap;{	int count;#ifdef DEBUG	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))		printf("pmap_destroy(%x)\n", pmap);#endif	if (pmap == NULL)		return;	simple_lock(&pmap->pm_lock);	count = --pmap->pm_count;	simple_unlock(&pmap->pm_lock);	if (count == 0) {		pmap_release(pmap);		free((caddr_t)pmap, M_VMPMAP);	}}/* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */voidpmap_release(pmap)	register pmap_t pmap;{#ifdef DEBUG	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))		printf("pmap_release(%x)\n", pmap);#endif	if (pmap->pm_segtab) {		register pt_entry_t *pte;		register int i;		int s;#ifdef DIAGNOSTIC		register int j;#endif		for (i = 0; i < PMAP_SEGTABSIZE; i++) {			/* get pointer to segment map */			pte = pmap->pm_segtab->seg_tab[i];			if (!pte)				continue;			vm_page_free1(				PHYS_TO_VM_PAGE(MACH_CACHED_TO_PHYS(pte)));#ifdef DIAGNOSTIC			for (j = 0; j < NPTEPG; j++) {				if (pte->pt_entry)					panic("pmap_release: segmap not empty");			}#endif			pmap->pm_segtab->seg_tab[i] = NULL;		}		s = splimp();		*(struct segtab **)pmap->pm_segtab = free_segtab;		free_segtab = pmap->pm_segtab;		splx(s);		pmap->pm_segtab = NULL;	}}/* *	Add a reference to the specified pmap. */voidpmap_reference(pmap)	pmap_t pmap;{#ifdef DEBUG	if (pmapdebug & PDB_FOLLOW)		printf("pmap_reference(%x)\n", pmap);#endif	if (pmap != NULL) {		simple_lock(&pmap->pm_lock);		pmap->pm_count++;		simple_unlock(&pmap->pm_lock);	}}/* *	Remove the given range of addresses from the specified map. * *	It is assumed that the start and end are properly *	rounded to the page size. 
*/
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t nssva;
	register pt_entry_t *pte;
	unsigned entry;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
	remove_stats.calls++;
#endif
	if (pmap == NULL)
		return;

	if (!pmap->pm_segtab) {
		register pt_entry_t *pte;

		/* remove entries from kernel pmap */
		/* The kernel pmap has no segtab: its PTEs live in the
		 * linear Sysmap table, indexed directly by kvtopte(). */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS || eva > virtual_end)
			panic("pmap_remove: kva not in range");
#endif
		pte = kvtopte(sva);
		for (; sva < eva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			/* Skip PTEs that are not valid. */
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			/* NOTE(review): function is truncated here in this
			 * extract (page 1 of 3); the remainder is not
			 * visible. */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -