/*
 * uvm_page.c
 *
 * Provenance note: this text was captured from a web code viewer of the
 * "OSKIT source code for component-based operating system development"
 * collection (C, 1,367 lines in total; this capture is page 1 of 3, so
 * the file is truncated further below).
 */
/*	$NetBSD: uvm_page.c,v 1.46 2000/12/01 09:54:42 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>

#define UVM_PAGE                /* pull in uvm_page.h functions */
#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */
struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

extern struct uvm_pagerops uvm_vnodeops;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */
static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we static allocate the one bootstrap bucket below...
 */
static struct pglist uvm_bootbucket;

/*
 * local prototypes
 */
static void uvm_pageinsert __P((struct vm_page *));
static void uvm_pageremove __P((struct vm_page *));

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => call should have already set pg's object and offset pointers
 *    and bumped the version counter
 */
__inline static void
uvm_pageinsert(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	/* double insertion would corrupt both the hash and the object list */
	if (pg->flags & PG_TABLED)
		panic("uvm_pageinsert: already inserted");
#endif

	/* hash bucket is chosen from <uobject,offset>; both must be set */
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	/*
	 * raise spl before taking the hash lock — the bucket is also
	 * touched from interrupt level (NOTE(review): inferred from the
	 * splimp/splx bracket here; confirm against the rest of uvm).
	 */
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	pg->flags |= PG_TABLED;
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 * => on return pg->uobject is NULL and pg->version has been bumped
 */
#ifndef OSKIT
static __inline void
#else
static void
#endif
uvm_pageremove(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

	KASSERT(pg->flags & PG_TABLED);

	/* same spl + hashlock discipline as uvm_pageinsert above */
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

	/* vnode-backed pages are counted separately; keep the stat in sync */
	if (pg->uobject->pgops == &uvm_vnodeops) {
		uvmexp.vnodepages--;
	}

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	pg->flags &= ~PG_TABLED;
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->version++;

}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */
void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;

	/*
	 * step 1: init the page queues and page queue locks
	 */
	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		for (i = 0; i < PGFL_NQUEUES; i++)
			TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
	}
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * step 2: init the <obj,offset> => <page> hash table. for now
	 * we just have one bucket (the bootstrap bucket).   later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */
	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * step 3: allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */
	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */
	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */
	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * step 4: init the vm_page structures and put them in the correct
	 * place...
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {

		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
			/*
			 * only pages inside the [avail_start, avail_end]
			 * window are handed to the free pool; the rest stay
			 * reserved for the MD code.
			 */
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * step 5: pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */
	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * step 6: init locks for kernel threads
	 */
	simple_lock_init(&uvm.pagedaemon_lock);
	simple_lock_init(&uvm.aiodoned_lock);

	/*
	 * step 7: init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = 5;

	/*
	 * step 8: determine if we should zero pages in the idle
	 * loop.
	 */
	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */
	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */
void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	/* a power of two has no bits in common with (itself - 1) */
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	/* derive pageshift = log2(pagesize) by linear search */
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */
vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */
	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
*/	if (initialized == FALSE) {		pmap_virtual_space(&virtual_space_start, &virtual_space_end);		/* round it the way we like it */		virtual_space_start = round_page(virtual_space_start);		virtual_space_end = trunc_page(virtual_space_end);		initialized = TRUE;	}	/*	 * allocate virtual memory for this request	 */	if (virtual_space_start == virtual_space_end ||	    (virtual_space_end - virtual_space_start) < size)		panic("uvm_pageboot_alloc: out of virtual space");	addr = virtual_space_start;#ifdef PMAP_GROWKERNEL	/*	 * If the kernel pmap can't map the requested space,	 * then allocate more resources for it.	 */	if (uvm_maxkaddr < (addr + size)) {		uvm_maxkaddr = pmap_growkernel(addr + size);		if (uvm_maxkaddr < (addr + size))			panic("uvm_pageboot_alloc: pmap_growkernel() failed");	}#endif	virtual_space_start += size;	/*	 * allocate and mapin physical pages to back new virtual pages	 */	for (vaddr = round_page(addr) ; vaddr < addr + size ;	    vaddr += PAGE_SIZE) {		if (!uvm_page_physget(&paddr))			panic("uvm_pageboot_alloc: out of memory");

/*
 * NOTE(review): the web capture ends here with the viewer's
 * keyboard-shortcut help panel (copy code: Ctrl+C, search code: Ctrl+F,
 * fullscreen: F11, increase font: Ctrl+=, decrease font: Ctrl+-,
 * show shortcuts: ?).  Pages 2-3 of uvm_page.c are missing, so
 * uvm_pageboot_alloc above is truncated mid-function.
 */