pmap.c

Collection: OSKIT source code for component-based operating system development
Language: C
Page 1 of 5
 * => do NOT use this on kernel mappings [why?  because pv_ptp may be NULL]
 * => we may grab pmap_tmpptp_lock and return with it held
 */

__inline static pt_entry_t *
pmap_tmpmap_pvepte(pve)
	struct pv_entry *pve;
{
#ifdef DIAGNOSTIC
	if (pve->pv_pmap == pmap_kernel())
		panic("pmap_tmpmap_pvepte: attempt to map kernel");
#endif

	/* is it current pmap?  use direct mapping... */
	if (pmap_is_curpmap(pve->pv_pmap))
		return(vtopte(pve->pv_va));

	return(((pt_entry_t *)pmap_tmpmap_pa(VM_PAGE_TO_PHYS(pve->pv_ptp)))
	       + ptei((unsigned)pve->pv_va));
}

/*
 * pmap_tmpunmap_pvepte: release a mapping obtained with pmap_tmpmap_pvepte
 *
 * => we will release pmap_tmpptp_lock if we hold it
 */

__inline static void
pmap_tmpunmap_pvepte(pve)
	struct pv_entry *pve;
{
	/* was it current pmap?   if so, return */
	if (pmap_is_curpmap(pve->pv_pmap))
		return;

	pmap_tmpunmap_pa();
}

/*
 * pmap_map_ptes: map a pmap's PTEs into KVM and lock them in
 *
 * => we lock enough pmaps to keep things locked in
 * => must be undone with pmap_unmap_ptes before returning
 */

#ifndef OSKIT
__inline
#endif
static pt_entry_t *
pmap_map_ptes(pmap)
	struct pmap *pmap;
{
	pd_entry_t opde;

	/* the kernel's pmap is always accessible */
	if (pmap == pmap_kernel()) {
		return(PTE_BASE);
	}

	/* if curpmap then we are always mapped */
	if (pmap_is_curpmap(pmap)) {
		simple_lock(&pmap->pm_obj.vmobjlock);
		return(PTE_BASE);
	}

	/* need to lock both curpmap and pmap: use ordered locking */
	if ((unsigned) pmap < (unsigned) curpcb->pcb_pmap) {
		simple_lock(&pmap->pm_obj.vmobjlock);
		simple_lock(&curpcb->pcb_pmap->pm_obj.vmobjlock);
	} else {
		simple_lock(&curpcb->pcb_pmap->pm_obj.vmobjlock);
		simple_lock(&pmap->pm_obj.vmobjlock);
	}

	/* need to load a new alternate pt space into curpmap? */
	opde = *APDP_PDE;
	if (!pmap_valid_entry(opde) || (opde & PG_FRAME) != pmap->pm_pdirpa) {
		*APDP_PDE = (pd_entry_t) (pmap->pm_pdirpa | PG_RW | PG_V);
		if (pmap_valid_entry(opde))
			pmap_update();
	}
	return(APTE_BASE);
}

/*
 * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
 */

__inline static void
pmap_unmap_ptes(pmap)
	struct pmap *pmap;
{
	if (pmap == pmap_kernel()) {
		return;
	}
	if (pmap_is_curpmap(pmap)) {
		simple_unlock(&pmap->pm_obj.vmobjlock);
	} else {
		simple_unlock(&pmap->pm_obj.vmobjlock);
		simple_unlock(&curpcb->pcb_pmap->pm_obj.vmobjlock);
	}
}
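
/*
 * usage sketch: callers that need to inspect another pmap's page tables
 * typically bracket the access with pmap_map_ptes/pmap_unmap_ptes and
 * index the returned linear PTE array with i386_btop(va).  the helper
 * below (pmap_lookup_pte) is hypothetical and only illustrates the
 * pattern; it is not defined anywhere in this file.
 */
#if 0	/* illustrative only */
static pt_entry_t
pmap_lookup_pte(pmap, va)
	struct pmap *pmap;
	vaddr_t va;
{
	pt_entry_t *ptes, pte;

	ptes = pmap_map_ptes(pmap);		/* locks pmap as needed */
	if (!pmap_valid_entry(pmap->pm_pdir[pdei(va)]))
		pte = 0;			/* no PTP -> no mapping */
	else
		pte = ptes[i386_btop(va)];	/* PTE that maps va */
	pmap_unmap_ptes(pmap);			/* drop locks/mapping */
	return(pte);
}
#endif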

/*
 * p m a p   k e n t e r   f u n c t i o n s
 *
 * functions to quickly enter/remove pages from the kernel address
 * space.   pmap_kremove/pmap_kenter_pgs are exported to MI kernel.
 * we make use of the recursive PTE mappings.
 */

/*
 * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
 *
 * => no need to lock anything, assume va is already allocated
 * => should be faster than normal pmap enter function
 */

void
pmap_kenter_pa(va, pa, prot)
	vaddr_t va;
	paddr_t pa;
	vm_prot_t prot;
{
	pt_entry_t *pte, opte;

	if (va < VM_MIN_KERNEL_ADDRESS)
		pte = vtopte(va);
	else
		pte = kvtopte(va);
	opte = *pte;

#ifdef LARGEPAGES
	/* XXX For now... */
	if (opte & PG_PS) {
		panic("pmap_kenter_pa: PG_PS");
	}
#endif

	*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
		PG_V | pmap_pg_g;	/* zap! */
	if (pmap_valid_entry(opte))
		pmap_update_pg(va);
}

/*
 * pmap_kremove: remove a kernel mapping(s) without R/M (pv_entry) tracking
 *
 * => no need to lock anything
 * => caller must dispose of any vm_page mapped in the va range
 * => note: not an inline function
 * => we assume the va is page aligned and the len is a multiple of PAGE_SIZE
 * => we assume kernel only unmaps valid addresses and thus don't bother
 *    checking the valid bit before doing TLB flushing
 */

void
pmap_kremove(va, len)
	vaddr_t va;
	vsize_t len;
{
	pt_entry_t *pte;

	len >>= PAGE_SHIFT;
	for ( /* null */ ; len ; len--, va += PAGE_SIZE) {
		if (va < VM_MIN_KERNEL_ADDRESS)
			pte = vtopte(va);
		else
			pte = kvtopte(va);

#ifdef LARGEPAGES
		/* XXX For now... */
		if (*pte & PG_PS)
			panic("pmap_kremove: PG_PS");
#endif
#ifdef DIAGNOSTIC
		if (*pte & PG_PVLIST)
			panic("pmap_kremove: PG_PVLIST mapping for 0x%lx\n",
			      va);
#endif
		*pte = 0;		/* zap! */
#if defined(I386_CPU)
		if (cpu_class != CPUCLASS_386)
#endif
			pmap_update_pg(va);
	}
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		pmap_update();
#endif
}

/*
 * pmap_kenter_pgs: enter in a number of vm_pages
 */

void
pmap_kenter_pgs(va, pgs, npgs)
	vaddr_t va;
	struct vm_page **pgs;
	int npgs;
{
	pt_entry_t *pte, opte;
	int lcv;
	vaddr_t tva;
#if defined(I386_CPU)
	boolean_t need_update = FALSE;
#endif

	for (lcv = 0 ; lcv < npgs ; lcv++) {
		tva = va + lcv * PAGE_SIZE;
		if (va < VM_MIN_KERNEL_ADDRESS)
			pte = vtopte(tva);
		else
			pte = kvtopte(tva);
		opte = *pte;
#ifdef LARGEPAGES
		/* XXX For now... */
		if (opte & PG_PS)
			panic("pmap_kenter_pgs: PG_PS");
#endif
		*pte = VM_PAGE_TO_PHYS(pgs[lcv]) | PG_RW | PG_V | pmap_pg_g;
#if defined(I386_CPU)
		if (cpu_class == CPUCLASS_386) {
			if (pmap_valid_entry(opte))
				need_update = TRUE;
			continue;
		}
#endif
		if (pmap_valid_entry(opte))
			pmap_update_pg(tva);
	}
#if defined(I386_CPU)
	if (need_update && cpu_class == CPUCLASS_386)
		pmap_update();
#endif
}

/*
 * p m a p   i n i t   f u n c t i o n s
 *
 * pmap_bootstrap and pmap_init are called during system startup
 * to init the pmap module.   pmap_bootstrap() does a low level
 * init just to get things rolling.   pmap_init() finishes the job.
 */

/*
 * pmap_bootstrap: get the system in a state where it can run with VM
 *	properly enabled (called before main()).   the VM system is
 *      fully init'd later...
 *
 * => on i386, locore.s has already enabled the MMU by allocating
 *	a PDP for the kernel, and nkpde PTP's for the kernel.
 * => kva_start is the first free virtual address in kernel space
 */

void
pmap_bootstrap(kva_start)
	vaddr_t kva_start;
{
	struct pmap *kpm;
	vaddr_t kva;
	pt_entry_t *pte;

	/*
	 * set up our local static global vars that keep track of the
	 * usage of KVM before kernel_map is set up
	 */

	virtual_avail = kva_start;		/* first free KVA */
	virtual_end = VM_MAX_KERNEL_ADDRESS;	/* last KVA */

	/*
	 * set up protection_codes: we need to be able to convert from
	 * a MI protection code (some combo of VM_PROT...) to something
	 * we can jam into a i386 PTE.
	 */

	protection_codes[VM_PROT_NONE] = 0;  			/* --- */
	protection_codes[VM_PROT_EXECUTE] = PG_RO;		/* --x */
	protection_codes[VM_PROT_READ] = PG_RO;			/* -r- */
	protection_codes[VM_PROT_READ|VM_PROT_EXECUTE] = PG_RO;	/* -rx */
	protection_codes[VM_PROT_WRITE] = PG_RW;		/* w-- */
	protection_codes[VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;/* w-x */
	protection_codes[VM_PROT_WRITE|VM_PROT_READ] = PG_RW;	/* wr- */
	protection_codes[VM_PROT_ALL] = PG_RW;			/* wrx */
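
	/*
	 * note: the i386 PTE has no separate execute bit, so any protection
	 * that is only readable and/or executable collapses to PG_RO and
	 * any protection that includes write access collapses to PG_RW;
	 * execute permission cannot be withheld from a readable page here.
	 */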

	/*
	 * now we init the kernel's pmap
	 *
	 * the kernel pmap's pm_obj is not used for much.   however, in
	 * user pmaps the pm_obj contains the list of active PTPs.
	 * the pm_obj currently does not have a pager.   it might be possible
	 * to add a pager that would allow a process to read-only mmap its
	 * own page tables (fast user level vtophys?).   this may or may not
	 * be useful.
	 */

	kpm = pmap_kernel();
	simple_lock_init(&kpm->pm_obj.vmobjlock);
	kpm->pm_obj.pgops = NULL;
	TAILQ_INIT(&kpm->pm_obj.memq);
	kpm->pm_obj.uo_npages = 0;
	kpm->pm_obj.uo_refs = 1;
	memset(&kpm->pm_list, 0, sizeof(kpm->pm_list));  /* pm_list not used */
#ifdef OSKIT
	kpm->pm_pdir = (pd_entry_t *)base_pdir_pa; /* XXX */
	kpm->pm_pdirpa = (u_int32_t)base_pdir_pa;
#else
	kpm->pm_pdir = (pd_entry_t *)(proc0.p_addr->u_pcb.pcb_cr3 + KERNBASE);
	kpm->pm_pdirpa = (u_int32_t) proc0.p_addr->u_pcb.pcb_cr3;
#endif
	kpm->pm_stats.wired_count = kpm->pm_stats.resident_count =
		i386_btop(kva_start - VM_MIN_KERNEL_ADDRESS);

	/*
	 * the above is just a rough estimate and not critical to the proper
	 * operation of the system.
	 */

	curpcb->pcb_pmap = kpm;	/* proc0's pcb */

	/*
	 * enable global TLB entries if they are supported
	 */

	if (cpu_feature & CPUID_PGE) {
#ifdef OSKIT
		extern int _end[];
#endif
		lcr4(rcr4() | CR4_PGE);	/* enable hardware (via %cr4) */
		pmap_pg_g = PG_G;	/* enable software */

		/* add PG_G attribute to already mapped kernel pages */
#ifdef OSKIT
		for (kva = VM_MIN_KERNEL_ADDRESS ; kva < (vaddr_t)_end ;
		     kva += PAGE_SIZE) {
#else
		for (kva = VM_MIN_KERNEL_ADDRESS ; kva < virtual_avail ;
		     kva += PAGE_SIZE) {
#endif
			if (pmap_valid_entry(PTE_BASE[i386_btop(kva)]))
				PTE_BASE[i386_btop(kva)] |= PG_G;
		}
	}

#ifdef LARGEPAGES
	/*
	 * enable large pages if they are supported.
	 */

	if (cpu_feature & CPUID_PSE) {
#ifndef OSKIT
		paddr_t pa;
		vaddr_t kva_end;
		pd_entry_t *pde;
		extern char _etext;
#endif

		lcr4(rcr4() | CR4_PSE);	/* enable hardware (via %cr4) */
		pmap_largepages = 1;	/* enable software */

		/*
		 * the TLB must be flushed after enabling large pages
		 * on Pentium CPUs, according to section 3.6.2.2 of
		 * "Intel Architecture Software Developer's Manual,
		 * Volume 3: System Programming".
		 */
		tlbflush();

#ifndef OSKIT
		/*
		 * now, remap the kernel text using large pages.  we
		 * assume that the linker has properly aligned the
		 * .data segment to a 4MB boundary.
		 */
		kva_end = roundup((vaddr_t)&_etext, NBPD);
		for (pa = 0, kva = KERNBASE; kva < kva_end;
		     kva += NBPD, pa += NBPD) {
			pde = &kpm->pm_pdir[pdei(kva)];
			*pde = pa | pmap_pg_g | PG_PS |
			    PG_KR | PG_V;	/* zap! */
			tlbflush();
		}
#endif
	}
#endif /* LARGEPAGES */

	/*
	 * now we allocate the "special" VAs which are used for tmp mappings
	 * by the pmap (and other modules).    we allocate the VAs by advancing
	 * virtual_avail (note that there are no pages mapped at these VAs).
	 * we find the PTE that maps the allocated VA via the linear PTE
	 * mapping.
	 */

	pte = PTE_BASE + i386_btop(virtual_avail);

	csrcp = (caddr_t) virtual_avail;  csrc_pte = pte;	/* allocate */
	virtual_avail += PAGE_SIZE; pte++;			/* advance */

	cdstp = (caddr_t) virtual_avail;  cdst_pte = pte;
	virtual_avail += PAGE_SIZE; pte++;

	zerop = (caddr_t) virtual_avail;  zero_pte = pte;
	virtual_avail += PAGE_SIZE; pte++;

	ptpp = (caddr_t) virtual_avail;  ptp_pte = pte;
	virtual_avail += PAGE_SIZE; pte++;
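
	/*
	 * (each of the windows above gets one page of KVA plus a pointer to
	 * the PTE that maps it; presumably csrcp/cdstp serve as the temporary
	 * source/destination mappings for pmap_copy_page, zerop as the window
	 * for pmap_zero_page, and ptpp as the window used by pmap_tmpmap_pa
	 * for the temporary PTP mappings seen earlier in this file.)
	 */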

	/* XXX: vmmap used by mem.c... should be uvm_map_reserve */
	vmmap = (char *)virtual_avail;			/* don't need pte */
	virtual_avail += PAGE_SIZE; pte++;

#ifndef OSKIT
	msgbuf_vaddr = virtual_avail;			/* don't need pte */
	virtual_avail += round_page(MSGBUFSIZE); pte++;

	idt_vaddr = virtual_avail;			/* don't need pte */
	virtual_avail += PAGE_SIZE; pte++;
	idt_paddr = avail_start;			/* steal a page */
	avail_start += PAGE_SIZE;

#if defined(I586_CPU)
	/* pentium f00f bug stuff */
	pentium_idt_vaddr = virtual_avail;		/* don't need pte */
	virtual_avail += PAGE_SIZE; pte++;
#endif
#endif /*!OSKIT*/

	/*
	 * now we reserve some VM for mapping pages when doing a crash dump
	 */

	virtual_avail = reserve_dumppages(virtual_avail);

	/*
	 * init the static-global locks and global lists.
	 */

	spinlockinit(&pmap_main_lock, "pmaplk", 0);
	simple_lock_init(&pvalloc_lock);
	simple_lock_init(&pmaps_lock);
	simple_lock_init(&pmap_copy_page_lock);
	simple_lock_init(&pmap_zero_page_lock);
	simple_lock_init(&pmap_tmpptp_lock);
	LIST_INIT(&pmaps);
	TAILQ_INIT(&pv_freepages);
	TAILQ_INIT(&pv_unusedpgs);

	/*
	 * initialize the pmap pool.
	 */

	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
		  0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);

	/*
	 * initialize the PDE pool and cache.
	 */

	pool_init(&pmap_pdp_pool, PAGE_SIZE, 0, 0, 0, "pdppl",
		  0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
	pool_cache_init(&pmap_pdp_cache, &pmap_pdp_pool,
			pmap_pdp_ctor, NULL, NULL);

	/*
	 * ensure the TLB is sync'd with reality by flushing it...
	 */

	pmap_update();
}

/*
 * pmap_init: called from uvm_init, our job is to get the pmap
 * system ready to manage mappings... this mainly means initing
 * the pv_entry stuff.
 */

void
pmap_init()
{
	int npages, lcv, i;
	vaddr_t addr;
	vsize_t s;

	/*
	 * compute the number of pages we have and then allocate RAM
	 * for each pages' pv_head and saved attributes.
	 */

	npages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
	s = (vsize_t) (sizeof(struct pv_head) * npages +
		       sizeof(char) * npages);
	s = round_page(s); /* round up */
	addr = (vaddr_t) uvm_km_zalloc(kernel_map, s);
	if (addr == 0)
		panic("pmap_init: unable to allocate pv_heads");

	/*
	 * init all pv_head's and attrs in one memset
	 */

	/* allocate pv_head stuff first */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		vm_physmem[lcv].pmseg.pvhead = (struct pv_head *) addr;
