
📄 pmap.c

📁 Early FreeBSD implementation
💻 C
📖 Page 1 of 5
	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
#endif

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
#ifdef PMAPSTATS
		enter_stats.pwchange++;
#endif
		/*
		 * Wiring change, just update stats.
		 * We don't worry about wiring PT pages as they remain
		 * resident as long as there are valid mappings in them.
		 * Hence, if a user page is wired, the PT page will be also.
		 */
		if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
#ifdef DEBUG
			if (pmapdebug & PDB_ENTER)
				printf("enter: wiring change -> %x\n", wired);
#endif
			if (wired)
				pmap->pm_stats.wired_count++;
			else
				pmap->pm_stats.wired_count--;
#ifdef PMAPSTATS
			if (pmap_pte_prot(pte) == pte_prot(pmap, prot))
				enter_stats.wchange++;
#endif
		}
#ifdef PMAPSTATS
		else if (pmap_pte_prot(pte) != pte_prot(pmap, prot))
			enter_stats.pchange++;
		else
			enter_stats.nochange++;
#endif
		/*
		 * Retain cache inhibition status
		 */
		checkpv = FALSE;
		if (pmap_pte_ci(pte))
			cacheable = FALSE;
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: removing old mapping %x\n", va);
#endif
		pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH);
#ifdef PMAPSTATS
		enter_stats.mchange++;
#endif
	}

	/*
	 * If this is a new user mapping, increment the wiring count
	 * on this PT page.  PT pages are wired down as long as there
	 * is a valid mapping in the page.
	 */
	if (pmap != kernel_pmap)
		(void) vm_map_pageable(pt_map, trunc_page(pte),
				       round_page(pte+1), FALSE);

	/*
	 * Enter on the PV list if part of our managed memory
	 * Note that we raise IPL while manipulating pv_table
	 * since pmap_enter can be called at interrupt time.
	 */
	if (pa >= vm_first_phys && pa < vm_last_phys) {
		register pv_entry_t pv, npv;
		int s;

#ifdef PMAPSTATS
		enter_stats.managed++;
#endif
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: pv at %x: %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
#ifdef PMAPSTATS
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
			pv->pv_ptste = NULL;
			pv->pv_ptpmap = NULL;
			pv->pv_flags = 0;
		}
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		else {
#ifdef DEBUG
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					panic("pmap_enter: already in pv_tab");
#endif
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			npv->pv_ptste = NULL;
			npv->pv_ptpmap = NULL;
			npv->pv_flags = 0;
			pv->pv_next = npv;
#ifdef PMAPSTATS
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
#ifdef HAVEVAC
			/*
			 * Since there is another logical mapping for the
			 * same page we may need to cache-inhibit the
			 * descriptors on those CPUs with external VACs.
			 * We don't need to CI if:
			 *
			 * - No two mappings belong to the same user pmaps.
			 *   Since the cache is flushed on context switches
			 *   there is no problem between user processes.
			 *
			 * - Mappings within a single pmap are a certain
			 *   magic distance apart.  VAs at these appropriate
			 *   boundaries map to the same cache entries or
			 *   otherwise don't conflict.
			 *
			 * To keep it simple, we only check for these special
			 * cases if there are only two mappings, otherwise we
			 * punt and always CI.
			 *
			 * Note that there are no aliasing problems with the
			 * on-chip data-cache when the WA bit is set.
			 */
			if (pmap_aliasmask) {
				if (pv->pv_flags & PV_CI) {
#ifdef DEBUG
					if (pmapdebug & PDB_CACHE)
					printf("enter: pa %x already CI'ed\n",
					       pa);
#endif
					checkpv = cacheable = FALSE;
				} else if (npv->pv_next ||
					   ((pmap == pv->pv_pmap ||
					     pmap == kernel_pmap ||
					     pv->pv_pmap == kernel_pmap) &&
					    ((pv->pv_va & pmap_aliasmask) !=
					     (va & pmap_aliasmask)))) {
#ifdef DEBUG
					if (pmapdebug & PDB_CACHE)
					printf("enter: pa %x CI'ing all\n",
					       pa);
#endif
					cacheable = FALSE;
					pv->pv_flags |= PV_CI;
#ifdef PMAPSTATS
					enter_stats.ci++;
#endif
				}
			}
#endif
		}
		splx(s);
	}
	/*
	 * Assumption: if it is not part of our managed memory
	 * then it must be device memory which may be volatile.
	 */
	else if (pmap_initialized) {
		checkpv = cacheable = FALSE;
#ifdef PMAPSTATS
		enter_stats.unmanaged++;
#endif
	}
	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
#ifdef HAVEVAC
	/*
	 * Purge kernel side of VAC to ensure we get correct state
	 * of HW bits so we don't clobber them.
	 */
	if (pmap_aliasmask)
		DCIS();
#endif
	/*
	 * Build the new PTE.
	 */
	npte = pa | pte_prot(pmap, prot) | (*(int *)pte & (PG_M|PG_U)) | PG_V;
	if (wired)
		npte |= PG_W;
	if (!checkpv && !cacheable)
		npte |= PG_CI;
#if defined(LUNA2)
	if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
#ifdef DEBUG
		if (dowriteback && (dokwriteback || pmap != kernel_pmap))
#endif
		npte |= PG_CCB;
#endif
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: new pte value %x\n", npte);
#endif
	/*
	 * Remember if this was a wiring-only change.
	 * If so, we need not flush the TLB and caches.
	 */
	wired = ((*(int *)pte ^ npte) == PG_W);
#if defined(LUNA2)
	if (mmutype == MMU_68040 && !wired) {
		DCFP(pa);
		ICPP(pa);
	}
#endif
	*(int *)pte = npte;
	if (!wired && active_pmap(pmap))
		TBIS(va);
#ifdef HAVEVAC
	/*
	 * The following is executed if we are entering a second
	 * (or greater) mapping for a physical page and the mappings
	 * may create an aliasing problem.  In this case we must
	 * cache inhibit the descriptors involved and flush any
	 * external VAC.
	 */
	if (checkpv && !cacheable) {
		pmap_changebit(pa, PG_CI, TRUE);
		DCIA();
#ifdef PMAPSTATS
		enter_stats.flushes++;
#endif
#ifdef DEBUG
		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
		    (PDB_CACHE|PDB_PVDUMP))
			pmap_pvdump(pa);
#endif
	}
#ifdef DEBUG
	else if (pmapvacflush & PVF_ENTER) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
#endif
#endif
#ifdef DEBUG
	if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap)
		pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
#endif
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t	pmap;
	vm_offset_t	va;
	boolean_t	wired;
{
	register pt_entry_t *pte;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);
#ifdef DEBUG
	/*
	 * Page table page is not allocated.
	 * Should this ever happen?  Ignore it for now,
	 * we don't want to force allocation of unnecessary PTE pages.
	 */
	if (!pmap_ste_v(pmap, va)) {
		if (pmapdebug & PDB_PARANOIA)
			printf("pmap_change_wiring: invalid STE for %x\n", va);
		return;
	}
	/*
	 * Page not valid.  Should this ever happen?
	 * Just continue and change wiring anyway.
	 */
	if (!pmap_pte_v(pte)) {
		if (pmapdebug & PDB_PARANOIA)
			printf("pmap_change_wiring: invalid PTE for %x\n", va);
	}
#endif
	/*
	 * If wiring actually changed (always?) set the wire bit and
	 * update the wire count.  Note that wiring is not a hardware
	 * characteristic so there is no need to invalidate the TLB.
	 */
	if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
		pmap_pte_set_w(pte, wired);
		if (wired)
			pmap->pm_stats.wired_count++;
		else
			pmap->pm_stats.wired_count--;
	}
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif
	pa = 0;
	if (pmap && pmap_ste_v(pmap, va))
		pa = *(int *)pmap_pte(pmap, va);
	if (pa)
		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return(pa);
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t		dst_pmap;
	pmap_t		src_pmap;
	vm_offset_t	dst_addr;
	vm_size_t	len;
	vm_offset_t	src_addr;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to insure that a thread about
 *	to run will see a semantically correct world.
 */
void pmap_update()
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
	TBIA();
}

/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t		pmap;
{
	register vm_offset_t pa;
	register pv_entry_t pv;
	register int *pte;
	vm_offset_t kpa;
	int s;

#ifdef DEBUG
	int *ste;
	int opmapdebug;
#endif
	if (pmap != kernel_pmap)
		return;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
#endif
#ifdef PMAPSTATS
	kpt_stats.collectscans++;
#endif
	s = splimp();
	for (pa = vm_first_phys; pa < vm_last_phys; pa += PAGE_SIZE) {
		register struct kpt_page *kpt, **pkpt;

		/*
		 * Locate physical pages which are being used as kernel
		 * page table pages.
		 */
		pv = pa_to_pvh(pa);
		if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
			continue;
		do {
			if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
				break;
		} while (pv = pv->pv_next);
		if (pv == NULL)
			continue;
#ifdef DEBUG
		if (pv->pv_va < (vm_offset_t)Sysmap ||
		    pv->pv_va >= (vm_offset_t)Sysmap + LUNA_MAX_PTSIZE)
			printf("collect: kernel PT VA out of range\n");
		else
			goto ok;
		pmap_pvdump(pa);
		continue;
ok:
#endif
		pte = (int *)(pv->pv_va + LUNA_PAGE_SIZE);
		while (--pte >= (int *)pv->pv_va && *pte == PG_NV)
			;
		if (pte >= (int *)pv->pv_va)
			continue;
#ifdef DEBUG
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
			printf("collect: freeing KPT page at %x (ste %x@%x)\n",
			       pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
			opmapdebug = pmapdebug;
			pmapdebug |= PDB_PTPAGE;
		}
		ste = (int *)pv->pv_ptste;
#endif
		/*
		 * If all entries were invalid we can remove the page.
		 * We call pmap_remove_entry to take care of invalidating
		 * ST and Sysptmap entries.
		 */
		kpa = pmap_extract(pmap, pv->pv_va);
		pmap_remove_mapping(pmap, pv->pv_va, PT_ENTRY_NULL,
				    PRM_TFLUSH|PRM_CFLUSH);
		/*
		 * Use the physical address to locate the original

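Editor's note (not part of pmap.c): the expression pa = (pa & PG_FRAME) | (va & ~PG_FRAME) in pmap_extract() above composes a physical address from the page-frame bits of the PTE and the byte offset within the page taken from the virtual address. Below is a minimal stand-alone sketch of that same composition; the PG_FRAME value, page size, and the PTE/VA sample values are hypothetical stand-ins chosen for illustration, not the kernel's actual machine-dependent constants.

#include <stdio.h>

/* Hypothetical constant for illustration only (4KB pages assumed);
 * the kernel takes the real mask from its machine-dependent pte.h. */
#define PG_FRAME 0xfffff000u

int
main(void)
{
	unsigned int pte = 0x0012d061;	/* hypothetical PTE: frame bits | status bits */
	unsigned int va  = 0x00803456;	/* hypothetical virtual address */

	/* Same composition as pmap_extract(): frame bits from the PTE,
	 * page offset from the virtual address. */
	unsigned int pa = (pte & PG_FRAME) | (va & ~PG_FRAME);

	printf("pa = 0x%08x\n", pa);	/* prints pa = 0x0012d456 */
	return (0);
}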