pmap.c

Early FreeBSD implementation
Language: C
Page 1 of 3
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, sva, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry & PG_FRAME)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			MachTLBFlushAddr(sva);
#ifdef DEBUG
			remove_stats.flushes++;
#endif
		}
		return;
	}

#ifdef DIAGNOSTIC
	if (eva > VM_MAXUSER_ADDRESS)
		panic("pmap_remove: uva not in range");
#endif
	while (sva < eva) {
		nssva = pmax_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!(pte = pmap_segmap(pmap, sva))) {
			sva = nssva;
			continue;
		}
		/*
		 * Invalidate every valid mapping within this segment.
		 */
		pte += (sva >> PGSHIFT) & (NPTEPG - 1);
		for (; sva < nssva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, sva, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry & PG_FRAME)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			if (pmap->pm_tlbgen == tlbpid_gen) {
				MachTLBFlushAddr(sva | (pmap->pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT));
#ifdef DEBUG
				remove_stats.flushes++;
#endif
			}
		}
	}
}
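/*
 * Illustrative sketch (not part of the original file): a typical caller
 * removes a page-aligned user range, e.g. when an address space is torn
 * down.  "map", "start" and "end" are hypothetical; vm_map_pmap(),
 * trunc_page() and round_page() are the standard 4.4BSD VM macros.
 *
 *	pmap_remove(vm_map_pmap(map), trunc_page(start), round_page(end));
 *
 * Note that pmap_remove() tolerates unallocated segments inside the
 * range and skips over them NBSEG bytes at a time.
 */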
/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t pa;
	vm_prot_t prot;
{
	register pv_entry_t pv;
	register vm_offset_t va;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (!IS_VM_PHYSADDR(pa))
		return;

	switch (prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * Loop over all current mappings setting/clearing as appropriate.
		 */
		if (pv->pv_pmap != NULL) {
			for (; pv; pv = pv->pv_next) {
				extern vm_offset_t pager_sva, pager_eva;

				va = pv->pv_va;
				/*
				 * XXX don't write protect pager mappings
				 */
				if (va >= pager_sva && va < pager_eva)
					continue;
				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
					prot);
			}
		}
		splx(s);
		break;

	/* remove_all */
	default:
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
			pmap_remove(pv->pv_pmap, pv->pv_va,
				    pv->pv_va + PAGE_SIZE);
		}
		splx(s);
	}
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register vm_offset_t nssva;
	register pt_entry_t *pte;
	register unsigned entry;
	u_int p;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;

	if (!pmap->pm_segtab) {
		/*
		 * Change entries in kernel pmap.
		 * This will trap if the page is writeable (in order to set
		 * the dirty bit) even if the dirty bit is already set. The
		 * optimization isn't worth the effort since this code isn't
		 * executed much. The common case is to make a user page
		 * read-only.
		 */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS || eva > virtual_end)
			panic("pmap_protect: kva not in range");
#endif
		pte = kvtopte(sva);
		for (; sva < eva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			entry = (entry & ~(PG_M | PG_RO)) | p;
			pte->pt_entry = entry;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			MachTLBUpdate(sva, entry);
		}
		return;
	}

#ifdef DIAGNOSTIC
	if (eva > VM_MAXUSER_ADDRESS)
		panic("pmap_protect: uva not in range");
#endif
	while (sva < eva) {
		nssva = pmax_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!(pte = pmap_segmap(pmap, sva))) {
			sva = nssva;
			continue;
		}
		/*
		 * Change protection on every valid mapping within this segment.
		 */
		pte += (sva >> PGSHIFT) & (NPTEPG - 1);
		for (; sva < nssva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			entry = (entry & ~(PG_M | PG_RO)) | p;
			pte->pt_entry = entry;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			if (pmap->pm_tlbgen == tlbpid_gen)
				MachTLBUpdate(sva | (pmap->pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT), entry);
		}
	}
}
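/*
 * Illustrative sketch (not in the original source): write-protecting a
 * user range, e.g. for copy-on-write, reduces to rewriting the
 * PG_RO/PG_RW bits of each valid PTE.  "map", "start" and "end" are
 * hypothetical; vm_map_pmap() is the standard 4.4BSD accessor.
 *
 *	pmap_protect(vm_map_pmap(map), start, end, VM_PROT_READ);
 *
 * Each affected PTE gets PG_RO set and PG_M cleared, so the next store
 * to the page traps and the modified state can be tracked.
 */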
/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register u_int npte;
	register int i, j;
	vm_page_t mem;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
#ifdef DIAGNOSTIC
	if (!pmap)
		panic("pmap_enter: pmap");
	if (!pmap->pm_segtab) {
		enter_stats.kernel++;
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_enter: kva");
	} else {
		enter_stats.user++;
		if (va >= VM_MAXUSER_ADDRESS)
			panic("pmap_enter: uva");
	}
	if (pa & 0x80000000)
		panic("pmap_enter: pa");
	if (!(prot & VM_PROT_READ))
		panic("pmap_enter: prot");
#endif

	if (IS_VM_PHYSADDR(pa)) {
		register pv_entry_t pv, npv;
		int s;

		if (!(prot & VM_PROT_WRITE))
			npte = PG_RO;
		else {
			register vm_page_t mem;

			mem = PHYS_TO_VM_PAGE(pa);
			if ((int)va < 0) {
				/*
				 * Don't bother to trap on kernel writes,
				 * just record page as dirty.
				 */
				npte = PG_M;
				mem->flags &= ~PG_CLEAN;
			} else
#ifdef ATTR
				if ((pmap_attributes[atop(pa)] &
				    PMAP_ATTR_MOD) || !(mem->flags & PG_CLEAN))
#else
				if (!(mem->flags & PG_CLEAN))
#endif
					npte = PG_M;
			else
				npte = 0;
		}

#ifdef DEBUG
		enter_stats.managed++;
#endif
		/*
		 * Enter the pmap and virtual address into the
		 * physical to virtual map table.
		 */
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("pmap_enter: pv %x: was %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		if (pv->pv_pmap == NULL) {
			/*
			 * No entries yet, use header as the first entry
			 */
#ifdef DEBUG
			if (pmapdebug & PDB_PVENTRY)
				printf("pmap_enter: first pv: pmap %x va %x\n",
					pmap, va);
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
		} else {
			/*
			 * There is at least one other VA mapping this page.
			 * Place this entry after the header.
			 *
			 * Note: the entry may already be in the table if
			 * we are only changing the protection bits.
			 */
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va) {
#ifdef DIAGNOSTIC
					unsigned entry;

					if (!pmap->pm_segtab)
						entry = kvtopte(va)->pt_entry;
					else {
						pte = pmap_segmap(pmap, va);
						if (pte) {
							pte += (va >> PGSHIFT) &
							    (NPTEPG - 1);
							entry = pte->pt_entry;
						} else
							entry = 0;
					}
					if (!(entry & PG_V) ||
					    (entry & PG_FRAME) != pa)
						printf(
			"pmap_enter: found va %x pa %x in pv_table but != %x\n",
							va, pa, entry);
#endif
					goto fnd;
				}
#ifdef DEBUG
			if (pmapdebug & PDB_PVENTRY)
				printf("pmap_enter: new pv: pmap %x va %x\n",
					pmap, va);
#endif
			/* can this cause us to recurse forever? */
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		fnd:
			;
		}
		splx(s);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory which may be volatile.
		 */
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
		npte = (prot & VM_PROT_WRITE) ? (PG_M | PG_N) : (PG_RO | PG_N);
	}

	/*
	 * The only time we need to flush the cache is if we
	 * execute from a physical address and then change the data.
	 * This is the best place to do this.
	 * pmap_protect() and pmap_remove() are mostly used to switch
	 * between R/W and R/O pages.
	 * NOTE: we only support cache flush for read only text.
	 */
	if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
		MachFlushICache(MACH_PHYS_TO_CACHED(pa), PAGE_SIZE);

	if (!pmap->pm_segtab) {
		/* enter entries into kernel pmap */
		pte = kvtopte(va);
		npte |= pa | PG_V | PG_G;
		if (wired) {
			pmap->pm_stats.wired_count += pmaxpagesperpage;
			npte |= PG_WIRED;
		}
		i = pmaxpagesperpage;
		do {
			if (!(pte->pt_entry & PG_V)) {
				pmap->pm_stats.resident_count++;
			} else {
#ifdef DIAGNOSTIC
				if (pte->pt_entry & PG_WIRED)
					panic("pmap_enter: kernel wired");
#endif
			}
			/*
			 * Update the same virtual address entry.
			 */
			MachTLBUpdate(va, npte);
			pte->pt_entry = npte;
			va += NBPG;
			npte += NBPG;
			pte++;
		} while (--i != 0);
		return;
	}

	if (!(pte = pmap_segmap(pmap, va))) {
		mem = vm_page_alloc1();
		pmap_zero_page(VM_PAGE_TO_PHYS(mem));
		pmap_segmap(pmap, va) = pte = (pt_entry_t *)
			MACH_PHYS_TO_CACHED(VM_PAGE_TO_PHYS(mem));
	}
	pte += (va >> PGSHIFT) & (NPTEPG - 1);

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * PMAX pages in a MACH page.
	 */
	npte |= pa | PG_V;
	if (wired) {
		pmap->pm_stats.wired_count += pmaxpagesperpage;
		npte |= PG_WIRED;
	}
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER) {
		printf("pmap_enter: new pte %x", npte);
		if (pmap->pm_tlbgen == tlbpid_gen)
			printf(" tlbpid %d", pmap->pm_tlbpid);
		printf("\n");
	}
#endif
	i = pmaxpagesperpage;
	do {
		pte->pt_entry = npte;
		if (pmap->pm_tlbgen == tlbpid_gen)
			MachTLBUpdate(va | (pmap->pm_tlbpid <<
				VMMACH_TLB_PID_SHIFT), npte);
		va += NBPG;
		npte += NBPG;
		pte++;
	} while (--i != 0);
}
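/*
 * Illustrative sketch (not part of the original file): the page-fault
 * path typically enters the mapping for a freshly resolved page.
 * "map", "va" and "pg" are hypothetical; VM_PAGE_TO_PHYS() is the same
 * macro used by pmap_enter() above.
 *
 *	pmap_enter(vm_map_pmap(map), va, VM_PAGE_TO_PHYS(pg),
 *	    VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *
 * For a user pmap this allocates the segment's page-table page on
 * demand and writes one PTE per PMAX page within the (possibly larger)
 * machine-independent page.
 */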
/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;
	u_int p;
	register int i;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_WIRING))
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	p = wired ? PG_WIRED : 0;

	/*
	 * Don't need to flush the TLB since PG_WIRED is only in software.
