⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 pmap.c

📁 早期freebsd实现
💻 C
📖 第 1 页 / 共 5 页
字号:
		 * (kmem_alloc assigned) address for the page and put
		 * that page back on the free list.
		 */
		/*
		 * Walk the in-use KPT page list looking for the entry
		 * whose physical address matches kpa, keeping pkpt
		 * pointing at the link that references it so the entry
		 * can be unlinked below.
		 */
		for (pkpt = &kpt_used_list, kpt = *pkpt;
		     kpt != (struct kpt_page *)0;
		     pkpt = &kpt->kpt_next, kpt = *pkpt)
			if (kpt->kpt_pa == kpa)
				break;
#ifdef DEBUG
		if (kpt == (struct kpt_page *)0)
			panic("pmap_collect: lost a KPT page");
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
			printf("collect: %x (%x) to free list\n",
			       kpt->kpt_va, kpa);
#endif
		/* Unlink from the used list, push onto the free list. */
		*pkpt = kpt->kpt_next;
		kpt->kpt_next = kpt_free_list;
		kpt_free_list = kpt;
#ifdef PMAPSTATS
		kpt_stats.kptinuse--;
		kpt_stats.collectpages++;
#endif
#ifdef DEBUG
		/*
		 * Restore the saved debug mask and sanity check that the
		 * kernel STE and the Sysptmap entry for it are now invalid.
		 */
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
			pmapdebug = opmapdebug;
		if (*ste)
			printf("collect: kernel STE at %x still valid (%x)\n",
			       ste, *ste);
		ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
		if (*ste)
			printf("collect: kernel PTmap at %x still valid (%x)\n",
			       ste, *ste);
#endif
	}
	splx(s);
}

/*
 * pmap_activate:
 *
 *	Activate the given pmap as the current address-space context.
 *	All machine-dependent work is done by the PMAP_ACTIVATE()
 *	macro; the third argument tells it whether this pmap belongs
 *	to the currently running process (curproc).
 */
void
pmap_activate(pmap, pcbp)
	register pmap_t pmap;
	struct pcb *pcbp;
{
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
		printf("pmap_activate(%x, %x)\n", pmap, pcbp);
#endif
	PMAP_ACTIVATE(pmap, pcbp, pmap == curproc->p_vmspace->vm_map.pmap);
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bzero to clear its contents, one machine dependent page
 *	at a time.
 *
 *	XXX this is a bad implementation for virtual cache machines
 *	(320/350) because pmap_enter doesn't cache-inhibit the temporary
 *	kernel mapping and we wind up with data cached for that KVA.
 *	It is probably a win for physical cache machines (370/380)
 *	as the cache loading is not wasted.
*/voidpmap_zero_page(phys)	vm_offset_t phys;{	register vm_offset_t kva;	extern caddr_t CADDR1;#ifdef DEBUG	if (pmapdebug & PDB_FOLLOW)		printf("pmap_zero_page(%x)\n", phys);#endif	kva = (vm_offset_t) CADDR1;	pmap_enter(kernel_pmap, kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);	bzero((caddr_t)kva, LUNA_PAGE_SIZE);	pmap_remove_mapping(kernel_pmap, kva, PT_ENTRY_NULL,			    PRM_TFLUSH|PRM_CFLUSH);}/* *	pmap_copy_page copies the specified (machine independent) *	page by mapping the page into virtual memory and using *	bcopy to copy the page, one machine dependent page at a *	time. * * *	XXX this is a bad implementation for virtual cache machines *	(320/350) because pmap_enter doesn't cache-inhibit the temporary *	kernel mapping and we wind up with data cached for that KVA. *	It is probably a win for physical cache machines (370/380) *	as the cache loading is not wasted. */voidpmap_copy_page(src, dst)	vm_offset_t src, dst;{	register vm_offset_t skva, dkva;	extern caddr_t CADDR1, CADDR2;#ifdef DEBUG	if (pmapdebug & PDB_FOLLOW)		printf("pmap_copy_page(%x, %x)\n", src, dst);#endif	skva = (vm_offset_t) CADDR1;	dkva = (vm_offset_t) CADDR2;	pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);	pmap_enter(kernel_pmap, dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);	copypage((caddr_t)skva, (caddr_t)dkva);	/* CADDR1 and CADDR2 are virtually contiguous */	pmap_remove(kernel_pmap, skva, skva+2*PAGE_SIZE);}/* *	Routine:	pmap_pageable *	Function: *		Make the specified pages (by pmap, offset) *		pageable (or not) as requested. * *		A page which is not pageable may not take *		a fault; therefore, its page table entry *		must remain valid for the duration. * *		This routine is merely advisory; pmap_enter *		will specify that these pages are to be wired *		down (or not) as appropriate. 
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t		pmap;
	vm_offset_t	sva, eva;
	boolean_t	pageable;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
	/*
	 * If we are making a PT page pageable then all valid
	 * mappings must be gone from that page.  Hence it should
	 * be all zeros and there is no need to clean it.
	 * Assumptions:
	 *	- we are called with only one page at a time
	 *	- PT pages have only one pv_table entry
	 */
	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
		register pv_entry_t pv;
		register vm_offset_t pa;

#ifdef DEBUG
		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
			printf("pmap_pageable(%x, %x, %x, %x)\n",
			       pmap, sva, eva, pageable);
#endif
		/* No valid segment entry -> nothing mapped at sva. */
		if (!pmap_ste_v(pmap, sva))
			return;
		pa = pmap_pte_pa(pmap_pte(pmap, sva));
		/* Unmanaged physical page: no pv entry to examine. */
		if (pa < vm_first_phys || pa >= vm_last_phys)
			return;
		pv = pa_to_pvh(pa);
		/* Not a PT page (no associated segment table entry). */
		if (pv->pv_ptste == NULL)
			return;
#ifdef DEBUG
		/* A PT page should have exactly one mapping, at sva. */
		if (pv->pv_va != sva || pv->pv_next) {
			printf("pmap_pageable: bad PT page va %x next %x\n",
			       pv->pv_va, pv->pv_next);
			return;
		}
#endif
		/*
		 * Mark it unmodified to avoid pageout
		 */
		pmap_changebit(pa, PG_M, FALSE);
#ifdef DEBUG
		if (pmapdebug & PDB_PTPAGE)
			printf("pmap_pageable: PT page %x(%x) unmodified\n",
			       sva, *(int *)pmap_pte(pmap, sva));
		if (pmapdebug & PDB_WIRING)
			pmap_check_wiring("pageable", sva);
#endif
	}
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
	pmap_changebit(pa, PG_M, FALSE);
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
	pmap_changebit(pa, PG_U, FALSE);
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_U);
		printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_U));
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_M);
		printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_M));
}

/*
 *	pmap_phys_address:
 *
 *	Convert a physical page number to a physical address
 *	(machine-dependent page-to-byte conversion).
 */
vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return(luna_ptob(ppn));
}

/*
 * Miscellaneous support routines follow
 */

/*
 * Invalidate a single page denoted by pmap/va.
 * If (pte != NULL), it is the already computed PTE for the page.
 * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
 * If (flags & PRM_CFLUSH), we must flush/invalidate any cache information.
 */
/* static */
void
pmap_remove_mapping(pmap, va, pte, flags)
	register pmap_t pmap;
	register vm_offset_t va;
	register pt_entry_t *pte;
	int flags;
{
	register vm_offset_t pa;
	register pv_entry_t pv, npv;
	pmap_t ptpmap;
	int *ste, s, bits;
#ifdef DEBUG
	pt_entry_t opte;

	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove_mapping(%x, %x, %x, %x)\n",
		       pmap, va, pte, flags);
#endif
	/*
	 * PTE not provided, compute it from pmap and va.
	 * A PTE of PG_NV means there is no mapping to remove.
	 */
	if (pte == PT_ENTRY_NULL) {
		pte = pmap_pte(pmap, va);
		if (*(int *)pte == PG_NV)
			return;
	}
#ifdef HAVEVAC
	if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
		/*
		 * Purge kernel side of VAC to ensure we get the correct
		 * state of any hardware maintained bits.
 */
		DCIS();
#ifdef PMAPSTATS
		remove_stats.sflushes++;
#endif
		/*
		 * If this is a non-CI user mapping for the current process,
		 * flush the VAC.  Note that the kernel side was flushed
		 * above so we don't worry about non-CI kernel mappings.
		 */
		if (pmap == curproc->p_vmspace->vm_map.pmap &&
		    !pmap_pte_ci(pte)) {
			DCIU();
#ifdef PMAPSTATS
			remove_stats.uflushes++;
#endif
		}
	}
#endif
	pa = pmap_pte_pa(pte);
#ifdef DEBUG
	opte = *pte;	/* remember the old PTE for debug printout below */
#endif
#ifdef PMAPSTATS
	remove_stats.removes++;
#endif
	/*
	 * Update statistics
	 */
	if (pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;
	pmap->pm_stats.resident_count--;

	/*
	 * Invalidate the PTE after saving the reference modify info.
	 */
#ifdef DEBUG
	if (pmapdebug & PDB_REMOVE)
		printf("remove: invalidating pte at %x\n", pte);
#endif
	bits = *(int *)pte & (PG_U|PG_M);
	*(int *)pte = PG_NV;
	/* Only flush the TLB entry if this pmap is currently active. */
	if ((flags & PRM_TFLUSH) && active_pmap(pmap))
		TBIS(va);
	/*
	 * For user mappings decrement the wiring count on
	 * the PT page.  We do this after the PTE has been
	 * invalidated because vm_map_pageable winds up in
	 * pmap_pageable which clears the modify bit for the
	 * PT page.
	 */
	if (pmap != kernel_pmap) {
		(void) vm_map_pageable(pt_map, trunc_page(pte),
				       round_page(pte+1), TRUE);
#ifdef DEBUG
		if (pmapdebug & PDB_WIRING)
			pmap_check_wiring("remove", trunc_page(pte));
#endif
	}
	/*
	 * If this isn't a managed page, we are all done.
	 */
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;
	/*
	 * Otherwise remove it from the PV table
	 * (raise IPL since we may be called at interrupt time).
	 */
	pv = pa_to_pvh(pa);
	ste = (int *)0;
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		/* Match is in the pv header itself. */
		ste = (int *)pv->pv_ptste;
		ptpmap = pv->pv_ptpmap;
		npv = pv->pv_next;
		if (npv) {
			/* Copy the next entry up into the header. */
			npv->pv_flags = pv->pv_flags;
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef PMAPSTATS
		remove_stats.pvfirst++;
#endif
	} else {
		/* Search the chain for the (pmap, va) entry. */
		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
#ifdef PMAPSTATS
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				break;
			pv = npv;
		}
#ifdef DEBUG
		if (npv == NULL)
			panic("pmap_remove: PA not in pv_tab");
#endif
		ste = (int *)npv->pv_ptste;
		ptpmap = npv->pv_ptpmap;
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
		pv = pa_to_pvh(pa);
	}
#ifdef HAVEVAC
	/*
	 * If only one mapping left we no longer need to cache inhibit
	 */
	if (pmap_aliasmask &&
	    pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
#ifdef DEBUG
		if (pmapdebug & PDB_CACHE)
			printf("remove: clearing CI for pa %x\n", pa);
#endif
		pv->pv_flags &= ~PV_CI;
		pmap_changebit(pa, PG_CI, FALSE);
#ifdef DEBUG
		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
		    (PDB_CACHE|PDB_PVDUMP))
			pmap_pvdump(pa);
#endif
	}
#endif
	/*
	 * If this was a PT page we must also remove the
	 * mapping from the associated segment table.
	 */
	if (ste) {
#ifdef PMAPSTATS
		remove_stats.ptinvalid++;
#endif
#ifdef DEBUG
		if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
			printf("remove: ste was %x@%x pte was %x@%x\n",
			       *ste, ste, *(int *)&opte, pmap_pte(pmap, va));
#endif
#if defined(LUNA2)
		if (mmutype == MMU_68040) {
			/*
			 * On the 68040 a PT page is described by a run of
			 * level-3 descriptors; invalidate the whole run.
			 */
			int *este = &ste[NPTEPG/SG4_LEV3SIZE];

			while (ste < este)
				*ste++ = SG_NV;
#ifdef DEBUG
			ste -= NPTEPG/SG4_LEV3SIZE;
#endif
		} else
#endif
		*ste = SG_NV;
		/*
		 * If it was a user PT page, we decrement the
		 * reference count on the segment table as well,
		 * freeing it if it is now empty.
 */
		if (ptpmap != kernel_pmap) {
#ifdef DEBUG
			if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
				printf("remove: stab %x, refcnt %d\n",
				       ptpmap->pm_stab, ptpmap->pm_sref - 1);
			if ((pmapdebug & PDB_PARANOIA) &&
			    ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
				panic("remove: bogus ste");
#endif
			/*
			 * Last PT page gone: release the segment table
			 * and point the pmap back at the shared zero
			 * segment table (Segtabzero/Segtabzeropa).
			 */
			if (--(ptpmap->pm_sref) == 0) {
#ifdef DEBUG
				if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
					printf("remove: free stab %x\n",
					       ptpmap->pm_stab);
#endif
				kmem_free(kernel_map,
					  (vm_offset_t)ptpmap->pm_stab,
					  LUNA_STSIZE);
				ptpmap->pm_stab = Segtabzero;
				ptpmap->pm_stpa = Segtabzeropa;
#if defined(LUNA2)
				if (mmutype == MMU_68040)
					ptpmap->pm_stfree = protostfree;
#endif
				ptpmap->pm_stchanged = TRUE;
				/*
				 * XXX may have changed segment table

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -