
pmap.c

Collection: 早期freebsd实现 (early FreeBSD implementation)
Language: C
Page: 1 of 5
				 * pointer for current process so
				 * update now to reload hardware.
				 */
				if (ptpmap == curproc->p_vmspace->vm_map.pmap)
					PMAP_ACTIVATE(ptpmap,
					    (struct pcb *)curproc->p_addr, 1);
			}
		}
#if 0
		/*
		 * XXX this should be unnecessary as we have been
		 * flushing individual mappings as we go.
		 */
		if (ptpmap == kernel_pmap)
			TBIAS();
		else
			TBIAU();
#endif
		pv->pv_flags &= ~PV_PTPAGE;
		ptpmap->pm_ptpages--;
	}
	/*
	 * Update saved attributes for managed page
	 */
	pmap_attributes[pa_index(pa)] |= bits;
	splx(s);
}

/* static */
boolean_t
pmap_testbit(pa, bit)
	register vm_offset_t pa;
	int bit;
{
	register pv_entry_t pv;
	register int *pte;
	int s;

	if (pa < vm_first_phys || pa >= vm_last_phys)
		return(FALSE);

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Check saved info first
	 */
	if (pmap_attributes[pa_index(pa)] & bit) {
		splx(s);
		return(TRUE);
	}
#ifdef HAVEVAC
	/*
	 * Flush VAC to get correct state of any hardware maintained bits.
	 */
	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
		DCIS();
#endif
	/*
	 * Not found, check current mappings returning
	 * immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
			if (*pte & bit) {
				splx(s);
				return(TRUE);
			}
		}
	}
	splx(s);
	return(FALSE);
}
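/*
 * Set or clear (per `setem') the given PTE bit in every current
 * mapping of the managed page at physical address pa.  When
 * clearing, the page's saved attribute bits are cleared as well,
 * and affected caches and TLB entries are flushed.
 */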
/* static */
void
pmap_changebit(pa, bit, setem)
	register vm_offset_t pa;
	int bit;
	boolean_t setem;
{
	register pv_entry_t pv;
	register int *pte, npte;
	vm_offset_t va;
	int s;
	boolean_t firstpage = TRUE;
#ifdef PMAPSTATS
	struct chgstats *chgp;
#endif

#ifdef DEBUG
	if (pmapdebug & PDB_BITS)
		printf("pmap_changebit(%x, %x, %s)\n",
		       pa, bit, setem ? "set" : "clear");
#endif
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

#ifdef PMAPSTATS
	chgp = &changebit_stats[(bit>>2)-1];
	if (setem)
		chgp->setcalls++;
	else
		chgp->clrcalls++;
#endif
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Clear saved attributes (modify, reference)
	 */
	if (!setem)
		pmap_attributes[pa_index(pa)] &= ~bit;
	/*
	 * Loop over all current mappings setting/clearing as appropriate.
	 * If setting RO do we need to clear the VAC?
	 */
	if (pv->pv_pmap != NULL) {
#ifdef DEBUG
		int toflush = 0;
#endif
		for (; pv; pv = pv->pv_next) {
#ifdef DEBUG
			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
#endif
			va = pv->pv_va;

			/*
			 * XXX don't write protect pager mappings
			 */
			if (bit == PG_RO) {
				extern vm_offset_t pager_sva, pager_eva;

				if (va >= pager_sva && va < pager_eva)
					continue;
			}

			pte = (int *) pmap_pte(pv->pv_pmap, va);
#ifdef HAVEVAC
			/*
			 * Flush VAC to ensure we get correct state of HW bits
			 * so we don't clobber them.
			 */
			if (firstpage && pmap_aliasmask) {
				firstpage = FALSE;
				DCIS();
			}
#endif
			if (setem)
				npte = *pte | bit;
			else
				npte = *pte & ~bit;
			if (*pte != npte) {
#if defined(LUNA2)
				/*
				 * If we are changing caching status or
				 * protection make sure the caches are
				 * flushed (but only once).
				 */
				if (firstpage && mmutype == MMU_68040 &&
				    ((bit == PG_RO && setem) ||
				     (bit & PG_CMASK))) {
					firstpage = FALSE;
					DCFP(pa);
					ICPP(pa);
				}
#endif
				*pte = npte;
				if (active_pmap(pv->pv_pmap))
					TBIS(va);
#ifdef PMAPSTATS
				if (setem)
					chgp->sethits++;
				else
					chgp->clrhits++;
#endif
			}
#ifdef PMAPSTATS
			else {
				if (setem)
					chgp->setmiss++;
				else
					chgp->clrmiss++;
			}
#endif
		}
#if defined(HAVEVAC) && defined(DEBUG)
		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
				DCIA();
			else if (toflush == 2)
				DCIS();
			else
				DCIU();
		}
#endif
	}
	splx(s);
}
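/*
 * Allocate and map a page table page for the PTEs covering va in
 * the given pmap.  A segment table is allocated first if the pmap
 * is still sharing Segtabzero.  Kernel PT pages come from the
 * kernel PT page free list; user PT pages are obtained by faulting
 * on pt_map, letting the VM system supply a zero-filled page.
 */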
/* static */
void
pmap_enter_ptpage(pmap, va)
	register pmap_t pmap;
	register vm_offset_t va;
{
	register vm_offset_t ptpa;
	register pv_entry_t pv;
	st_entry_t *ste;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
		printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
#endif
#ifdef PMAPSTATS
	enter_stats.ptpneeded++;
#endif
	/*
	 * Allocate a segment table if necessary.  Note that it is allocated
	 * from kernel_map and not pt_map.  This keeps user page tables
	 * aligned on segment boundaries in the kernel address space.
	 * The segment table is wired down.  It will be freed whenever the
	 * reference count drops to zero.
	 */
	if (pmap->pm_stab == Segtabzero) {
		pmap->pm_stab = (st_entry_t *)
			kmem_alloc(kernel_map, LUNA_STSIZE);
		pmap->pm_stpa = (st_entry_t *)
			pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_stab);
#if defined(LUNA2)
		if (mmutype == MMU_68040) {
#ifdef DEBUG
			if (dowriteback && dokwriteback)
#endif
			pmap_changebit((vm_offset_t)pmap->pm_stab, PG_CCB, 0);
			pmap->pm_stfree = protostfree;
		}
#endif
		pmap->pm_stchanged = TRUE;
		/*
		 * XXX may have changed segment table pointer for current
		 * process so update now to reload hardware.
		 */
		if (pmap == curproc->p_vmspace->vm_map.pmap)
			PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
			printf("enter: pmap %x stab %x(%x)\n",
			       pmap, pmap->pm_stab, pmap->pm_stpa);
#endif
	}

	ste = pmap_ste(pmap, va);
#if defined(LUNA2)
	/*
	 * Allocate level 2 descriptor block if necessary
	 */
	if (mmutype == MMU_68040) {
		if (!ste->sg_v) {
			int ix;
			caddr_t addr;

			ix = bmtol2(pmap->pm_stfree);
			if (ix == -1)
				panic("enter: out of address space"); /* XXX */
			pmap->pm_stfree &= ~l2tobm(ix);
			addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
			bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
			addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
			*(int *)ste = (u_int)addr | SG_RW | SG_U | SG_V;
#ifdef DEBUG
			if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
				printf("enter: alloc ste2 %d(%x)\n", ix, addr);
#endif
		}
		ste = pmap_ste2(pmap, va);
		/*
		 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
		 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
		 * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
		 * PT page--the unit of allocation.  We set `ste' to point
		 * to the first entry of that chunk which is validated in its
		 * entirety below.
		 */
		ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
			printf("enter: ste2 %x (%x)\n",
			       pmap_ste2(pmap, va), ste);
#endif
	}
#endif
	va = trunc_page((vm_offset_t)pmap_pte(pmap, va));

	/*
	 * In the kernel we allocate a page from the kernel PT page
	 * free list and map it into the kernel page table map (via
	 * pmap_enter).
	 */
	if (pmap == kernel_pmap) {
		register struct kpt_page *kpt;

		s = splimp();
		if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
			/*
			 * No PT pages available.
			 * Try once to free up unused ones.
			 */
#ifdef DEBUG
			if (pmapdebug & PDB_COLLECT)
				printf("enter: no KPT pages, collecting...\n");
#endif
			pmap_collect(kernel_pmap);
			if ((kpt = kpt_free_list) == (struct kpt_page *)0)
				panic("pmap_enter_ptpage: can't get KPT page");
		}
#ifdef PMAPSTATS
		if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
			kpt_stats.kptmaxuse = kpt_stats.kptinuse;
#endif
		kpt_free_list = kpt->kpt_next;
		kpt->kpt_next = kpt_used_list;
		kpt_used_list = kpt;
		ptpa = kpt->kpt_pa;
		bzero((caddr_t)kpt->kpt_va, LUNA_PAGE_SIZE);
		pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
			int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);

			printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
			       ix, *(int *)&Sysptmap[ix], kpt->kpt_va);
		}
#endif
		splx(s);
	}
	/*
	 * For user processes we just simulate a fault on that location
	 * letting the VM system allocate a zero-filled page.
	 */
	else {
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
			printf("enter: about to fault UPT pg at %x\n", va);
#endif
		s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
		if (s != KERN_SUCCESS) {
			printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s);
			panic("pmap_enter: vm_fault failed");
		}
		ptpa = pmap_extract(kernel_pmap, va);
		/*
		 * Mark the page clean now to avoid its pageout (and
		 * hence creation of a pager) between now and when it
		 * is wired; i.e. while it is on a paging queue.
		 */
		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
#ifdef DEBUG
		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
#endif
	}
#if defined(LUNA2)
	/*
	 * Turn off copyback caching of page table pages,
	 * could get ugly otherwise.
	 */
#ifdef DEBUG
	if (dowriteback && dokwriteback)
#endif
	if (mmutype == MMU_68040) {
		int *pte = (int *)pmap_pte(kernel_pmap, va);
#ifdef DEBUG
		if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
			printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n",
			       pmap == kernel_pmap ? "Kernel" : "User",
			       va, ptpa, pte, *pte);
#endif
		pmap_changebit(ptpa, PG_CCB, 0);
	}
#endif
	/*
	 * Locate the PV entry in the kernel for this PT page and
	 * record the STE address.  This is so that we can invalidate
	 * the STE when we remove the mapping for the page.
	 */
	pv = pa_to_pvh(ptpa);
	s = splimp();
	if (pv) {
		pv->pv_flags |= PV_PTPAGE;
		do {
			if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
				break;
		} while (pv = pv->pv_next);
	}
#ifdef DEBUG
	if (pv == NULL)
		panic("pmap_enter_ptpage: PT page not entered");
#endif
	pv->pv_ptste = ste;
	pv->pv_ptpmap = pmap;
#ifdef DEBUG
	if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
		printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
#endif

	/*
	 * Map the new PT page into the segment table.
	 * Also increment the reference count on the segment table if this
	 * was a user page table page.  Note that we don't use vm_map_pageable
	 * to keep the count like we do for PT pages, this is mostly because
	 * it would be difficult to identify ST pages in pmap_pageable to
	 * release them.  We also avoid the overhead of vm_map_pageable.
	 */
#if defined(LUNA2)
	if (mmutype == MMU_68040) {
		st_entry_t *este;

		for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
			*(int *)ste = ptpa | SG_U | SG_RW | SG_V;
			ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
		}
	} else
#endif
	*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
	if (pmap != kernel_pmap) {
		pmap->pm_sref++;
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
			printf("enter: stab %x refcnt %d\n",
			       pmap->pm_stab, pmap->pm_sref);
#endif
	}
#if 0
	/*
	 * Flush stale TLB info.
	 */
	if (pmap == kernel_pmap)
		TBIAS();
	else
		TBIAU();
#endif
	pmap->pm_ptpages++;
	splx(s);
}
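/*
 * Debug-only helpers: pmap_pvdump() prints the PV entry chain for a
 * physical page, and pmap_check_wiring() compares the wired count
 * recorded in the pt_map entry against the number of valid PTEs
 * actually present in the PT page.
 */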
#ifdef DEBUG
/* static */
void
pmap_pvdump(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	printf("pa %x", pa);
	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
		printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
		       pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
		       pv->pv_flags);
	printf("\n");
}

/* static */
void
pmap_check_wiring(str, va)
	char *str;
	vm_offset_t va;
{
	vm_map_entry_t entry;
	register int count, *pte;

	va = trunc_page(va);
	if (!pmap_ste_v(kernel_pmap, va) ||
	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
		return;

	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
		printf("wired_check: entry for %x not found\n", va);
		return;
	}
	count = 0;
	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
		if (*pte)
			count++;
	if (entry->wired_count != count)
		printf("*%s*: %x: w%d/a%d\n",
		       str, va, entry->wired_count, count);
}
#endif
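pmap_testbit() and pmap_changebit() above are the primitives behind the machine-independent reference/modify interface. As a minimal sketch of how the exported calls reduce to them, assuming the usual 4.4BSD m68k arrangement (these wrappers are not on this page of the listing; PG_U is the reference bit and PG_M the modify bit):

/*
 * Sketch, not from this page: thin MI wrappers over the two
 * attribute primitives, in the style of the 4.4BSD m68k pmaps.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{
	/* drop the modify bit everywhere the page is mapped */
	pmap_changebit(pa, PG_M, FALSE);
}

boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
	/* saved attributes are consulted before the live PTEs */
	return (pmap_testbit(pa, PG_U));
}

Keeping the wrappers this thin means all cache and TLB flushing policy stays centralized in pmap_changebit().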
