
pmap.c

Collection: early FreeBSD implementation
Language: C
Page 1 of 5
				setcontext(pm->pm_ctxnum);
				for (; va < nva; va += NBPG) {
					tpte = getpte(va);
					pmap_stats.ps_npg_prot_all++;
					if (tpte & PG_W) {
						pmap_stats.ps_npg_prot_actual++;
						cache_flush_page(va);
						setpte(va, tpte & ~PG_W);
					}
				}
			} else {
				register int pteva;

				/*
				 * No context, hence not cached;
				 * just update PTEs.
				 */
				setcontext(0);
				/* XXX use per-cpu pteva? */
				setsegmap(0, pmeg);
				pteva = VA_VPG(va) * NBPG;
				for (; va < nva; pteva += NBPG, va += NBPG)
					setpte(pteva, getpte(pteva) & ~PG_W);
			}
		}
	}
	simple_unlock(&pm->pm_lock);
	splx(s);
}

/*
 * Change the protection and/or wired status of the given (MI) virtual page.
 * XXX: should have separate function (or flag) telling whether only wiring
 * is changing.
 */
void
pmap_changeprot(pm, va, prot, wired)
	register struct pmap *pm;
	register vm_offset_t va;
	vm_prot_t prot;
	int wired;
{
	register int vseg, tpte, newprot, pmeg, ctx, i, s;

#ifdef DEBUG
	if (pmapdebug & PDB_CHANGEPROT)
		printf("pmap_changeprot(%x, %x, %x, %x)\n",
		    pm, va, prot, wired);
#endif

	write_user_windows();	/* paranoia */

	if (pm == kernel_pmap)
		newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
	else
		newprot = prot & VM_PROT_WRITE ? PG_W : 0;
	vseg = VA_VSEG(va);
	s = splpmap();		/* conservative */
	pmap_stats.ps_changeprots++;

	/* update PTEs in software or hardware */
	if ((pmeg = pm->pm_segmap[vseg]) == seginval) {
		register int *pte = &pm->pm_pte[vseg][VA_VPG(va)];

		/* update in software */
		if ((*pte & PG_PROT) == newprot)
			goto useless;
		*pte = (*pte & ~PG_PROT) | newprot;
	} else {
		/* update in hardware */
		ctx = getcontext();
		if (pm->pm_ctx) {
			/* use current context; flush writeback cache */
			setcontext(pm->pm_ctxnum);
			tpte = getpte(va);
			if ((tpte & PG_PROT) == newprot)
				goto useless;
			if (vactype == VAC_WRITEBACK &&
			    (newprot & PG_W) == 0 &&
			    (tpte & (PG_W | PG_NC)) == PG_W)
				cache_flush_page((int)va);
		} else {
			setcontext(0);
			/* XXX use per-cpu va? */
			setsegmap(0, pmeg);
			va = VA_VPG(va) * NBPG;
			tpte = getpte(va);
			if ((tpte & PG_PROT) == newprot)
				goto useless;
		}
		tpte = (tpte & ~PG_PROT) | newprot;
		setpte(va, tpte);
		setcontext(ctx);
	}
	splx(s);
	return;

useless:
	/* only wiring changed, and we ignore wiring */
	pmap_stats.ps_useless_changeprots++;
	splx(s);
}
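/*
 * Illustrative sketch, not part of the original listing: a hypothetical
 * caller dropping write permission on one user page.  pmap_changeprot()
 * recomputes the PG_W (and, for the kernel pmap, PG_S) bits from the MI
 * protection; the `wired' argument is accepted but ignored by this pmap.
 * The pmap pointer and address below are assumptions, not values from
 * this file.  Kept out of compilation the same way the file disables
 * code elsewhere, with #ifdef notdef.
 */
#ifdef notdef
void
example_make_readonly(pm, va)
	struct pmap *pm;	/* hypothetical target pmap */
	vm_offset_t va;		/* hypothetical mapped page */
{

	pmap_changeprot(pm, va, VM_PROT_READ, 0);
}
#endif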
/*
 * Insert (MI) physical page pa at virtual address va in the given pmap.
 * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
 *
 * If pa is not in the `managed' range it will not be `bank mapped'.
 * This works during bootstrap only because the first 4MB happens to
 * map one-to-one.
 *
 * There may already be something else there, or we might just be
 * changing protections and/or wiring on an existing mapping.
 *	XXX	should have different entry points for changing!
 */
void
pmap_enter(pm, va, pa, prot, wired)
	register struct pmap *pm;
	vm_offset_t va, pa;
	vm_prot_t prot;
	int wired;
{
	register struct pvlist *pv;
	register int pteproto, ctx;

	if (pm == NULL)
		return;

#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		    pm, va, pa, prot, wired);
#endif

	pteproto = PG_V | ((pa & PMAP_TNC) << PG_TNC_SHIFT);
	pa &= ~PMAP_TNC;
	/*
	 * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
	 * since the pvlist no-cache bit might change as a result of the
	 * new mapping.
	 */
	if (managed(pa)) {
		pteproto |= SWTOHW(atop(pa));
		pv = pvhead(pa);
	} else {
		pteproto |= atop(pa) & PG_PFNUM;
		pv = NULL;
	}
	if (prot & VM_PROT_WRITE)
		pteproto |= PG_W;

	ctx = getcontext();
	if (pm == kernel_pmap)
		pmap_enk(pm, va, prot, wired, pv, pteproto | PG_S);
	else
		pmap_enu(pm, va, prot, wired, pv, pteproto);
	setcontext(ctx);
}

/* enter new (or change existing) kernel mapping */
pmap_enk(pm, va, prot, wired, pv, pteproto)
	register struct pmap *pm;
	vm_offset_t va;
	vm_prot_t prot;
	int wired;
	register struct pvlist *pv;
	register int pteproto;
{
	register int vseg, tpte, pmeg, i, s;

	vseg = VA_VSEG(va);
	s = splpmap();		/* XXX way too conservative */
	if (pm->pm_segmap[vseg] != seginval &&
	    (tpte = getpte(va)) & PG_V) {
		register int addr = tpte & PG_PFNUM;

		/* old mapping exists */
		if (addr == (pteproto & PG_PFNUM)) {
			/* just changing protection and/or wiring */
			splx(s);
			pmap_changeprot(pm, va, prot, wired);
			return;
		}

/*printf("pmap_enk: changing existing va=>pa entry\n");*/
		/*
		 * Switcheroo: changing pa for this va.
		 * If old pa was managed, remove from pvlist.
		 * If old page was cached, flush cache.
		 */
		addr = ptoa(HWTOSW(addr));
		if (managed(addr))
			pv_unlink(pvhead(addr), pm, va);
		if (
#ifdef notdef
		    vactype != VAC_NONE &&
#endif
		    (tpte & PG_NC) == 0) {
			setcontext(0);	/* ??? */
			cache_flush_page((int)va);
		}
	} else {
		/* adding new entry */
		pm->pm_npte[vseg]++;
	}

	/*
	 * If the new mapping is for a managed PA, enter into pvlist.
	 * Note that the mapping for a malloc page will always be
	 * unique (hence will never cause a second call to malloc).
	 */
	if (pv != NULL)
		pteproto |= pv_link(pv, pm, va);

	pmeg = pm->pm_segmap[vseg];
	if (pmeg == seginval) {
		register int tva;

		/*
		 * Allocate an MMU entry now (on locked list),
		 * and map it into every context.  Set all its
		 * PTEs invalid (we will then overwrite one, but
		 * this is more efficient than looping twice).
		 */
#ifdef DEBUG
		if (pm->pm_ctx == NULL || pm->pm_ctxnum != 0)
			panic("pmap_enk: kern seg but no kern ctx");
#endif
		pmeg = me_alloc(&me_locked, pm, vseg)->me_pmeg;
		pm->pm_segmap[vseg] = pmeg;
		i = ncontext - 1;
		do {
			setcontext(i);
			setsegmap(va, pmeg);
		} while (--i >= 0);

		/* set all PTEs to invalid, then overwrite one PTE below */
		tva = VA_ROUNDDOWNTOSEG(va);
		i = NPTESG;
		do {
			setpte(tva, 0);
			tva += NBPG;
		} while (--i > 0);
	}

	/* ptes kept in hardware only */
	setpte(va, pteproto);
	splx(s);
}
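/*
 * Illustrative sketch, not part of the original listing: how a fault
 * handler might install a writable, unwired mapping with pmap_enter().
 * For device space, the caller would OR type bits such as PMAP_OBIO or
 * PMAP_NC into `pa' first.  All names and values here are assumptions,
 * and the block is disabled with the file's own #ifdef notdef idiom.
 */
#ifdef notdef
void
example_enter(pm, va, pa)
	struct pmap *pm;	/* hypothetical target pmap */
	vm_offset_t va, pa;	/* hypothetical addresses */
{

	pmap_enter(pm, va, pa, VM_PROT_READ|VM_PROT_WRITE, 0);
}
#endif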
/* enter new (or change existing) user mapping */
pmap_enu(pm, va, prot, wired, pv, pteproto)
	register struct pmap *pm;
	vm_offset_t va;
	vm_prot_t prot;
	int wired;
	register struct pvlist *pv;
	register int pteproto;
{
	register int vseg, *pte, tpte, pmeg, i, s, doflush;

	write_user_windows();		/* XXX conservative */
	vseg = VA_VSEG(va);
	s = splpmap();			/* XXX conservative */

	/*
	 * If there is no space in which the PTEs can be written
	 * while they are not in the hardware, this must be a new
	 * virtual segment.  Get PTE space and count the segment.
	 *
	 * TO SPEED UP CTX ALLOC, PUT SEGMENT BOUNDS STUFF HERE
	 * AND IN pmap_rmu()
	 */
retry:
	pte = pm->pm_pte[vseg];
	if (pte == NULL) {
		/* definitely a new mapping */
		register int size = NPTESG * sizeof *pte;

		pte = (int *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
		if (pm->pm_pte[vseg] != NULL) {
printf("pmap_enter: pte filled during sleep\n");	/* can this happen? */
			free((caddr_t)pte, M_VMPMAP);
			goto retry;
		}
#ifdef DEBUG
		if (pm->pm_segmap[vseg] != seginval)
			panic("pmap_enter: new ptes, but not seginval");
#endif
		bzero((caddr_t)pte, size);
		pm->pm_pte[vseg] = pte;
		pm->pm_npte[vseg] = 1;
	} else {
		/* might be a change: fetch old pte */
		doflush = 0;
		if ((pmeg = pm->pm_segmap[vseg]) == seginval)
			tpte = pte[VA_VPG(va)];	/* software pte */
		else {
			if (pm->pm_ctx) {	/* hardware pte */
				setcontext(pm->pm_ctxnum);
				tpte = getpte(va);
				doflush = 1;
			} else {
				setcontext(0);
				/* XXX use per-cpu pteva? */
				setsegmap(0, pmeg);
				tpte = getpte(VA_VPG(va) * NBPG);
			}
		}
		if (tpte & PG_V) {
			register int addr = tpte & PG_PFNUM;

			/* old mapping exists */
			if (addr == (pteproto & PG_PFNUM)) {
				/* just changing prot and/or wiring */
				splx(s);
				/* caller should call this directly: */
				pmap_changeprot(pm, va, prot, wired);
				return;
			}
			/*
			 * Switcheroo: changing pa for this va.
			 * If old pa was managed, remove from pvlist.
			 * If old page was cached, flush cache.
			 */
/*printf("%s[%d]: pmap_enu: changing existing va(%x)=>pa entry\n",
	curproc->p_comm, curproc->p_pid, va);*/
			addr = ptoa(HWTOSW(addr));
			if (managed(addr))
				pv_unlink(pvhead(addr), pm, va);
			if (
#ifdef notdef
			    vactype != VAC_NONE &&
#endif
			    doflush && (tpte & PG_NC) == 0)
				cache_flush_page((int)va);
		} else {
			/* adding new entry */
			pm->pm_npte[vseg]++;
		}
	}

	if (pv != NULL)
		pteproto |= pv_link(pv, pm, va);

	/*
	 * Update hardware or software PTEs (whichever are active).
	 */
	if ((pmeg = pm->pm_segmap[vseg]) != seginval) {
		/* ptes are in hardware */
		if (pm->pm_ctx)
			setcontext(pm->pm_ctxnum);
		else {
			setcontext(0);
			/* XXX use per-cpu pteva? */
			setsegmap(0, pmeg);
			va = VA_VPG(va) * NBPG;
		}
		setpte(va, pteproto);
	}
	/* update software copy */
	pte += VA_VPG(va);
	*pte = pteproto;

	splx(s);
}

/*
 * Change the wiring attribute for a map/virtual-address pair.
 */
/* ARGSUSED */
void
pmap_change_wiring(pm, va, wired)
	struct pmap *pm;
	vm_offset_t va;
	int wired;
{

	pmap_stats.ps_useless_changewire++;
}

/*
 * Extract the physical page address associated
 * with the given map/virtual_address pair.
 * GRR, the vm code knows; we should not have to do this!
 */
vm_offset_t
pmap_extract(pm, va)
	register struct pmap *pm;
	vm_offset_t va;
{
	register int tpte;
	register int vseg;

	if (pm == NULL) {
		printf("pmap_extract: null pmap\n");
		return (0);
	}
	vseg = VA_VSEG(va);
	if (pm->pm_segmap[vseg] != seginval) {
		register int ctx = getcontext();

		if (pm->pm_ctx) {
			setcontext(pm->pm_ctxnum);
			tpte = getpte(va);
		} else {
			setcontext(0);
			tpte = getpte(VA_VPG(va) * NBPG);
		}
		setcontext(ctx);
	} else {
		register int *pte = pm->pm_pte[vseg];

		if (pte == NULL) {
			printf("pmap_extract: invalid vseg\n");
			return (0);
		}
		tpte = pte[VA_VPG(va)];
	}
	if ((tpte & PG_V) == 0) {
		printf("pmap_extract: invalid pte\n");
		return (0);
	}
	tpte &= PG_PFNUM;
	tpte = HWTOSW(tpte);
	return ((tpte << PGSHIFT) | (va & PGOFSET));
}
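/*
 * Illustrative sketch, not part of the original listing: reverse
 * translation with pmap_extract().  Note that 0 doubles as the "no
 * valid mapping" return, so a page genuinely mapped at physical
 * address 0 is indistinguishable from failure.  The names below are
 * hypothetical; the block is disabled with #ifdef notdef.
 */
#ifdef notdef
void
example_print_pa(pm, va)
	struct pmap *pm;	/* hypothetical pmap */
	vm_offset_t va;		/* hypothetical virtual address */
{
	vm_offset_t pa;

	pa = pmap_extract(pm, va);
	if (pa != 0)
		printf("va %x => pa %x\n", va, pa);
}
#endif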
/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
/* ARGSUSED */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	struct pmap *dst_pmap, *src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.  [This update includes
 * forcing updates of any address map caching.]
 */
void
pmap_update()
{
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
/* ARGSUSED */
void
pmap_collect(pm)
	struct pmap *pm;
{
}

/*
 * Clear the modify bit for the given physical page.
 */
void
pmap_clear_modify(pa)
	register vm_offset_t pa;
{
	register struct pvlist *pv;

	if (managed(pa)) {
		pv = pvhead(pa);
		(void) pv_syncflags(pv);
		pv->pv_flags &= ~PV_MOD;
	}
}

/*
 * Tell whether the given physical page has been modified.
 */
int
pmap_is_modified(pa)
	register vm_offset_t pa;
{
	register struct pvlist *pv;

	if (managed(pa)) {
		pv = pvhead(pa);
		if (pv->pv_flags & PV_MOD || pv_syncflags(pv) & PV_MOD)
			return (1);
	}
	return (0);
}

/*
 * Clear the reference bit for the given physical page.
 */
void
pmap_cl
