
pmap.c

Early FreeBSD implementation
C
Page 1 of 5
void
pmap_reference(pm)
	struct pmap *pm;
{

	if (pm != NULL) {
		simple_lock(&pm->pm_lock);
		pm->pm_refcount++;
		simple_unlock(&pm->pm_lock);
	}
}

static int pmap_rmk(struct pmap *, vm_offset_t, vm_offset_t, int, int, int);
static int pmap_rmu(struct pmap *, vm_offset_t, vm_offset_t, int, int, int);

/*
 * Remove the given range of mapping entries.
 * The starting and ending addresses are already rounded to pages.
 * Sheer lunacy: pmap_remove is often asked to remove nonexistent
 * mappings.
 */
void
pmap_remove(pm, va, endva)
	register struct pmap *pm;
	register vm_offset_t va, endva;
{
	register vm_offset_t nva;
	register int vseg, nleft, s, ctx;
	register int (*rm)(struct pmap *, vm_offset_t, vm_offset_t,
			    int, int, int);

	if (pm == NULL)
		return;

#ifdef DEBUG
	if (pmapdebug & PDB_REMOVE)
		printf("pmap_remove(%x, %x, %x)\n", pm, va, endva);
#endif

	if (pm == kernel_pmap) {
		/*
		 * Removing from kernel address space.
		 */
		rm = pmap_rmk;
	} else {
		/*
		 * Removing from user address space.
		 */
		write_user_windows();
		rm = pmap_rmu;
	}

	ctx = getcontext();
	s = splpmap();		/* XXX conservative */
	simple_lock(&pm->pm_lock);
	for (; va < endva; va = nva) {
		/* do one virtual segment at a time */
		vseg = VA_VSEG(va);
		nva = VSTOVA(vseg + 1);
		if (nva == 0 || nva > endva)
			nva = endva;
		if ((nleft = pm->pm_npte[vseg]) != 0)
			pm->pm_npte[vseg] = (*rm)(pm, va, nva,
			    vseg, nleft, pm->pm_segmap[vseg]);
	}
	simple_unlock(&pm->pm_lock);
	splx(s);
	setcontext(ctx);
}

#define perftest
#ifdef perftest
/* counters, one per possible length */
int	rmk_vlen[NPTESG+1];	/* virtual length per rmk() call */
int	rmk_npg[NPTESG+1];	/* n valid pages per rmk() call */
int	rmk_vlendiff;		/* # times npg != vlen */
#endif

/*
 * The following magic number was chosen because:
 *	1. It is the same amount of work to cache_flush_page 4 pages
 *	   as to cache_flush_segment 1 segment (so at 4 the cost of
 *	   flush is the same).
 *	2. Flushing extra pages is bad (causes cache not to work).
 *	3. The current code, which malloc()s 5 pages for each process
 *	   for a user vmspace/pmap, almost never touches all 5 of those
 *	   pages.
 */
#define	PMAP_RMK_MAGIC	5	/* if > magic, use cache_flush_segment */
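/*
 * In practice the threshold works like this: when the range handed to
 * pmap_rmk() spans more than PMAP_RMK_MAGIC pages, the whole segment is
 * flushed once with cache_flush_segment(); otherwise each valid,
 * cacheable page in the range is flushed individually with
 * cache_flush_page().
 */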
/*
 * Remove a range contained within a single segment.
 * These are egregiously complicated routines.
 */

/* remove from kernel, return new nleft */
static int
pmap_rmk(pm, va, endva, vseg, nleft, pmeg)
	register struct pmap *pm;
	register vm_offset_t va, endva;
	register int vseg, nleft, pmeg;
{
	register int i, tpte, perpage, npg;
	register struct pvlist *pv;
#ifdef perftest
	register int nvalid;
#endif

#ifdef DEBUG
	if (pmeg == seginval)
		panic("pmap_rmk: not loaded");
	if (pm->pm_ctx == NULL)
		panic("pmap_rmk: lost context");
#endif

	setcontext(0);
	/* decide how to flush cache */
	npg = (endva - va) >> PGSHIFT;
	if (npg > PMAP_RMK_MAGIC) {
		/* flush the whole segment */
		perpage = 0;
#ifdef notdef
		if (vactype != VAC_NONE)
#endif
			cache_flush_segment(vseg);
	} else {
		/* flush each page individually; some never need flushing */
		perpage = 1;
	}
#ifdef perftest
	nvalid = 0;
#endif
	while (va < endva) {
		tpte = getpte(va);
		if ((tpte & PG_V) == 0) {
			va += PAGE_SIZE;
			continue;
		}
		pv = NULL;
		/* if cacheable, flush page as needed */
		if ((tpte & PG_NC) == 0) {
#ifdef perftest
			nvalid++;
#endif
			if (perpage)
				cache_flush_page(va);
		}
		if ((tpte & PG_TYPE) == PG_OBMEM) {
			i = ptoa(HWTOSW(tpte & PG_PFNUM));
			if (managed(i)) {
				pv = pvhead(i);
				pv->pv_flags |= MR(tpte);
				pv_unlink(pv, pm, va);
			}
		}
		nleft--;
		setpte(va, 0);
		va += NBPG;
	}
#ifdef perftest
	rmk_vlen[npg]++;
	rmk_npg[nvalid]++;
	if (npg != nvalid)
		rmk_vlendiff++;
#endif

	/*
	 * If the segment is all gone, remove it from everyone and
	 * free the MMU entry.
	 */
	if (nleft == 0) {
		va = VSTOVA(vseg);		/* retract */
		setsegmap(va, seginval);
		for (i = ncontext; --i > 0;) {
			setcontext(i);
			setsegmap(va, seginval);
		}
		me_free(pm, pmeg);
	}
	return (nleft);
}

#ifdef perftest
/* as before but for pmap_rmu */
int	rmu_vlen[NPTESG+1];	/* virtual length per rmu() call */
int	rmu_npg[NPTESG+1];	/* n valid pages per rmu() call */
int	rmu_vlendiff;		/* # times npg != vlen */
int	rmu_noflush;		/* # times rmu does not need to flush at all */
#endif

/*
 * Just like pmap_rmk_magic, but we have a different threshold.
 * Note that this may well deserve further tuning work.
 */
#define	PMAP_RMU_MAGIC	4	/* if > magic, use cache_flush_segment */

/* remove from user */
static int
pmap_rmu(pm, va, endva, vseg, nleft, pmeg)
	register struct pmap *pm;
	register vm_offset_t va, endva;
	register int vseg, nleft, pmeg;
{
	register int *pte0, i, pteva, tpte, perpage, npg;
	register struct pvlist *pv;
#ifdef perftest
	register int doflush, nvalid;
#endif

	pte0 = pm->pm_pte[vseg];
	if (pmeg == seginval) {
		register int *pte = pte0 + VA_VPG(va);

		/*
		 * PTEs are not in MMU.  Just invalidate software copies.
		 */
		for (; va < endva; pte++, va += PAGE_SIZE) {
			tpte = *pte;
			if ((tpte & PG_V) == 0) {
				/* nothing to remove (braindead VM layer) */
				continue;
			}
			if ((tpte & PG_TYPE) == PG_OBMEM) {
				i = ptoa(HWTOSW(tpte & PG_PFNUM));
				if (managed(i))
					pv_unlink(pvhead(i), pm, va);
			}
			nleft--;
			*pte = 0;
		}
		if (nleft == 0) {
			free((caddr_t)pte0, M_VMPMAP);
			pm->pm_pte[vseg] = NULL;
		}
		return (nleft);
	}

	/*
	 * PTEs are in MMU.  Invalidate in hardware, update ref &
	 * mod bits, and flush cache if required.
	 */
	if (pm->pm_ctx) {
		/* process has a context, must flush cache */
		npg = (endva - va) >> PGSHIFT;
#ifdef perftest
		doflush = 1;
		nvalid = 0;
#endif
		setcontext(pm->pm_ctxnum);
		if (npg > PMAP_RMU_MAGIC) {
			perpage = 0; /* flush the whole segment */
#ifdef notdef
			if (vactype != VAC_NONE)
#endif
				cache_flush_segment(vseg);
		} else
			perpage = 1;
		pteva = va;
	} else {
		/* no context, use context 0; cache flush unnecessary */
		setcontext(0);
		/* XXX use per-cpu pteva? */
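		/*
		 * With no context to run in, borrow context 0: map this
		 * segment's pmeg at virtual segment 0 so its PTEs can be
		 * read and written through the MMU, and let pteva select
		 * the page slot within that temporary mapping.  Since the
		 * pmap is not resident in any context, none of its pages
		 * can have lines in the virtually addressed cache, hence
		 * no flush.
		 */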
		setsegmap(0, pmeg);
		pteva = VA_VPG(va) * NBPG;
		perpage = 0;
#ifdef perftest
		npg = 0;
		doflush = 0;
		nvalid = 0;
		rmu_noflush++;
#endif
	}
	for (; va < endva; pteva += PAGE_SIZE, va += PAGE_SIZE) {
		tpte = getpte(pteva);
		if ((tpte & PG_V) == 0)
			continue;
		pv = NULL;
		/* if cacheable, flush page as needed */
		if (doflush && (tpte & PG_NC) == 0) {
#ifdef perftest
			nvalid++;
#endif
			if (perpage)
				cache_flush_page(va);
		}
		if ((tpte & PG_TYPE) == PG_OBMEM) {
			i = ptoa(HWTOSW(tpte & PG_PFNUM));
			if (managed(i)) {
				pv = pvhead(i);
				pv->pv_flags |= MR(tpte);
				pv_unlink(pv, pm, va);
			}
		}
		nleft--;
		setpte(pteva, 0);
	}
#ifdef perftest
	if (doflush) {
		rmu_vlen[npg]++;
		rmu_npg[nvalid]++;
		if (npg != nvalid)
			rmu_vlendiff++;
	}
#endif

	/*
	 * If the segment is all gone, and the context is loaded, give
	 * the segment back.
	 */
	if (nleft == 0 && pm->pm_ctx != NULL) {
		va = VSTOVA(vseg);		/* retract */
		setsegmap(va, seginval);
		free((caddr_t)pte0, M_VMPMAP);
		pm->pm_pte[vseg] = NULL;
		me_free(pm, pmeg);
	}
	return (nleft);
}

/*
 * Lower (make more strict) the protection on the specified
 * physical page.
 *
 * There are only two cases: either the protection is going to 0
 * (in which case we do the dirty work here), or it is going
 * to read-only (in which case pv_changepte does the trick).
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t pa;
	vm_prot_t prot;
{
	register struct pvlist *pv, *pv0, *npv;
	register struct pmap *pm;
	register int *pte;
	register int va, vseg, pteva, tpte;
	register int flags, nleft, i, pmeg, s, ctx, doflush;

#ifdef DEBUG
	if ((pmapdebug & PDB_CHANGEPROT) ||
	    (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	/*
	 * Skip unmanaged pages, or operations that do not take
	 * away write permission.
	 */
	if (!managed(pa) || prot & VM_PROT_WRITE)
		return;
	write_user_windows();	/* paranoia */
	if (prot & VM_PROT_READ) {
		pv_changepte(pvhead(pa), 0, PG_W);
		return;
	}

	/*
	 * Remove all access to all people talking to this page.
	 * Walk down PV list, removing all mappings.
	 * The logic is much like that for pmap_remove,
	 * but we know we are removing exactly one page.
	 */
	pv = pvhead(pa);
	s = splpmap();
	if ((pm = pv->pv_pmap) == NULL) {
		splx(s);
		return;
	}
	ctx = getcontext();
	pv0 = pv;
	flags = pv->pv_flags & ~PV_NC;
	for (;; pm = pv->pv_pmap) {
		va = pv->pv_va;
		vseg = VA_VSEG(va);
		if ((nleft = pm->pm_npte[vseg]) == 0)
			panic("pmap_remove_all: empty vseg");
		nleft--;
		pm->pm_npte[vseg] = nleft;
		pmeg = pm->pm_segmap[vseg];
		pte = pm->pm_pte[vseg];
		if (pmeg == seginval) {
			if (nleft) {
				pte += VA_VPG(va);
				*pte = 0;
			} else {
				free((caddr_t)pte, M_VMPMAP);
				pm->pm_pte[vseg] = NULL;
			}
			goto nextpv;
		}
		if (pm->pm_ctx) {
			setcontext(pm->pm_ctxnum);
			pteva = va;
#ifdef notdef
			doflush = vactype != VAC_NONE;
#else
			doflush = 1;
#endif
		} else {
			setcontext(0);
			/* XXX use per-cpu pteva? */
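			/*
			 * As in pmap_rmu(): borrow context 0 and map the
			 * pmeg at virtual segment 0 to reach its PTEs.
			 */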
			setsegmap(0, pmeg);
			pteva = VA_VPG(va) * NBPG;
			doflush = 0;
		}
		if (nleft) {
			if (doflush)
				cache_flush_page(va);
			tpte = getpte(pteva);
			if ((tpte & PG_V) == 0)
				panic("pmap_page_protect !PG_V 1");
			flags |= MR(tpte);
			setpte(pteva, 0);
		} else {
			if (doflush)
				cache_flush_page(va);
			tpte = getpte(pteva);
			if ((tpte & PG_V) == 0)
				panic("pmap_page_protect !PG_V 2");
			flags |= MR(tpte);
			if (pm->pm_ctx) {
				setsegmap(va, seginval);
				if (pm == kernel_pmap) {
					for (i = ncontext; --i > 0;) {
						setcontext(i);
						setsegmap(va, seginval);
					}
					goto skipptefree;
				}
			}
			free((caddr_t)pte, M_VMPMAP);
			pm->pm_pte[vseg] = NULL;
		skipptefree:
			me_free(pm, pmeg);
		}
	nextpv:
		npv = pv->pv_next;
		if (pv != pv0)
			free((caddr_t)pv, M_VMPVENT);
		if ((pv = npv) == NULL)
			break;
	}
	pv0->pv_pmap = NULL;
	pv0->pv_flags = flags;
	setcontext(ctx);
	splx(s);
}

/*
 * Lower (make more strict) the protection on the specified
 * range of this pmap.
 *
 * There are only two cases: either the protection is going to 0
 * (in which case we call pmap_remove to do the dirty work), or
 * it is going from read/write to read-only.  The latter is
 * fairly easy.
 */
void
pmap_protect(pm, sva, eva, prot)
	register struct pmap *pm;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register int va, nva, vseg, pteva, pmeg;
	register int s, ctx;

	if (pm == NULL || prot & VM_PROT_WRITE)
		return;
	if ((prot & VM_PROT_READ) == 0) {
		pmap_remove(pm, sva, eva);
		return;
	}

	write_user_windows();
	ctx = getcontext();
	s = splpmap();
	simple_lock(&pm->pm_lock);

	for (va = sva; va < eva;) {
		vseg = VA_VSEG(va);
		nva = VSTOVA(vseg + 1);
		if (nva == 0)
			panic("pmap_protect: last segment");	/* cannot happen */
		if (nva > eva)
			nva = eva;
		if (pm->pm_npte[vseg] == 0) {
			va = nva;
			continue;
		}
		pmeg = pm->pm_segmap[vseg];
		if (pmeg == seginval) {
			register int *pte = &pm->pm_pte[vseg][VA_VPG(va)];

			/* not in MMU; just clear PG_W from core copies */
			for (; va < nva; va += NBPG)
				*pte++ &= ~PG_W;
		} else {
			/* in MMU: take away write bits from MMU PTEs */
			if (
#ifdef notdef
			    vactype != VAC_NONE &&
#endif
			    pm->pm_ctx) {
				register int tpte;

				/*
				 * Flush cache so that any existing cache
				 * tags are updated.  This is really only
				 * needed for PTEs that lose PG_W.
				 */
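The per-segment clipping that pmap_remove() performs above can be modeled in isolation. The sketch below is illustrative only: the 256 KB segment size (SGSHIFT), the simplified VA_VSEG()/VSTOVA() macros, and the sample addresses are assumptions standing in for the machine-dependent definitions in this port, not values taken from this file.

#include <stdio.h>

#define SGSHIFT		18			/* assumed: 256 KB per MMU segment */
#define VA_VSEG(va)	((unsigned)(va) >> SGSHIFT)	/* VA -> virtual segment number */
#define VSTOVA(vseg)	((unsigned)(vseg) << SGSHIFT)	/* virtual segment -> base VA */

int
main(void)
{
	unsigned va = 0x3f000, endva = 0xa2000, nva;

	/* Same loop shape as pmap_remove(): clip each step to one segment. */
	for (; va < endva; va = nva) {
		unsigned vseg = VA_VSEG(va);

		nva = VSTOVA(vseg + 1);		/* start of the next segment */
		if (nva == 0 || nva > endva)	/* wrapped around or past the end? */
			nva = endva;
		printf("segment %u: remove [%#x, %#x)\n", vseg, va, nva);
	}
	return 0;
}

Each iteration of the printed output corresponds to one call that pmap_remove() would hand off to pmap_rmk() or pmap_rmu() for that segment.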
