📄 pmap.c

📁 Early FreeBSD implementation
💻 C
📖 Page 1 of 5
/*
 * pv_unlink is a helper function for pmap_remove.
 * It takes a pointer to the pv_table head for some physical address
 * and removes the appropriate (pmap, va) entry.
 *
 * Once the entry is removed, if the pv_table head has the cache
 * inhibit bit set, see if we can turn that off; if so, walk the
 * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
 * definition nonempty, since it must have at least two elements
 * in it to have PV_NC set, and we only remove one here.)
 */
static void
pv_unlink(pv, pm, va)
	register struct pvlist *pv;
	register struct pmap *pm;
	register vm_offset_t va;
{
	register struct pvlist *npv;

	/*
	 * First entry is special (sigh).
	 */
	npv = pv->pv_next;
	if (pv->pv_pmap == pm && pv->pv_va == va) {
		pmap_stats.ps_unlink_pvfirst++;
		if (npv != NULL) {
			pv->pv_next = npv->pv_next;
			pv->pv_pmap = npv->pv_pmap;
			pv->pv_va = npv->pv_va;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
	} else {
		register struct pvlist *prev;

		for (prev = pv;; prev = npv, npv = npv->pv_next) {
			pmap_stats.ps_unlink_pvsearch++;
			if (npv == NULL)
				panic("pv_unlink");
			if (npv->pv_pmap == pm && npv->pv_va == va)
				break;
		}
		prev->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	if (pv->pv_flags & PV_NC) {
		/*
		 * Not cached: check to see if we can fix that now.
		 */
		va = pv->pv_va;
		for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
			if (BADALIAS(va, npv->pv_va))
				return;
		pv->pv_flags &= ~PV_NC;
		pv_changepte(pv, 0, PG_NC);
	}
}

/*
 * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
 * It returns PG_NC if the (new) pvlist says that the address cannot
 * be cached.
 */
static int
pv_link(pv, pm, va)
	register struct pvlist *pv;
	register struct pmap *pm;
	register vm_offset_t va;
{
	register struct pvlist *npv;
	register int ret;

	if (pv->pv_pmap == NULL) {
		/* no pvlist entries yet */
		pmap_stats.ps_enter_firstpv++;
		pv->pv_next = NULL;
		pv->pv_pmap = pm;
		pv->pv_va = va;
		return (0);
	}
	/*
	 * Before entering the new mapping, see if
	 * it will cause old mappings to become aliased
	 * and thus need to be `discached'.
	 */
	ret = 0;
	pmap_stats.ps_enter_secondpv++;
	if (pv->pv_flags & PV_NC) {
		/* already uncached, just stay that way */
		ret = PG_NC;
	} else {
		/* MAY NEED TO DISCACHE ANYWAY IF va IS IN DVMA SPACE? */
		for (npv = pv; npv != NULL; npv = npv->pv_next) {
			if (BADALIAS(va, npv->pv_va)) {
				pv->pv_flags |= PV_NC;
				pv_changepte(pv, ret = PG_NC, 0);
				break;
			}
		}
	}
	npv = (struct pvlist *)malloc(sizeof *npv, M_VMPVENT, M_WAITOK);
	npv->pv_next = pv->pv_next;
	npv->pv_pmap = pm;
	npv->pv_va = va;
	pv->pv_next = npv;
	return (ret);
}

/*
 * Walk the given list and flush the cache for each (MI) page that is
 * potentially in the cache.
 */
pv_flushcache(pv)
	register struct pvlist *pv;
{
	register struct pmap *pm;
	register int i, s, ctx;

	write_user_windows();	/* paranoia? */

	s = splpmap();		/* XXX extreme paranoia */
	if ((pm = pv->pv_pmap) != NULL) {
		ctx = getcontext();
		for (;;) {
			if (pm->pm_ctx) {
				setcontext(pm->pm_ctxnum);
				cache_flush_page(pv->pv_va);
			}
			pv = pv->pv_next;
			if (pv == NULL)
				break;
			pm = pv->pv_pmap;
		}
		setcontext(ctx);
	}
	splx(s);
}

/*----------------------------------------------------------------*/

/*
 * At last, pmap code.
 */

/*
 * Bootstrap the system enough to run with VM enabled.
 *
 * nmmu is the number of mmu entries (``PMEGs'');
 * nctx is the number of contexts.
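 *
 * (Call sketch, an assumption not shown in this listing: the
 *  machine-dependent startup code is presumed to probe the number of
 *  PMEGs and contexts and invoke pmap_bootstrap(nmmu, nctx) before
 *  the MI VM system is started, so everything below runs on the
 *  boot-time mappings only.)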
 */
void
pmap_bootstrap(nmmu, nctx)
	int nmmu, nctx;
{
	register union ctxinfo *ci;
	register struct mmuentry *me;
	register int i, j, n, z, vs;
	register caddr_t p;
	register void (*rom_setmap)(int ctx, caddr_t va, int pmeg);
	int lastpage;
	extern char end[];
	extern caddr_t reserve_dumppages(caddr_t);

	ncontext = nctx;

	/*
	 * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
	 * It will never be used for anything else.
	 */
	seginval = --nmmu;

	/*
	 * Preserve the monitor ROM's reserved VM region, so that
	 * we can use L1-A or the monitor's debugger.  As a side
	 * effect we map the ROM's reserved VM into all contexts
	 * (otherwise L1-A crashes the machine!).
	 */
	nmmu = mmu_reservemon(nmmu);

	/*
	 * Allocate and clear mmu entry and context structures.
	 */
	p = end;
	mmuentry = me = (struct mmuentry *)p;
	p += nmmu * sizeof *me;
	ctxinfo = ci = (union ctxinfo *)p;
	p += nctx * sizeof *ci;
	bzero(end, p - end);

	/*
	 * Set up the `constants' for the call to vm_init()
	 * in main().  All pages beginning at p (rounded up to
	 * the next whole page) and continuing through the number
	 * of available pages are free, but they start at a higher
	 * virtual address.  This gives us two mappable MD pages
	 * for pmap_zero_page and pmap_copy_page, and one MI page
	 * for /dev/mem, all with no associated physical memory.
	 */
	p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
	avail_start = (int)p - KERNBASE;
	avail_end = init_translations() << PGSHIFT;
	i = (int)p;
	vpage[0] = p, p += NBPG;
	vpage[1] = p, p += NBPG;
	vmempage = p, p += NBPG;
	p = reserve_dumppages(p);
	virtual_avail = (vm_offset_t)p;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	p = (caddr_t)i;			/* retract to first free phys */

	/*
	 * Initialize the kernel pmap.
	 */
	{
		register struct kpmap *k = &kernel_pmap_store;

/*		kernel_pmap = (struct pmap *)k; */

		k->pm_ctx = ctxinfo;
		/* k->pm_ctxnum = 0; */
		simple_lock_init(&k->pm_lock);
		k->pm_refcount = 1;
		/* k->pm_mmuforw = 0; */
		k->pm_mmuback = &k->pm_mmuforw;
		k->pm_segmap = &k->pm_rsegmap[-NUSEG];
		k->pm_pte = &k->pm_rpte[-NUSEG];
		k->pm_npte = &k->pm_rnpte[-NUSEG];
		for (i = NKSEG; --i >= 0;)
			k->pm_rsegmap[i] = seginval;
	}

	/*
	 * All contexts are free except the kernel's.
	 *
	 * XXX sun4c could use context 0 for users?
	 */
	ci->c_pmap = kernel_pmap;
	ctx_freelist = ci + 1;
	for (i = 1; i < ncontext; i++) {
		ci++;
		ci->c_nextfree = ci + 1;
	}
	ci->c_nextfree = NULL;
	ctx_kick = 0;
	ctx_kickdir = -1;

	/* me_freelist = NULL; */	/* already NULL */

	/*
	 * Init mmu entries that map the kernel physical addresses.
	 * If the page bits in p are 0, we filled the last segment
	 * exactly (now how did that happen?); if not, it is
	 * the last page filled in the last segment.
	 *
	 * All the other MMU entries are free.
	 *
	 * THIS ASSUMES SEGMENT i IS MAPPED BY MMU ENTRY i DURING THE
	 * BOOT PROCESS
	 */
	z = ((((u_int)p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
	lastpage = VA_VPG(p);
	if (lastpage == 0)
		lastpage = NPTESG;
	p = (caddr_t)KERNBASE;		/* first va */
	vs = VA_VSEG(KERNBASE);		/* first virtual segment */
	rom_setmap = promvec->pv_setctxt;
	for (i = 0;;) {
		/*
		 * Distribute each kernel segment into all contexts.
		 * This is done through the monitor ROM, rather than
		 * directly here: if we do a setcontext we will fault,
		 * as we are not (yet) mapped in any other context.
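		 *
		 * (Note, inferred from the surrounding code: rom_setmap
		 *  is the PROM's pv_setctxt entry, so the monitor maps
		 *  kernel va p to PMEG i in context j on our behalf
		 *  while we stay in the boot context; only once every
		 *  context maps the kernel does a setcontext become
		 *  safe.)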
		 */
		for (j = 1; j < nctx; j++)
			rom_setmap(j, p, i);

		/* set up the mmu entry */
		me->me_pmeg = i;
		insque(me, me_locked.mh_prev);
		/* me->me_pmforw = NULL; */
		me->me_pmback = kernel_pmap->pm_mmuback;
		*kernel_pmap->pm_mmuback = me;
		kernel_pmap->pm_mmuback = &me->me_pmforw;
		me->me_pmap = kernel_pmap;
		me->me_vseg = vs;
		kernel_pmap->pm_segmap[vs] = i;
		n = ++i < z ? NPTESG : lastpage;
		kernel_pmap->pm_npte[vs] = n;
		me++;
		vs++;
		if (i < z) {
			p += NBPSG;
			continue;
		}
		/*
		 * Unmap the pages, if any, that are not part of
		 * the final segment.
		 */
		for (p += n * NBPG; j < NPTESG; j++, p += NBPG)
			setpte(p, 0);
		break;
	}
	for (; i < nmmu; i++, me++) {
		me->me_pmeg = i;
		me->me_next = me_freelist;
		/* me->me_pmap = NULL; */
		me_freelist = me;
	}

	/*
	 * write protect & encache kernel text;
	 * set red zone at kernel base; enable cache on message buffer.
	 */
	{
		extern char etext[], trapbase[];
#ifdef KGDB
		register int mask = ~PG_NC;	/* XXX chgkprot is busted */
#else
		register int mask = ~(PG_W | PG_NC);
#endif

		for (p = trapbase; p < etext; p += NBPG)
			setpte(p, getpte(p) & mask);
		p = (caddr_t)KERNBASE;
		setpte(p, 0);
		p += NBPG;
		setpte(p, getpte(p) & ~PG_NC);
	}

	/*
	 * Grab physical memory list (for /dev/mem).
	 */
	npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_TOTALPHYS);
}

/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
 * it cannot be used, and will generate a panic if tried. Note that this
 * memory will never be freed, and in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size)
	int size;
{
	register void *mem;
	extern int vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");
	size = round_page(size);
	mem = (void *)virtual_avail;
	virtual_avail = pmap_map(virtual_avail, avail_start,
	    avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
	avail_start += size;
	bzero((void *)mem, size);
	return (mem);
}

/*
 * Initialize the pmap module.
 */
void
pmap_init(phys_start, phys_end)
	register vm_offset_t phys_start, phys_end;
{
	register vm_size_t s;

	if (PAGE_SIZE != NBPG)
		panic("pmap_init: CLSIZE!=1");
	/*
	 * Allocate and clear memory for the pv_table.
	 */
	s = sizeof(struct pvlist) * atop(phys_end - phys_start);
	s = round_page(s);
	pv_table = (struct pvlist *)kmem_alloc(kernel_map, s);
	bzero((caddr_t)pv_table, s);
	vm_first_phys = phys_start;
	vm_num_phys = phys_end - phys_start;
}

/*
 * Map physical addresses into kernel VM.
 */
vm_offset_t
pmap_map(va, pa, endpa, prot)
	register vm_offset_t va, pa, endpa;
	register int prot;
{
	register int pgsize = PAGE_SIZE;

	while (pa < endpa) {
		pmap_enter(kernel_pmap, va, pa, prot, 1);
		va += pgsize;
		pa += pgsize;
	}
	return (va);
}

/*
 * Create and return a physical map.
 *
 * If size is nonzero, the map is useless. (ick)
 */
struct pmap *
pmap_create(size)
	vm_size_t size;
{
	register struct pmap *pm;

	if (size)
		return (NULL);
	pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
#ifdef DEBUG
	if (pmapdebug & PDB_CREATE)
		printf("pmap_create: created %x\n", pm);
#endif
	bzero((caddr_t)pm, sizeof *pm);
	pmap_pinit(pm);
	return (pm);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
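 *
 * (Descriptive note, based on the code below: a freshly initialized
 *  pmap carries one reference, has no hardware context, and has all
 *  NUSEG of its software segment-map entries set to seginval, so it
 *  maps nothing until pmap_enter installs segments.)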
 */
void
pmap_pinit(pm)
	register struct pmap *pm;
{
	register int i;

#ifdef DEBUG
	if (pmapdebug & PDB_CREATE)
		printf("pmap_pinit(%x)\n", pm);
#endif
	/* pm->pm_ctx = NULL; */
	simple_lock_init(&pm->pm_lock);
	pm->pm_refcount = 1;
	/* pm->pm_mmuforw = NULL; */
	pm->pm_mmuback = &pm->pm_mmuforw;
	pm->pm_segmap = pm->pm_rsegmap;
	pm->pm_pte = pm->pm_rpte;
	pm->pm_npte = pm->pm_rnpte;
	for (i = NUSEG; --i >= 0;)
		pm->pm_rsegmap[i] = seginval;
	/* bzero((caddr_t)pm->pm_rpte, sizeof pm->pm_rpte); */
	/* bzero((caddr_t)pm->pm_rnpte, sizeof pm->pm_rnpte); */
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pm)
	register struct pmap *pm;
{
	int count;

	if (pm == NULL)
		return;
#ifdef DEBUG
	if (pmapdebug & PDB_DESTROY)
		printf("pmap_destroy(%x)\n", pm);
#endif
	simple_lock(&pm->pm_lock);
	count = --pm->pm_refcount;
	simple_unlock(&pm->pm_lock);
	if (count == 0) {
		pmap_release(pm);
		free((caddr_t)pm, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(pm)
	register struct pmap *pm;
{
	register union ctxinfo *c;
	register int s = splpmap();	/* paranoia */

#ifdef DEBUG
	if (pmapdebug & PDB_DESTROY)
		printf("pmap_release(%x)\n", pm);
#endif
	if (pm->pm_mmuforw)
		panic("pmap_release mmuforw");
	if ((c = pm->pm_ctx) != NULL) {
		if (pm->pm_ctxnum == 0)
			panic("pmap_release: releasing kernel");
		ctx_free(pm);
	}
	splx(s);
}

/*
 * Add a reference to the given pmap.
 */
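
/*
 * A usage sketch of the pmap reference counting seen in pmap_pinit
 * and pmap_destroy above, assuming the MI VM layer as the caller
 * (the call sites themselves are not part of this listing):
 *
 *	struct pmap *pm = pmap_create(0);	pm_refcount == 1
 *	pmap_reference(pm);			shared: pm_refcount == 2
 *	pmap_destroy(pm);			back to 1, pmap kept
 *	pmap_destroy(pm);			0: pmap_release, then free
 */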
