
pmap.c

From the OSKIT source code for building an operating system from components
Language: C
Page 1 of 5
/*
 * pmap_enter_pv: enter a mapping onto a pv_head list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the pv_head and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 */

__inline static void
pmap_enter_pv(pvh, pve, pmap, va, ptp)
	struct pv_head *pvh;
	struct pv_entry *pve;	/* preallocated pve for us to use */
	struct pmap *pmap;
	vaddr_t va;
	struct vm_page *ptp;	/* PTP in pmap that maps this VA */
{
	pve->pv_pmap = pmap;
	pve->pv_va = va;
	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
	simple_lock(&pvh->pvh_lock);		/* lock pv_head */
	pve->pv_next = pvh->pvh_list;		/* add to ... */
	pvh->pvh_list = pve;			/* ... locked list */
	simple_unlock(&pvh->pvh_lock);		/* unlock, done! */
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on pv_head [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => we return the removed pve
 */

__inline static struct pv_entry *
pmap_remove_pv(pvh, pmap, va)
	struct pv_head *pvh;
	struct pmap *pmap;
	vaddr_t va;
{
	struct pv_entry *pve, **prevptr;

	prevptr = &pvh->pvh_list;		/* previous pv_entry pointer */
	pve = *prevptr;
	while (pve) {
		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
			*prevptr = pve->pv_next;		/* remove it! */
			break;
		}
		prevptr = &pve->pv_next;		/* previous pointer */
		pve = pve->pv_next;			/* advance */
	}
	return(pve);				/* return removed pve */
}

/*
 * p t p   f u n c t i o n s
 */

/*
 * pmap_alloc_ptp: allocate a PTP for a PMAP
 *
 * => pmap should already be locked by caller
 * => we use the ptp's wire_count to count the number of active mappings
 *	in the PTP (we start it at one to prevent any chance this PTP
 *	will ever leak onto the active/inactive queues)
 * => we should not be holding any pv_head locks (in case we are forced
 *	to call pmap_steal_ptp())
 * => we may need to lock pv_head's if we have to steal a PTP
 * => just_try: true if we want a PTP, but not enough to steal one
 *	from another pmap (e.g. during optional functions like pmap_copy)
 */

__inline static struct vm_page *
pmap_alloc_ptp(pmap, pde_index, just_try)
	struct pmap *pmap;
	int pde_index;
	boolean_t just_try;
{
	struct vm_page *ptp;

	ptp = uvm_pagealloc(&pmap->pm_obj, ptp_i2o(pde_index), NULL,
			    UVM_PGA_USERESERVE|UVM_PGA_ZERO);
	if (ptp == NULL) {
		if (just_try)
			return(NULL);
		ptp = pmap_steal_ptp(&pmap->pm_obj, ptp_i2o(pde_index));
		if (ptp == NULL) {
			return (NULL);
		}
		/* stole one; zero it. */
		pmap_zero_page(VM_PAGE_TO_PHYS(ptp));
	}

	/* got one! */
	ptp->flags &= ~PG_BUSY;	/* never busy */
	ptp->wire_count = 1;	/* no mappings yet */
	pmap->pm_pdir[pde_index] =
		(pd_entry_t) (VM_PAGE_TO_PHYS(ptp) | PG_u | PG_RW | PG_V);
	pmap->pm_stats.resident_count++;	/* count PTP as resident */
	pmap->pm_ptphint = ptp;
	return(ptp);
}
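The while loop in pmap_remove_pv is the classic pointer-to-pointer idiom for unlinking from a singly linked list: prevptr always holds the address of the link that points at the current node, so removing the head and removing an interior node are the same single assignment, with no special case. A minimal standalone sketch of the idiom (the node type and the main driver below are illustrative, not part of pmap.c):

#include <stdio.h>

struct node {
	int key;
	struct node *next;
};

/* unlink and return the first node with the given key, or NULL */
static struct node *
list_remove(struct node **head, int key)
{
	struct node **prevptr = head;	/* address of the link to patch */
	struct node *cur = *prevptr;

	while (cur) {
		if (cur->key == key) {
			*prevptr = cur->next;	/* same assignment for head or interior */
			break;
		}
		prevptr = &cur->next;
		cur = cur->next;
	}
	return cur;
}

int
main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;
	struct node *removed = list_remove(&head, 1);	/* head-removal case */

	printf("removed %d, new head %d\n", removed->key, head->key);
	return 0;
}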
/*
 * pmap_steal_ptp: steal a PTP from any pmap that we can access
 *
 * => obj is locked by caller.
 * => we can throw away mappings at this level (except in the kernel's pmap)
 * => stolen PTP is placed in <obj,offset> pmap
 * => we lock pv_head's
 * => hopefully, this function will be seldom used [much better to have
 *	enough free pages around for us to allocate off the free page list]
 */

static struct vm_page *
pmap_steal_ptp(obj, offset)
	struct uvm_object *obj;
	vaddr_t offset;
{
	struct vm_page *ptp = NULL;
	struct pmap *firstpmap;
	struct uvm_object *curobj;
	pt_entry_t *ptes;
	int idx, lcv;
	boolean_t caller_locked, we_locked;

	simple_lock(&pmaps_lock);
	if (pmaps_hand == NULL)
		pmaps_hand = LIST_FIRST(&pmaps);
	firstpmap = pmaps_hand;

	do { /* while we haven't looped back around to firstpmap */

		curobj = &pmaps_hand->pm_obj;
		we_locked = FALSE;
		caller_locked = (curobj == obj);
		if (!caller_locked) {
			we_locked = simple_lock_try(&curobj->vmobjlock);
		}
		if (caller_locked || we_locked) {
			ptp = curobj->memq.tqh_first;
			for (/*null*/; ptp != NULL; ptp = ptp->listq.tqe_next) {

				/*
				 * might have found a PTP we can steal
				 * (unless it has wired pages).
				 */

				idx = ptp_o2i(ptp->offset);
#ifdef DIAGNOSTIC
				if (VM_PAGE_TO_PHYS(ptp) !=
				    (pmaps_hand->pm_pdir[idx] & PG_FRAME))
					panic("pmap_steal_ptp: PTP mismatch!");
#endif

				ptes = (pt_entry_t *)
					pmap_tmpmap_pa(VM_PAGE_TO_PHYS(ptp));
				for (lcv = 0 ; lcv < PTES_PER_PTP ; lcv++)
					if ((ptes[lcv] & (PG_V|PG_W)) ==
					    (PG_V|PG_W))
						break;
				if (lcv == PTES_PER_PTP)
					pmap_remove_ptes(pmaps_hand, NULL, ptp,
							 (vaddr_t)ptes,
							 ptp_i2v(idx),
							 ptp_i2v(idx+1),
							 PMAP_REMOVE_ALL);
				pmap_tmpunmap_pa();

				if (lcv != PTES_PER_PTP)
					/* wired, try next PTP */
					continue;

				/*
				 * got it!!!
				 */

				pmaps_hand->pm_pdir[idx] = 0;	/* zap! */
				pmaps_hand->pm_stats.resident_count--;
				if (pmap_is_curpmap(pmaps_hand))
					pmap_update();
				else if (pmap_valid_entry(*APDP_PDE) &&
					 (*APDP_PDE & PG_FRAME) ==
					 pmaps_hand->pm_pdirpa) {
					pmap_update_pg(((vaddr_t)APTE_BASE) +
						       ptp->offset);
				}

				/* put it in our pmap! */
				uvm_pagerealloc(ptp, obj, offset);
				break;	/* break out of "for" loop */
			}
			if (we_locked) {
				simple_unlock(&curobj->vmobjlock);
			}
		}

		/* advance the pmaps_hand */
		pmaps_hand = LIST_NEXT(pmaps_hand, pm_list);
		if (pmaps_hand == NULL) {
			pmaps_hand = LIST_FIRST(&pmaps);
		}

	} while (ptp == NULL && pmaps_hand != firstpmap);

	simple_unlock(&pmaps_lock);
	return(ptp);
}

/*
 * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
 *
 * => pmap should NOT be pmap_kernel()
 * => pmap should be locked
 */

static struct vm_page *
pmap_get_ptp(pmap, pde_index, just_try)
	struct pmap *pmap;
	int pde_index;
	boolean_t just_try;
{
	struct vm_page *ptp;

	if (pmap_valid_entry(pmap->pm_pdir[pde_index])) {

		/* valid... check hint (saves us a PA->PG lookup) */
		if (pmap->pm_ptphint &&
		    (pmap->pm_pdir[pde_index] & PG_FRAME) ==
		    VM_PAGE_TO_PHYS(pmap->pm_ptphint))
			return(pmap->pm_ptphint);

		ptp = uvm_pagelookup(&pmap->pm_obj, ptp_i2o(pde_index));
#ifdef DIAGNOSTIC
		if (ptp == NULL)
			panic("pmap_get_ptp: unmanaged user PTP");
#endif
		pmap->pm_ptphint = ptp;
		return(ptp);
	}

	/* allocate a new PTP (updates ptphint) */
	return(pmap_alloc_ptp(pmap, pde_index, just_try));
}
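pmap_alloc_ptp, pmap_steal_ptp, and pmap_get_ptp convert between three views of the same page table page via ptp_i2o, ptp_o2i, and ptp_i2v: the PDE index, the PTP's offset inside the pmap's uvm_object, and the first virtual address the PTP maps. On i386 one PTP holds 1024 four-byte PTEs and so covers 4 MB of virtual space, which is why pmap_steal_ptp tears down exactly the range ptp_i2v(idx) to ptp_i2v(idx+1). The definitions below are a plausible reconstruction for illustration only; the real macros live in the i386 pmap headers:

#include <stdio.h>

#define PAGE_SIZE	4096UL				/* bytes per page */
#define PTES_PER_PTP	1024				/* PTEs in one page table page */
#define NBPD		(PTES_PER_PTP * PAGE_SIZE)	/* bytes mapped by one PDE: 4 MB */

/* PDE index -> offset of its PTP within the pmap's uvm_object */
#define ptp_i2o(I)	((unsigned long)(I) * PAGE_SIZE)
/* object offset -> PDE index */
#define ptp_o2i(O)	((int)((O) / PAGE_SIZE))
/* PDE index -> first virtual address the PTP maps */
#define ptp_i2v(I)	((unsigned long)(I) * NBPD)

int
main(void)
{
	int idx = 5;

	printf("PTP for PDE %d: object offset 0x%lx, maps VA 0x%lx-0x%lx\n",
	    idx, ptp_i2o(idx), ptp_i2v(idx), ptp_i2v(idx + 1) - 1);
	return 0;
}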
/*
 * p m a p   l i f e c y c l e   f u n c t i o n s
 */

/*
 * pmap_pdp_ctor: constructor for the PDP cache.
 */

int
pmap_pdp_ctor(void *arg, void *object, int flags)
{
	pd_entry_t *pdir = object;
	paddr_t pdirpa;

	/*
	 * NOTE: The `pmap_lock' is held when the PDP is allocated.
	 * WE MUST NOT BLOCK!
	 */

	/* fetch the physical address of the page directory. */
	(void) pmap_extract(pmap_kernel(), (vaddr_t) pdir, &pdirpa);

#ifndef OSKIT
	/* zero init area */
	memset(pdir, 0, PDSLOT_PTE * sizeof(pd_entry_t));
#else
#if 0
	printf(__FUNCTION__": pdir = %p, pdirpa = 0x%lx\n", pdir, (long)pdirpa);
#endif
	/* zero init area */
	memset(pdir, 0, PAGE_SIZE);
#endif

	/* put in recursive PDE to map the PTEs */
	pdir[PDSLOT_PTE] = pdirpa | PG_V | PG_KW;

	/* put in kernel VM PDEs */
	memcpy(&pdir[PDSLOT_KERN], &PDP_BASE[PDSLOT_KERN],
	    nkpde * sizeof(pd_entry_t));

#ifndef OSKIT
	/* zero the rest */
	memset(&pdir[PDSLOT_KERN + nkpde], 0,
	    PAGE_SIZE - ((PDSLOT_KERN + nkpde) * sizeof(pd_entry_t)));
#endif

	return (0);
}

/*
 * pmap_create: create a pmap
 *
 * => note: old pmap interface took a "size" arg which allowed for
 *	the creation of "software only" pmaps (not in bsd).
 */

struct pmap *
pmap_create()
{
	struct pmap *pmap;

	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);

	/* init uvm_object */
	simple_lock_init(&pmap->pm_obj.vmobjlock);
	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
	TAILQ_INIT(&pmap->pm_obj.memq);
	pmap->pm_obj.uo_npages = 0;
	pmap->pm_obj.uo_refs = 1;
	pmap->pm_stats.wired_count = 0;
	pmap->pm_stats.resident_count = 1;	/* count the PDP alloc'd below */
	pmap->pm_ptphint = NULL;
	pmap->pm_flags = 0;

	/* init the LDT */
	pmap->pm_ldt = NULL;
	pmap->pm_ldt_len = 0;
	pmap->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);

	/* allocate PDP */

	/*
	 * we need to lock pmaps_lock to prevent nkpde from changing on
	 * us.  note that there is no need to splimp to protect us from
	 * malloc since malloc allocates out of a submap and we should
	 * have already allocated kernel PTPs to cover the range...
	 *
	 * NOTE: WE MUST NOT BLOCK WHILE HOLDING THE `pmap_lock'!
	 */

	simple_lock(&pmaps_lock);

	/* XXX Need a generic "I want memory" wchan */
	while ((pmap->pm_pdir =
	    pool_cache_get(&pmap_pdp_cache, PR_NOWAIT)) == NULL)
		(void) ltsleep(&lbolt, PVM, "pmapcr", hz >> 3, &pmaps_lock);

#if 0
	printf(__FUNCTION__": pmap->pm_pdir = 0x%p\n", pmap->pm_pdir);
#endif

	pmap->pm_pdirpa = pmap->pm_pdir[PDSLOT_PTE] & PG_FRAME;

#if 0
	printf(__FUNCTION__": new page directory pa = %x\n", pmap->pm_pdirpa);
	pdir_dump(pmap->pm_pdirpa);
#endif

	LIST_INSERT_HEAD(&pmaps, pmap, pm_list);

	simple_unlock(&pmaps_lock);

	return (pmap);
}

/*
 * pmap_destroy: drop reference count on pmap.   free pmap if
 *	reference count goes to zero.
 */

void
pmap_destroy(pmap)
	struct pmap *pmap;
{
	struct vm_page *pg;
	int refs;

	/*
	 * drop reference count
	 */

	simple_lock(&pmap->pm_obj.vmobjlock);
	refs = --pmap->pm_obj.uo_refs;
	simple_unlock(&pmap->pm_obj.vmobjlock);
	if (refs > 0) {
		return;
	}

	/*
	 * reference count is zero, free pmap resources and then free pmap.
	 */

	/*
	 * remove it from global list of pmaps
	 */

	simple_lock(&pmaps_lock);
	if (pmap == pmaps_hand)
		pmaps_hand = LIST_NEXT(pmaps_hand, pm_list);
	LIST_REMOVE(pmap, pm_list);
	simple_unlock(&pmaps_lock);

	/*
	 * free any remaining PTPs
	 */

	while (pmap->pm_obj.memq.tqh_first != NULL) {
		pg = pmap->pm_obj.memq.tqh_first;
#ifdef DIAGNOSTIC
		if (pg->flags & PG_BUSY)
			panic("pmap_release: busy page table page");
#endif
		/* pmap_page_protect?  currently no need for it. */
		pg->wire_count = 0;
		uvm_pagefree(pg);
	}

	/* XXX: need to flush it out of other processor's APTE space? */
	pool_cache_put(&pmap_pdp_cache, pmap->pm_pdir);

#ifdef USER_LDT
	if (pmap->pm_flags & PMF_USER_LDT) {
		/*
		 * no need to switch the LDT; this address space is gone,
		 * nothing is using it.
		 */
		ldt_free(pmap);
		uvm_km_free(kernel_map, (vaddr_t)pmap->pm_ldt,
			    pmap->pm_ldt_len * sizeof(union descriptor));
	}
#endif

	pool_put(&pmap_pmap_pool, pmap);
}
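pmap_create, pmap_reference, and pmap_destroy together implement plain reference counting on pm_obj.uo_refs: a pmap starts life with one reference, and its resources are torn down only when the last pmap_destroy drops the count to zero. The standalone miniature below mirrors that pattern with the locking elided (obj_create, obj_reference, and obj_destroy are illustrative stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

/* miniature stand-in for struct pmap's uo_refs handling */
struct obj {
	int uo_refs;
};

static struct obj *
obj_create(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->uo_refs = 1;			/* creator holds the first reference */
	return o;
}

static void
obj_reference(struct obj *o)
{
	o->uo_refs++;			/* pmap_reference: just bump the count */
}

static void
obj_destroy(struct obj *o)
{
	if (--o->uo_refs > 0)		/* pmap_destroy: drop a reference ... */
		return;
	free(o);			/* ... free resources only on the last drop */
}

int
main(void)
{
	struct obj *o = obj_create();	/* refs = 1 */

	obj_reference(o);		/* refs = 2, e.g. a shared VM space */
	obj_destroy(o);			/* refs = 1, object survives */
	obj_destroy(o);			/* refs = 0, freed */
	printf("done\n");
	return 0;
}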
/*
 *	Add a reference to the specified pmap.
 */

void
pmap_reference(pmap)
	struct pmap *pmap;
{
	simple_lock(&pmap->pm_obj.vmobjlock);
	pmap->pm_obj.uo_refs++;
	simple_unlock(&pmap->pm_obj.vmobjlock);
}

#if defined(PMAP_FORK)
/*
 * pmap_fork: perform any necessary data structure manipulation when
 * a VM space is forked.
 */

void
pmap_fork(pmap1, pmap2)
	struct pmap *pmap1, *pmap2;
{
	simple_lock(&pmap1->pm_obj.vmobjlock);
	simple_lock(&pmap2->pm_obj.vmobjlock);

#ifdef USER_LDT
	/* Copy the LDT, if necessary. */
	if (pmap1->pm_flags & PMF_USER_LDT) {
		union descriptor *new_ldt;
		size_t len;

		len = pmap1->pm_ldt_len * sizeof(union descriptor);
		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, len);
		memcpy(new_ldt, pmap1->pm_ldt, len);
		pmap2->pm_ldt = new_ldt;
		pmap2->pm_ldt_len = pmap1->pm_ldt_len;
		pmap2->pm_flags |= PMF_USER_LDT;
		ldt_alloc(pmap2, new_ldt, len);
	}
#endif /* USER_LDT */

	simple_unlock(&pmap2->pm_obj.vmobjlock);
	simple_unlock(&pmap1->pm_obj.vmobjlock);
}
#endif /* PMAP_FORK */

#ifdef USER_LDT
/*
 * pmap_ldt_cleanup: if the pmap has a local LDT, deallocate it, and
 * restore the default.
 */

void
pmap_ldt_cleanup(p)
	struct proc *p;
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	union descriptor *old_ldt = NULL;
	size_t len = 0;

	simple_lock(&pmap->pm_obj.vmobjlock);

	if (pmap->pm_flags & PMF_USER_LDT) {
		ldt_free(pmap);
		pmap->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
		pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
		if (pcb == curpcb)
			lldt(pcb->pcb_ldt_sel);
		old_ldt = pmap->pm_ldt;
		len = pmap->pm_ldt_len * sizeof(union descriptor);
		pmap->pm_ldt = NULL;
		pmap->pm_ldt_len = 0;
		pmap->pm_flags &= ~PMF_USER_LDT;
	}

	simple_unlock(&pmap->pm_obj.vmobjlock);

	/* free the old LDT outside the lock, since freeing may block */
	if (old_ldt != NULL)
		uvm_km_free(kernel_map, (vaddr_t)old_ldt, len);
}
#endif /* USER_LDT */
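pmap_fork deep-copies the parent's private LDT instead of sharing the pointer, so descriptor changes made in one address space after the fork cannot leak into the other. The sketch below shows the same copy-on-fork pattern in isolation (descriptor_t, struct space, and space_fork are invented names for illustration, not kernel types):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for union descriptor */
typedef unsigned long long descriptor_t;

struct space {
	descriptor_t *ldt;
	size_t ldt_len;			/* number of descriptors */
};

/* duplicate the parent's table into the child, as pmap_fork does with memcpy */
static void
space_fork(const struct space *parent, struct space *child)
{
	size_t len = parent->ldt_len * sizeof(descriptor_t);

	child->ldt = malloc(len);
	memcpy(child->ldt, parent->ldt, len);	/* private copy, no aliasing */
	child->ldt_len = parent->ldt_len;
}

int
main(void)
{
	descriptor_t table[4] = { 1, 2, 3, 4 };
	struct space parent = { table, 4 }, child;

	space_fork(&parent, &child);
	child.ldt[0] = 99;			/* child-only change */
	printf("parent %llu, child %llu\n", parent.ldt[0], child.ldt[0]);
	free(child.ldt);
	return 0;
}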
