📄 pmap.c

📁 OSKIT source code for building an operating system from components
💻 C
📖 Page 1 of 5
		addr = (vaddr_t)(vm_physmem[lcv].pmseg.pvhead +
				 (vm_physmem[lcv].end - vm_physmem[lcv].start));
		for (i = 0;
		     i < (vm_physmem[lcv].end - vm_physmem[lcv].start); i++) {
			simple_lock_init(
			    &vm_physmem[lcv].pmseg.pvhead[i].pvh_lock);
		}
	}

	/* now allocate attrs */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		vm_physmem[lcv].pmseg.attrs = (char *) addr;
		addr = (vaddr_t)(vm_physmem[lcv].pmseg.attrs +
				 (vm_physmem[lcv].end - vm_physmem[lcv].start));
	}

	/*
	 * now we need to free enough pv_entry structures to allow us to get
	 * the kmem_map/kmem_object allocated and inited (done after this
	 * function is finished).  to do this we allocate one bootstrap page out
	 * of kernel_map and use it to provide an initial pool of pv_entry
	 * structures.   we never free this page.
	 */

	pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
	if (pv_initpage == NULL)
		panic("pmap_init: pv_initpage");
	pv_cachedva = 0;   /* a VA we have allocated but not used yet */
	pv_nfpvents = 0;
	(void) pmap_add_pvpage(pv_initpage, FALSE);

	/*
	 * done: pmap module is up (and ready for business)
	 */

	pmap_initialized = TRUE;
}

/*
 * p v _ e n t r y   f u n c t i o n s
 */

/*
 * pv_entry allocation functions:
 *   the main pv_entry allocation functions are:
 *     pmap_alloc_pv: allocate a pv_entry structure
 *     pmap_free_pv: free one pv_entry
 *     pmap_free_pvs: free a list of pv_entrys
 *
 * the rest are helper functions
 */

/*
 * pmap_alloc_pv: inline function to allocate a pv_entry structure
 * => we lock pvalloc_lock
 * => if we fail, we call out to pmap_alloc_pvpage
 * => 3 modes:
 *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
 *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
 *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
 *			one now
 *
 * "try" is for optional functions like pmap_copy().
 */

__inline static struct pv_entry *
pmap_alloc_pv(pmap, mode)
	struct pmap *pmap;
	int mode;
{
	struct pv_page *pvpage;
	struct pv_entry *pv;

	simple_lock(&pvalloc_lock);

	if (pv_freepages.tqh_first != NULL) {
		pvpage = pv_freepages.tqh_first;
		pvpage->pvinfo.pvpi_nfree--;
		if (pvpage->pvinfo.pvpi_nfree == 0) {
			/* nothing left in this one? */
			TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
		}
		pv = pvpage->pvinfo.pvpi_pvfree;
#ifdef DIAGNOSTIC
		if (pv == NULL)
			panic("pmap_alloc_pv: pvpi_nfree off");
#endif
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
		pv_nfpvents--;  /* took one from pool */
	} else {
		pv = NULL;		/* need more of them */
	}

	/*
	 * if below low water mark or we didn't get a pv_entry we try and
	 * create more pv_entrys ...
	 */

	if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
		if (pv == NULL)
			pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
					       mode : ALLOCPV_NEED);
		else
			(void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
	}

	simple_unlock(&pvalloc_lock);
	return(pv);
}

/*
 * pmap_alloc_pvpage: maybe allocate a new pvpage
 *
 * if need_entry is false: try and allocate a new pv_page
 * if need_entry is true: try and allocate a new pv_page and return a
 *	new pv_entry from it.   if we are unable to allocate a pv_page
 *	we make a last ditch effort to steal a pv_page from some other
 *	mapping.    if that fails, we panic...
 *
 * => we assume that the caller holds pvalloc_lock
 */

static struct pv_entry *
pmap_alloc_pvpage(pmap, mode)
	struct pmap *pmap;
	int mode;
{
	struct vm_page *pg;
	struct pv_page *pvpage;
	int lcv, idx, npg, s;
	struct pv_entry *pv, *cpv, *prevpv;

	/*
	 * if we need_entry and we've got unused pv_pages, allocate from there
	 */

	if (mode != ALLOCPV_NONEED && pv_unusedpgs.tqh_first != NULL) {

		/* move it to pv_freepages list */
		pvpage = pv_unusedpgs.tqh_first;
		TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);

		/* allocate a pv_entry */
		pvpage->pvinfo.pvpi_nfree--;	/* can't go to zero */
		pv = pvpage->pvinfo.pvpi_pvfree;
#ifdef DIAGNOSTIC
		if (pv == NULL)
			panic("pmap_alloc_pvpage: pvpi_nfree off");
#endif
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
		pv_nfpvents--;  /* took one from pool */
		return(pv);
	}

	/*
	 * see if we've got a cached unmapped VA that we can map a page in.
	 * if not, try to allocate one.
	 */

	s = splimp();   /* must protect kmem_map/kmem_object with splimp! */
	if (pv_cachedva == 0) {
		pv_cachedva = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
		    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
		if (pv_cachedva == 0) {
			splx(s);
			goto steal_one;
		}
	}

	/*
	 * we have a VA, now let's try and allocate a page in the object
	 * note: we are still holding splimp to protect kmem_object
	 */

	if (!simple_lock_try(&uvmexp.kmem_object->vmobjlock)) {
		splx(s);
		goto steal_one;
	}

	pg = uvm_pagealloc(uvmexp.kmem_object, pv_cachedva -
			   vm_map_min(kernel_map),
			   NULL, UVM_PGA_USERESERVE);
	if (pg)
		pg->flags &= ~PG_BUSY;	/* never busy */

	simple_unlock(&uvmexp.kmem_object->vmobjlock);
	splx(s);	/* splimp now dropped */

	if (pg == NULL)
		goto steal_one;

	/*
	 * add a mapping for our new pv_page and free its entrys (save one!)
	 *
	 * NOTE: If we are allocating a PV page for the kernel pmap, the
	 * pmap is already locked!  (...but entering the mapping is safe...)
	 */

	pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
	pvpage = (struct pv_page *) pv_cachedva;
	pv_cachedva = 0;
	return(pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));

steal_one:
	/*
	 * if we don't really need a pv_entry right now, we can just return.
	 */

	if (mode != ALLOCPV_NEED)
		return(NULL);

	/*
	 * last ditch effort!   we couldn't allocate a free page to make
	 * more pv_entrys so we try and steal one from someone else.
	 */

	pv = NULL;
	for (lcv = 0 ; pv == NULL && lcv < vm_nphysseg ; lcv++) {
		npg = vm_physmem[lcv].end - vm_physmem[lcv].start;
		for (idx = 0 ; idx < npg ; idx++) {
			struct pv_head *pvhead = vm_physmem[lcv].pmseg.pvhead;

			if (pvhead->pvh_list == NULL)
				continue;	/* spot check */
			if (!simple_lock_try(&pvhead->pvh_lock))
				continue;
			cpv = prevpv = pvhead->pvh_list;
			while (cpv) {
				if (pmap_try_steal_pv(pvhead, cpv, prevpv))
					break;
				prevpv = cpv;
				cpv = cpv->pv_next;
			}
			simple_unlock(&pvhead->pvh_lock);
			/* got one?  break out of the loop! */
			if (cpv) {
				pv = cpv;
				break;
			}
		}
	}

	return(pv);
}

/*
 * pmap_try_steal_pv: try and steal a pv_entry from a pmap
 *
 * => return true if we did it!
 */

static boolean_t
pmap_try_steal_pv(pvh, cpv, prevpv)
	struct pv_head *pvh;
	struct pv_entry *cpv, *prevpv;
{
	pt_entry_t *ptep;	/* pointer to a PTE */

	/*
	 * we never steal kernel mappings or mappings from pmaps we can't lock
	 */

	if (cpv->pv_pmap == pmap_kernel() ||
	    !simple_lock_try(&cpv->pv_pmap->pm_obj.vmobjlock))
		return(FALSE);

	/*
	 * yes, we can try and steal it.   first we need to remove the
	 * mapping from the pmap.
	 */

	ptep = pmap_tmpmap_pvepte(cpv);
	if (*ptep & PG_W) {
		ptep = NULL;	/* wired page, avoid stealing this one */
	} else {
		*ptep = 0;		/* zap! */
		if (pmap_is_curpmap(cpv->pv_pmap))
			pmap_update_pg(cpv->pv_va);
		pmap_tmpunmap_pvepte(cpv);
	}
	if (ptep == NULL) {
		simple_unlock(&cpv->pv_pmap->pm_obj.vmobjlock);
		return(FALSE);	/* wired page, abort! */
	}
	cpv->pv_pmap->pm_stats.resident_count--;
	if (cpv->pv_ptp && cpv->pv_ptp->wire_count)
		/* drop PTP's wired count */
		cpv->pv_ptp->wire_count--;

	/*
	 * XXX: if wire_count goes to one the PTP could be freed, however,
	 * we'd have to lock the page queues (etc.) to do that and it could
	 * cause deadlock headaches.   besides, the pmap we just stole from
	 * may want the mapping back anyway, so leave the PTP around.
	 */

	/*
	 * now we need to remove the entry from the pvlist
	 */

	if (cpv == pvh->pvh_list)
		pvh->pvh_list = cpv->pv_next;
	else
		prevpv->pv_next = cpv->pv_next;
	return(TRUE);
}

/*
 * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
 *
 * => caller must hold pvalloc_lock
 * => if need_entry is true, we allocate and return one pv_entry
 */

static struct pv_entry *
pmap_add_pvpage(pvp, need_entry)
	struct pv_page *pvp;
	boolean_t need_entry;
{
	int tofree, lcv;

	/* do we need to return one? */
	tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;

	pvp->pvinfo.pvpi_pvfree = NULL;
	pvp->pvinfo.pvpi_nfree = tofree;
	for (lcv = 0 ; lcv < tofree ; lcv++) {
		pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
		pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
	}
	if (need_entry)
		TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
	else
		TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	pv_nfpvents += tofree;
	return((need_entry) ? &pvp->pvents[lcv] : NULL);
}

/*
 * pmap_free_pv_doit: actually free a pv_entry
 *
 * => do not call this directly!  instead use either
 *    1. pmap_free_pv ==> free a single pv_entry
 *    2. pmap_free_pvs => free a list of pv_entrys
 * => we must be holding pvalloc_lock
 */

__inline static void
pmap_free_pv_doit(pv)
	struct pv_entry *pv;
{
	struct pv_page *pvp;

	pvp = (struct pv_page *) i386_trunc_page(pv);
	pv_nfpvents++;
	pvp->pvinfo.pvpi_nfree++;

	/* nfree == 1 => fully allocated page just became partly allocated */
	if (pvp->pvinfo.pvpi_nfree == 1) {
		TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
	}

	/* free it */
	pv->pv_next = pvp->pvinfo.pvpi_pvfree;
	pvp->pvinfo.pvpi_pvfree = pv;

	/*
	 * are all pv_page's pv_entry's free?  move it to unused queue.
	 */

	if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
		TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	}
}

/*
 * pmap_free_pv: free a single pv_entry
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pv(pmap, pv)
	struct pmap *pmap;
	struct pv_entry *pv;
{
	simple_lock(&pvalloc_lock);
	pmap_free_pv_doit(pv);

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */

	if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}

/*
 * pmap_free_pvs: free a list of pv_entrys
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pvs(pmap, pvs)
	struct pmap *pmap;
	struct pv_entry *pvs;
{
	struct pv_entry *nextpv;

	simple_lock(&pvalloc_lock);

	for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
		nextpv = pvs->pv_next;
		pmap_free_pv_doit(pvs);
	}

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */

	if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}

/*
 * pmap_free_pvpage: try and free an unused pv_page structure
 *
 * => assume caller is holding the pvalloc_lock and that
 *	there is a page on the pv_unusedpgs list
 * => if we can't get a lock on the kmem_map we try again later
 * => note: analysis of MI kmem_map usage [i.e. malloc/free] shows
 *	that if we can lock the kmem_map then we are not already
 *	holding kmem_object's lock.
 */

static void
pmap_free_pvpage()
{
	int s;
	struct vm_map *map;
	vm_map_entry_t dead_entries;
	struct pv_page *pvp;

	s = splimp(); /* protect kmem_map */

	pvp = pv_unusedpgs.tqh_first;

	/*
	 * note: watch out for pv_initpage which is allocated out of
	 * kernel_map rather than kmem_map.
	 */

	if (pvp == pv_initpage)
		map = kernel_map;
	else
		map = kmem_map;

	if (vm_map_lock_try(map)) {

		/* remove pvp from pv_unusedpgs */
		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);

		/* unmap the page */
		dead_entries = NULL;
		(void)uvm_unmap_remove(map, (vaddr_t) pvp,
				       ((vaddr_t) pvp) + PAGE_SIZE,
				       &dead_entries);
		vm_map_unlock(map);

		if (dead_entries != NULL)
			uvm_unmap_detach(dead_entries, 0);

		pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
	}

	if (pvp == pv_initpage)
		/* no more initpage, we've freed it */
		pv_initpage = NULL;

	splx(s);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a pv_head list
 *   pmap_remove_pv: remove a mapping from a pv_head list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a pv_head list
 *
 * => caller should hold the proper lock on pmap_main_lock
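Editor's note: the allocator in this listing manages pv_entry structures a page at a time. pmap_add_pvpage() threads every entry of a freshly mapped pv_page onto an intrusive singly-linked free list, pmap_alloc_pv() pops entries from that list, and pmap_free_pv_doit() pushes them back, moving the page between the pv_freepages and pv_unusedpgs queues as its free count changes. Below is a minimal user-space sketch of just the per-page free-list technique. The names (entry, entry_page, page_init, page_alloc, page_free, FAKE_PAGE_SIZE) are hypothetical stand-ins, not part of pmap.c, and the queue movement, water marks, and pvalloc_lock of the real code are deliberately omitted.

/*
 * Sketch of the pv_page free-list idea: carve one page-sized block
 * into fixed-size entries and link the free ones through an embedded
 * pointer, so allocation and free are O(1) pointer swaps.
 */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 4096	/* stand-in for PAGE_SIZE */

struct entry {			/* stand-in for struct pv_entry */
	struct entry *next;	/* free-list linkage (pv_next) */
	/* payload fields would live here */
};

struct entry_page {		/* stand-in for struct pv_page */
	struct entry *freelist;	/* head of per-page free list (pvpi_pvfree) */
	int nfree;		/* free entries remaining (pvpi_nfree) */
	struct entry ents[(FAKE_PAGE_SIZE - sizeof(struct entry *) -
	    sizeof(int)) / sizeof(struct entry)];
};

/* thread every entry onto the free list, as pmap_add_pvpage() does */
static void
page_init(struct entry_page *pg)
{
	int i;

	pg->freelist = NULL;
	pg->nfree = 0;
	for (i = 0; i < (int)(sizeof(pg->ents) / sizeof(pg->ents[0])); i++) {
		pg->ents[i].next = pg->freelist;
		pg->freelist = &pg->ents[i];
		pg->nfree++;
	}
}

/* pop one entry off the page's list, as pmap_alloc_pv() does */
static struct entry *
page_alloc(struct entry_page *pg)
{
	struct entry *e = pg->freelist;

	if (e != NULL) {
		pg->freelist = e->next;
		pg->nfree--;
	}
	return e;
}

/* push an entry back, as pmap_free_pv_doit() does */
static void
page_free(struct entry_page *pg, struct entry *e)
{
	e->next = pg->freelist;
	pg->freelist = e;
	pg->nfree++;
}

int
main(void)
{
	struct entry_page *pg = malloc(sizeof(*pg));
	struct entry *e;

	if (pg == NULL)
		return 1;
	page_init(pg);
	printf("entries per page: %d\n", pg->nfree);
	e = page_alloc(pg);
	printf("after alloc: %d free\n", pg->nfree);
	page_free(pg, e);
	printf("after free:  %d free\n", pg->nfree);
	free(pg);
	return 0;
}

Because the bookkeeping sits at the start of the page and the entries fill the rest, the owning page can be recovered from any entry by truncating its address to a page boundary, which is exactly what the i386_trunc_page(pv) call in pmap_free_pv_doit() relies on.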
