
📄 uvm_km.c

📁 OSKIT source code for component-based operating system development
💻 C
📖 Page 1 of 2
		}
		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		KASSERT((pp->flags & PG_BUSY) == 0);
		KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
		KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	vm_map_t map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
		    map, obj, size, flags);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	      0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
			!= KERN_SUCCESS)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map/kmem_object
		 * (because if pmap_enter wants to allocate out of kmem_object
		 * it will need to lock it itself!)
		 */

		if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_ALL);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}
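/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * It shows how a malloc()-style caller might pair uvm_km_kmemalloc()
 * with uvm_km_free().  The kmem_map / uvmexp.kmem_object arguments are
 * assumed here (they are what the kernel malloc conventionally passes);
 * UVM_KMF_NOWAIT makes the allocator return 0 instead of sleeping.
 */
#if 0	/* example only, compiled out */
static void
example_kmemalloc(void)
{
	vaddr_t va;

	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
	    PAGE_SIZE, UVM_KMF_NOWAIT);
	if (va == 0)
		return;			/* no VA or no pages; caller must cope */
	/* ... use the wired memory mapped at va ... */
	uvm_km_free(kmem_map, va, PAGE_SIZE);
}
#endif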
/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	vm_map_entry_t dead_entries;

	vm_map_lock(map);
	(void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
			 &dead_entries);
	wakeup(map);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	vm_map_t map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	      UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
					      UVM_INH_NONE, UVM_ADV_RANDOM,
					      0)) != KERN_SUCCESS)) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released
		 */

		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    FALSE, "km_alloc", 0);
			continue;   /* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (__predict_false(pg == NULL)) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/*
		 * map it in; note we're never called with an intrsafe
		 * object, so we always use regular old pmap_enter().
		 */

		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);
	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	vm_map_t map;
	vsize_t size;
{
	return(uvm_km_valloc_align(map, size, 0));
}
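/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * It contrasts the two allocators above: uvm_km_alloc1() hands back
 * wired, optionally zeroed pages right away (sleeping for memory if
 * needed), while uvm_km_valloc() only reserves VA backed by
 * uvm.kernel_object and lets the fault handler supply zero-fill pages
 * on first touch.
 */
#if 0	/* example only, compiled out */
static void
example_alloc1_vs_valloc(void)
{
	vaddr_t wired, lazy;

	wired = uvm_km_alloc1(kernel_map, PAGE_SIZE, TRUE);	/* zeroed now */
	lazy = uvm_km_valloc(kernel_map, PAGE_SIZE);	/* filled at fault time */
	if (wired)
		uvm_km_free(kernel_map, wired, PAGE_SIZE);
	if (lazy)
		uvm_km_free(kernel_map, lazy, PAGE_SIZE);
}
#endif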
vaddr_t
uvm_km_valloc_align(map, size, align)
	vm_map_t map;
	vsize_t size;
	vsize_t align;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
					    UVM_INH_NONE, UVM_ADV_RANDOM,
					    0)) != KERN_SUCCESS)) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_prefer_wait(map, size, prefer)
	vm_map_t map;
	vsize_t size;
	voff_t prefer;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.   will be demand filled
		 * by kernel_object.
		 */

		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == KERN_SUCCESS)) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

vaddr_t
uvm_km_valloc_wait(map, size)
	vm_map_t map;
	vsize_t size;
{
	return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
}
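/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * uvm_km_valloc_wait() is the blocking flavor of uvm_km_valloc(): rather
 * than failing when the map is full, it tsleep()s on the map until
 * uvm_km_free_wakeup() signals that space was returned.  The only way to
 * get 0 back is a request bigger than the entire map.
 */
#if 0	/* example only, compiled out */
static vaddr_t
example_valloc_wait(vsize_t len)
{
	vaddr_t va;

	va = uvm_km_valloc_wait(kernel_map, len);	/* may sleep for VA */
	if (va == 0)
		panic("example: request exceeds kernel_map");
	return (va);
}
#endif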
/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	vm_map_t map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	vm_map_t map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
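/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The pool allocator obtains and returns single pages through the pair
 * above.  When the pmap defines PMAP_MAP_POOLPAGE the page is mapped
 * directly and no map entry is consumed; otherwise the page comes from
 * uvm_km_kmemalloc() on the map and object passed in, assumed here to
 * be kmem_map and uvmexp.kmem_object.
 */
#if 0	/* example only, compiled out */
static void
example_poolpage(void)
{
	vaddr_t va;

	va = uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object, FALSE);
	if (va == 0)
		return;			/* FALSE means: fail, don't sleep */
	/* ... hand the page at va to the pool ... */
	uvm_km_free_poolpage1(kmem_map, va);
}
#endif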
