uvm_aobj.c

来自「基于组件方式开发操作系统的OSKIT源代码」· C语言 代码 · 共 1,553 行 · 第 1/3 页

C
1,553
字号
	/*
	 * NOTE(review): this span begins midway through uao_create();
	 * the function's opening (argument handling, aobj allocation)
	 * is outside the visible region.
	 *
	 * Set up swap-slot bookkeeping unless the caller asked for a
	 * kernel object with swap deferred (UAO_FLAG_KERNOBJ without
	 * UAO_FLAG_KERNSWAP).
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		/* late kernel-swap setup must not sleep for memory */
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			/* small object: flat array, one swap slot per page */
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			/*
			 * kernel object created earlier with swap disabled:
			 * just enable swap now; the remaining fields were
			 * initialized at creation time.
			 */
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 * => idempotent: a static flag guards against double initialization
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);
	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 *    (no simple_unlock() here on purpose: uao_detach_locked unlocks
 *    or frees the object before returning)
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = TAILQ_FIRST(&uobj->memq);
	     pg != NULL;
	     pg = TAILQ_NEXT(pg, listq)) {
		if (pg->flags & PG_BUSY) {
			/* busy page: the I/O owner will free it later */
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(pg, VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	or block.
 * => if PGO_ALLPAGE is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  
thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *	XXXJRT We never actually do this, as we are "flushing" anonymous
 *	XXXJRT memory, which doesn't have persistent backing store.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define	UAO_HASH_PENALTY 4	/* XXX: a guess */

boolean_t
uao_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp, *ppnext;
	boolean_t retval, by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* XXX: shut up gcc */

	retval = TRUE;	/* default to success */

	/*
	 * clip the flush range to the object, and decide whether to walk
	 * the memq list or look pages up by offset (see the traversal
	 * note above).
	 */
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist,
		    "<- done (no work to do)",0,0,0,0);
		return (retval);
	}

	/*
	 * now do it.  note: we must update ppnext in the body of loop or we
	 * will get stuck.  we need to use ppnext because we may free "pp"
	 * before doing the next loop.
	 */
	if (by_list) {
		pp = uobj->memq.tqh_first;
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;	/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {
		if (by_list) {
			/* advance first: "pp" may be freed below */
			ppnext = TAILQ_NEXT(pp, listq);

			/* range check */
			if (pp->offset < start || pp->offset >= stop)
				continue;
		} else {
			curoff += PAGE_SIZE;
			if (curoff < stop)
				ppnext = uvm_pagelookup(uobj, curoff);

			/* null check */
			if (pp == NULL)
				continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			/* ...and deactivate the page. */
			uvm_pagedeactivate(pp);

			continue;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/*
			 * mark the page as released if its busy.
			 * (the I/O owner frees it when un-busying.)
			 */
			if (pp->flags & PG_BUSY) {
				pp->flags |= PG_RELEASED;
				continue;
			}

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);
			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_pagefree(pp);

			continue;

		default:
			panic("uao_flush: weird flags");
		}
#ifdef DIAGNOSTIC
		panic("uao_flush: unreachable code");
#endif
	}

	uvm_unlock_pageq();

	UVMHIST_LOG(maphist,
	    "<- done, rv=%d",retval,0,0,0);
	return (retval);
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */
	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */
		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			/*
			 * NOTE(review): SOURCE is truncated here (this scrape
			 * is page 1 of 3); the remainder of uao_get() is not
			 * visible in this view.
			 */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?