subr_pool.c

From "OSKIT source code for component-based operating system development" · C source code · 1,799 lines total · page 1 of 3

C
1,799
字号
		panic("pool_get: must have NOWAIT");#endif	simple_lock(&pp->pr_slock);	pr_enter(pp, file, line); startover:	/*	 * Check to see if we've reached the hard limit.  If we have,	 * and we can wait, then wait until an item has been returned to	 * the pool.	 */#ifdef DIAGNOSTIC	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {		pr_leave(pp);		simple_unlock(&pp->pr_slock);		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);	}#endif#ifndef OSKIT	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {			/*			 * XXX: A warning isn't logged in this case.  Should			 * it be?			 */			pp->pr_flags |= PR_WANTED;			pr_leave(pp);			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);			pr_enter(pp, file, line);			goto startover;		}		/*		 * Log a message that the hard limit has been hit.		 */		if (pp->pr_hardlimit_warning != NULL &&		    ratecheck(&pp->pr_hardlimit_warning_last,			      &pp->pr_hardlimit_ratecap))			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);		if (flags & PR_URGENT)			panic("pool_get: urgent");		pp->pr_nfail++;		pr_leave(pp);		simple_unlock(&pp->pr_slock);		return (NULL);	}#endif	/*	 * The convention we use is that if `curpage' is not NULL, then	 * it points at a non-empty bucket. In particular, `curpage'	 * never points at a page header which has PR_PHINPAGE set and	 * has no items in its bucket.	 */	if ((ph = pp->pr_curpage) == NULL) {		void *v;#ifdef DIAGNOSTIC		if (pp->pr_nitems != 0) {			simple_unlock(&pp->pr_slock);			printf("pool_get: %s: curpage NULL, nitems %u\n",			    pp->pr_wchan, pp->pr_nitems);			panic("pool_get: nitems inconsistent\n");		}#endif		/*		 * Call the back-end page allocator for more memory.		 * Release the pool lock, as the back-end page allocator		 * may block.		 
*/		pr_leave(pp);		simple_unlock(&pp->pr_slock);		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);		simple_lock(&pp->pr_slock);		pr_enter(pp, file, line);		if (v == NULL) {			/*			 * We were unable to allocate a page, but			 * we released the lock during allocation,			 * so perhaps items were freed back to the			 * pool.  Check for this case.			 */			if (pp->pr_curpage != NULL)				goto startover;			if (flags & PR_URGENT)				panic("pool_get: urgent");			if ((flags & PR_WAITOK) == 0) {				pp->pr_nfail++;				pr_leave(pp);				simple_unlock(&pp->pr_slock);				return (NULL);			}			/*			 * Wait for items to be returned to this pool.			 *			 * XXX: we actually want to wait just until			 * the page allocator has memory again. Depending			 * on this pool's usage, we might get stuck here			 * for a long time.			 *			 * XXX: maybe we should wake up once a second and			 * try again?			 */			pp->pr_flags |= PR_WANTED;			pr_leave(pp);			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);			pr_enter(pp, file, line);			goto startover;		}		/* We have more memory; add it to the pool */		pp->pr_npagealloc++;		pool_prime_page(pp, v);		/* Start the allocation process over. */		goto startover;	}	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {		pr_leave(pp);		simple_unlock(&pp->pr_slock);		panic("pool_get: %s: page empty", pp->pr_wchan);	}#ifdef DIAGNOSTIC	if (__predict_false(pp->pr_nitems == 0)) {		pr_leave(pp);		simple_unlock(&pp->pr_slock);		printf("pool_get: %s: items on itemlist, nitems %u\n",		    pp->pr_wchan, pp->pr_nitems);		panic("pool_get: nitems inconsistent\n");	}#endif	pr_log(pp, v, PRLOG_GET, file, line);#ifdef DIAGNOSTIC	if (__predict_false(pi->pi_magic != PI_MAGIC)) {		pr_printlog(pp, pi, printf);		panic("pool_get(%s): free list modified: magic=%x; page %p;"		       " item addr %p\n",			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);	}#endif	/*	 * Remove from item list.	 
*/	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);	pp->pr_nitems--;	pp->pr_nout++;	if (ph->ph_nmissing == 0) {#ifdef DIAGNOSTIC		if (__predict_false(pp->pr_nidle == 0))			panic("pool_get: nidle inconsistent");#endif		pp->pr_nidle--;	}	ph->ph_nmissing++;	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {#ifdef DIAGNOSTIC		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {			pr_leave(pp);			simple_unlock(&pp->pr_slock);			panic("pool_get: %s: nmissing inconsistent",			    pp->pr_wchan);		}#endif		/*		 * Find a new non-empty page header, if any.		 * Start search from the page head, to increase		 * the chance for "high water" pages to be freed.		 *		 * Migrate empty pages to the end of the list.  This		 * will speed the update of curpage as pages become		 * idle.  Empty pages intermingled with idle pages		 * is no big deal.  As soon as a page becomes un-empty,		 * it will move back to the head of the list.		 */		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;		     ph = TAILQ_NEXT(ph, ph_pagelist))			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)				break;		pp->pr_curpage = ph;	}	pp->pr_nget++;	/*	 * If we have a low water mark and we are now below that low	 * water mark, add more items to the pool.	 */	if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) {		/*		 * XXX: Should we log a warning?  Should we set up a timeout		 * to try again in a second or so?  The latter could break		 * a caller's assumptions about interrupt protection, etc.		 */	}	pr_leave(pp);	simple_unlock(&pp->pr_slock);	return (v);}/* * Internal version of pool_put().  Pool is already locked/entered. 
*/static voidpool_do_put(struct pool *pp, void *v, const char *file, long line){	struct pool_item *pi = v;	struct pool_item_header *ph;	caddr_t page;	int s;	page = (caddr_t)((u_long)v & pp->pr_pagemask);#ifdef DIAGNOSTIC	if (__predict_false(pp->pr_nout == 0)) {		printf("pool %s: putting with none out\n",		    pp->pr_wchan);		panic("pool_put");	}#endif	pr_log(pp, v, PRLOG_PUT, file, line);	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {		pr_printlog(pp, NULL, printf);		panic("pool_put: %s: page header missing", pp->pr_wchan);	}#ifdef LOCKDEBUG	/*	 * Check if we're freeing a locked simple lock.	 */	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);#endif	/*	 * Return to item list.	 */#ifdef DIAGNOSTIC	pi->pi_magic = PI_MAGIC;#endif#ifdef DEBUG	{		int i, *ip = v;		for (i = 0; i < pp->pr_size / sizeof(int); i++) {			*ip++ = PI_MAGIC;		}	}#endif	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);	ph->ph_nmissing--;	pp->pr_nput++;	pp->pr_nitems++;	pp->pr_nout--;	/* Cancel "pool empty" condition if it exists */	if (pp->pr_curpage == NULL)		pp->pr_curpage = ph;	if (pp->pr_flags & PR_WANTED) {		pp->pr_flags &= ~PR_WANTED;		if (ph->ph_nmissing == 0)			pp->pr_nidle++;		wakeup((caddr_t)pp);		return;	}	/*	 * If this page is now complete, do one of two things:	 *	 *	(1) If we have more pages than the page high water	 *	    mark, free the page back to the system.	 *	 *	(2) Move it to the end of the page list, so that	 *	    we minimize our chances of fragmenting the	 *	    pool.  Idle pages migrate to the end (along with	 *	    completely empty pages, so that we find un-empty	 *	    pages more quickly when we update curpage) of the	 *	    list so they can be more easily swept up by	 *	    the pagedaemon when pages are scarce.	 
*/	if (ph->ph_nmissing == 0) {		pp->pr_nidle++;		if (pp->pr_npages > pp->pr_maxpages) {			pr_rmpage(pp, ph);		} else {			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);			/*			 * Update the timestamp on the page.  A page must			 * be idle for some period of time before it can			 * be reclaimed by the pagedaemon.  This minimizes			 * ping-pong'ing for memory.			 */			s = splclock();			ph->ph_time = mono_time;			splx(s);			/*			 * Update the current page pointer.  Just look for			 * the first page with any free items.			 *			 * XXX: Maybe we want an option to look for the			 * page with the fewest available items, to minimize			 * fragmentation?			 */			for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;			     ph = TAILQ_NEXT(ph, ph_pagelist))				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)					break;			pp->pr_curpage = ph;		}	}	/*	 * If the page has just become un-empty, move it to the head of	 * the list, and make it the current page.  The next allocation	 * will get the item from this page, instead of further fragmenting	 * the pool.	 */	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);		pp->pr_curpage = ph;	}}/* * Return resource to the pool; must be called at appropriate spl level */void_pool_put(struct pool *pp, void *v, const char *file, long line){	simple_lock(&pp->pr_slock);	pr_enter(pp, file, line);	pool_do_put(pp, v, file, line);	pr_leave(pp);	simple_unlock(&pp->pr_slock);}/* * Add N items to the pool. 
*/intpool_prime(struct pool *pp, int n, caddr_t storage){	caddr_t cp;	int newnitems, newpages;#ifdef DIAGNOSTIC	if (__predict_false(storage && !(pp->pr_roflags & PR_STATIC)))		panic("pool_prime: static");	/* !storage && static caught below */#endif	simple_lock(&pp->pr_slock);	newnitems = pp->pr_minitems + n;	newpages =		roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage		- pp->pr_minpages;	while (newpages-- > 0) {		if (pp->pr_roflags & PR_STATIC) {			cp = storage;			storage += pp->pr_pagesz;		} else {			simple_unlock(&pp->pr_slock);			cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);			simple_lock(&pp->pr_slock);		}		if (cp == NULL) {			simple_unlock(&pp->pr_slock);			return (ENOMEM);		}		pp->pr_npagealloc++;		pool_prime_page(pp, cp);		pp->pr_minpages++;	}	pp->pr_minitems = newnitems;	if (pp->pr_minpages >= pp->pr_maxpages)		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */	simple_unlock(&pp->pr_slock);	return (0);}/* * Add a page worth of items to the pool. * * Note, we must be called with the pool descriptor LOCKED. */static voidpool_prime_page(struct pool *pp, caddr_t storage){	struct pool_item *pi;	struct pool_item_header *ph;	caddr_t cp = storage;	unsigned int align = pp->pr_align;	unsigned int ioff = pp->pr_itemoffset;	int s, n;	if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);	if ((pp->pr_roflags & PR_PHINPAGE) != 0) {		ph = (struct pool_item_header *)(cp + pp->pr_phoffset);	} else {		s = splhigh();		ph = pool_get(&phpool, PR_URGENT);		splx(s);		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],				 ph, ph_hashlist);	}	/*	 * Insert page header.	 */	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);	TAILQ_INIT(&ph->ph_itemlist);	ph->ph_page = storage;	ph->ph_nmissing = 0;	memset(&ph->ph_time, 0, sizeof(ph->ph_time));	pp->pr_nidle++;	/*	 * Color this page.	 
*/	cp = (caddr_t)(cp + pp->pr_curcolor);	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)		pp->pr_curcolor = 0;	/*	 * Adjust storage to apply aligment to `pr_itemoffset' in each item.	 */	if (ioff != 0)		cp = (caddr_t)(cp + (align - ioff));	/*	 * Insert remaining chunks on the bucket list.	 */	n = pp->pr_itemsperpage;	pp->pr_nitems += n;	while (n--) {		pi = (struct pool_item *)cp;		/* Insert on page list */		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);#ifdef DIAGNOSTIC		pi->pi_magic = PI_MAGIC;#endif		cp = (caddr_t)(cp + pp->pr_size);	}	/*	 * If the pool was depleted, point at the new page.	 */	if (pp->pr_curpage == NULL)		pp->pr_curpage = ph;	if (++pp->pr_npages > pp->pr_hiwat)		pp->pr_hiwat = pp->pr_npages;}/* * Like pool_prime(), except this is used by pool_get() when nitems * drops below the low water mark.  This is used to catch up nitmes * with the low water mark. * * Note 1, we never wait for memory here, we let the caller decide what to do. * * Note 2, this doesn't work with static pools. * * Note 3, we must be called with the pool already locked, and we return * with it locked. */static intpool_catchup(struct pool *pp){	caddr_t cp;	int error = 0;	if (pp->pr_roflags & PR_STATIC) {		/*		 * We dropped below the low water mark, and this is not a		 * good thing.  Log a warning.		 *		 * XXX: rate-limit this?		 */		printf("WARNING: static pool `%s' dropped below low water "		    "mark\n", pp->pr_wchan);		return (0);	}	while (pp->pr_nitems < pp->pr_minitems) {		/*		 * Call the page back-end allocator for more memory.		 *		 * XXX: We never wait, so should we bother unlocking		 * the pool descriptor?		 
*/		simple_unlock(&pp->pr_slock);		cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);		simple_lock(&pp->pr_slock);		if (__predict_false(cp == NULL)) {			error = ENOMEM;			break;		}		pp->pr_npagealloc++;		pool_prime_page(pp, cp);	}	return (error);}voidpool_setlowat(struct pool *pp, int n){	int error;	simple_lock(&pp->pr_slock);	pp->pr_minitems = n;	pp->pr_minpages = (n == 0)		? 0		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;	/* Make sure we're caught up with the newly-set low water mark. */	if ((pp->pr_nitems < pp->pr_minitems) &&	    (error = pool_catchup(pp)) != 0) {		/*		 * XXX: Should we log a warning?  Should we set up a timeout		 * to try again in a second or so?  The latter could break		 * a caller's assumptions about interrupt protection, etc.		 */	}	simple_unlock(&pp->pr_slock);}voidpool_sethiwat(struct pool *pp, int n){	simple_lock(&pp->pr_slock);	pp->pr_maxpages = (n == 0)		? 0		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;	simple_unlock(&pp->pr_slock);}voidpool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap){	simple_lock(&pp->pr_slock);	pp->pr_hardlimit = n;	pp->pr_hardlimit_warning = warnmess;	pp->pr_hardlimit_ratecap.tv_sec = ratecap;	pp->pr_hardlimit_warning_last.tv_sec = 0;	pp->pr_hardlimit_warning_last.tv_usec = 0;	/*	 * In-line version of pool_sethiwat(), because we don't want to	 * release the lock.	 */	pp->pr_maxpages = (n == 0)		? 0		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?