subr_pool.c

来自「基于组件方式开发操作系统的OSKIT源代码」· C语言 代码 · 共 1,799 行 · 第 1/3 页

C
1,799
字号
/*	$NetBSD: subr_pool.c,v 1.48 2000/12/11 05:22:56 thorpej Exp $	*//*- * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace * Simulation Facility, NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright *    notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright *    notice, this list of conditions and the following disclaimer in the *    documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software *    must display the following acknowledgement: *	This product includes software developed by the NetBSD *	Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its *    contributors may be used to endorse or promote products derived *    from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED.  
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according
 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
 * in the pool structure and the individual pool items are on a linked list
 * headed by `ph_itemlist' in each page header. The memory for building
 * the page list is either taken from the allocated pages themselves (for
 * small pool items) or taken from an internal pool of page headers (`phpool').
 */

/* List of all pools in the system (linked via pr_poollist). */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for off-page page header structures (see pr_rmpage()). */
static struct pool phpool;

/* # of seconds to retain an idle page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp.
*/
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

/*
 * Per-page bookkeeping.  For small items the header lives at the end of
 * the page itself (PR_PHINPAGE); otherwise it is allocated from `phpool'
 * and looked up through the pool's hash table via PR_HASH_INDEX().
 */
struct pool_item_header {
	/* Page headers */
	TAILQ_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	LIST_ENTRY(pool_item_header)
				ph_hashlist;	/* Off-page page headers */
	int			ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
};

/*
 * A free item.  The free-list linkage is stored inside the item itself,
 * which is why pool_init() rounds pr_size up to sizeof(struct pool_item).
 */
struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;		/* stamped on free items to catch corruption */
#endif
#define	PI_MAGIC 0xdeadbeef
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

/* Hash an item's page address into the off-page header hash table. */
#define	PR_HASH_INDEX(pp,addr) \
	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are layed on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */

/* The cache group pool. */
static struct pool pcgpool;

/* The pool cache group.
*/
#define	PCG_NOBJECTS		16

struct pool_cache_group {
	TAILQ_ENTRY(pool_cache_group)
		pcg_list;	/* link in the pool cache's group list */
	u_int	pcg_avail;	/* # available objects */
				/* pointers to the objects */
	void	*pcg_objects[PCG_NOBJECTS];
};

/* Forward declarations of file-local helpers. */
static void	pool_cache_reclaim(struct pool_cache *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t);
static void	*pool_page_alloc(unsigned long, int, int);
static void	pool_page_free(void *, unsigned long, int);

static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

/*
 * Pool log entry. An array of these is allocated in pool_create().
 */
struct pool_log {
	const char	*pl_file;	/* call site recorded by pr_log() */
	long		pl_line;
	int		pl_action;	/* PRLOG_GET, PRLOG_PUT, or 0 = unused */
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;	/* item the action applied to */
};

/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

#ifdef DIAGNOSTIC
/*
 * pr_log:
 *	Record a get/put event for item `v' in the pool's circular log.
 *	A no-op unless the pool was created with PR_LOGGING.  The log
 *	wraps around, overwriting the oldest entry when full.
 */
static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

/*
 * pr_printlog:
 *	Dump the pool's event log via `pr'.  If `pi' is non-NULL, only
 *	entries that refer to that item are printed; otherwise all
 *	populated entries are printed, oldest first.
 */
static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				/* NOTE(review): pl_line is a (signed) long
				 * printed with %lu — harmless for the line
				 * numbers seen in practice. */
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

/*
 * pr_enter:
 *	Mark the pool as entered by the caller, panicking on re-entry.
 *	Together with pr_leave() this detects recursive use of a pool
 *	while its lock is held.
 */
static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{
	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

/*
 * pr_leave:
 *	Clear the entered marker set by pr_enter(); panics if the pool
 *	was not marked entered.
 */
static __inline void
pr_leave(struct pool *pp)
{
	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

/*
 * pr_enter_check:
 *	Report (via `pr') whether the pool is currently marked entered.
 *	Used by the pool_print* diagnostics.
 */
static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{
	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
/* Non-DIAGNOSTIC kernels compile the diagnostics away entirely. */
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* DIAGNOSTIC */

/*
 * Return the pool page header based on page address.
 * For PR_PHINPAGE pools the header is at a fixed offset inside the page;
 * otherwise it is found by searching the pool's hash table.  Returns
 * NULL if no off-page header matches `page'.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
	     ph != NULL;
	     ph = LIST_NEXT(ph, ph_hashlist)) {
		if (ph->ph_page == page)
			return (ph);
	}
	return (NULL);
}

/*
 * Remove a page from the pool: unlink it, return the backing memory to
 * the pool's page allocator, release an off-page header (if any) back
 * to phpool, and re-pick pr_curpage if it pointed at this page.
 * Caller is assumed to hold the pool's lock.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph)
{
	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it.
	 */
	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
	(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
	pp->pr_npages--;
	pp->pr_npagefree++;

	if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		int s;

		/*
		 * Off-page header: unhash it and give it back to phpool.
		 * splhigh() because phpool may be used from interrupt level.
		 */
		LIST_REMOVE(ph, ph_hashlist);
		s = splhigh();
		pool_put(&phpool, ph);
		splx(s);
	}

	if (pp->pr_curpage == ph) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase the
		 * chance for "high water" pages to be freed.
		 */
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}
}

/*
 * Allocate and initialize a pool.
 *
 * Dynamically allocates the pool structure (freed again by pool_destroy()
 * via PR_FREEHEADER) and optionally pre-populates it with `nitems' items.
 * Returns NULL if the header allocation or the priming fails.
 */
struct pool *
pool_create(size_t size, u_int align, u_int ioff, int nitems,
    const char *wchan, size_t pagesz,
    void *(*alloc)(unsigned long, int, int),
    void (*release)(void *, unsigned long, int),
    int mtype)
{
	struct pool *pp;
	int flags;

	pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
	if (pp == NULL)
		return (NULL);

	/* PR_FREEHEADER: pool_destroy() must free this header too. */
	flags = PR_FREEHEADER;
	pool_init(pp, size, align, ioff, flags, wchan, pagesz,
		  alloc, release, mtype);

	if (nitems != 0) {
		if (pool_prime(pp, nitems, NULL) != 0) {
			pool_destroy(pp);
			return (NULL);
		}
	}

	return (pp);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
*/
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, size_t pagesz,
    void *(*alloc)(unsigned long, int, int),
    void (*release)(void *, unsigned long, int),
    int mtype)
{
	int off, slack, i;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz))
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL && release == NULL) {
		/* Neither given: fall back to the default page allocator. */
		alloc = pool_page_alloc;
		release = pool_page_free;
		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
	} else if ((alloc != NULL && release != NULL) == 0) {
		/* If you specify one, must specify both. */
		panic("pool_init: must specify alloc and release together");
	}

	if (pagesz == 0)
		pagesz = PAGE_SIZE;

	if (align == 0)
		align = ALIGN(1);

	/* Items must be big enough to hold the free-list linkage. */
	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = ALIGN(size);
	if (size > pagesz)
		panic("pool_init: pool item size (%lu) too large",
		      (u_long)size);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INIT(&pp->pr_pagelist);
	TAILQ_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
	pp->pr_alloc = alloc;
	pp->pr_free = release;
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go on a hash table, so we can match a returned item
	 * with its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
			pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = pagesz;
		for (i = 0; i < PR_HASHTABSIZE; i++) {
			LIST_INIT(&pp->pr_hashtab[i]);
		}
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

	if (flags & PR_LOGGING) {
		/*
		 * If malloc() is not usable yet (kmem_map == NULL) or the
		 * log buffer cannot be allocated, silently disable logging.
		 */
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", 0, 0, 0, 0);
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", 0, 0, 0, 0);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);
}

/*
 * De-commision a pool resource.
 *
 * Tears down all caches layered on the pool, releases every page
 * (unless the pool is PR_STATIC), unlinks the pool from the global
 * list, and frees the log buffer and, for PR_FREEHEADER pools, the
 * pool structure itself.  Panics (DIAGNOSTIC) if items are still out.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;
	struct pool_cache *pc;

	/* Destroy all caches for this pool. */
	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
		pool_cache_destroy(pc);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u\n",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_roflags & PR_STATIC) == 0)
		while ((ph = pp->pr_pagelist.tqh_first) != NULL)
			pr_rmpage(pp, ph);

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	/* XXX Only clear this if we were drainpp? */
	drainpp = NULL;
	simple_unlock(&pool_head_slock);

	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);

	if (pp->pr_roflags & PR_FREEHEADER)
		free(pp, M_POOL);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
_pool_get(struct pool *pp, int flags, const char *file, long line)
{
	void *v;
	struct pool_item *pi;
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
			    (flags & PR_MALLOCOK))) {
		pr_printlog(pp, NULL, printf);
		panic("pool_get: static");
	}
#endif

#ifndef OSKIT
	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
			    (flags & PR_WAITOK) != 0))

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?