
dynahash.c

Source code from PostgreSQL 8.1.4, an open-source database system for Linux.
Language: C
Page 1 of 2
/*-------------------------------------------------------------------------
 *
 * dynahash.c
 *	  dynamic hash tables
 *
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.65.2.1 2005/11/22 18:23:24 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */

/*
 *
 * Dynamic hashing, after CACM April 1988 pp 446-457, by Per-Ake Larson.
 * Coded into C, with minor code improvements, and with hsearch(3) interface,
 * by ejp@ausmelb.oz, Jul 26, 1988: 13:16;
 * also, hcreate/hdestroy routines added to simulate hsearch(3).
 *
 * These routines simulate hsearch(3) and family, with the important
 * difference that the hash table is dynamic - can grow indefinitely
 * beyond its original size (as supplied to hcreate()).
 *
 * Performance appears to be comparable to that of hsearch(3).
 * The 'source-code' options referred to in hsearch(3)'s 'man' page
 * are not implemented; otherwise functionality is identical.
 *
 * Compilation controls:
 * DEBUG controls some informative traces, mainly for debugging.
 * HASH_STATISTICS causes HashAccesses and HashCollisions to be maintained;
 * when combined with HASH_DEBUG, these are displayed by hdestroy().
 *
 * Problems & fixes to ejp@ausmelb.oz. WARNING: relies on pre-processor
 * concatenation property, in probably unnecessary code 'optimisation'.
 *
 * Modified margo@postgres.berkeley.edu February 1990
 *		added multiple table interface
 * Modified by sullivan@postgres.berkeley.edu April 1990
 *		changed ctl structure for shared memory
 */

#include "postgres.h"

#include "storage/shmem.h"
#include "utils/dynahash.h"
#include "utils/hsearch.h"
#include "utils/memutils.h"

/*
 * Key (also entry) part of a HASHELEMENT
 */
#define ELEMENTKEY(helem)  (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))

/*
 * Fast MOD arithmetic, assuming that y is a power of 2 !
 */
#define MOD(x,y)			   ((x) & ((y)-1))
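/*
 * Editorial note (not part of the original source): for a power-of-two y,
 * x & (y-1) keeps exactly the low-order log2(y) bits of x, which is the
 * remainder of x / y, replacing an integer division with a single AND.
 * For example, with y = 8:
 *
 *		MOD(13, 8) = 13 & 7 = 0b1101 & 0b0111 = 0b0101 = 5 = 13 % 8
 *
 * The trick is wrong for non-power-of-two y: MOD(7, 6) = 7 & 5 = 5,
 * but 7 % 6 = 1.  Hence the Assert in hash_create that ssize is a
 * power of 2.
 */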
/*
 * Private function prototypes
 */
static void *DynaHashAlloc(Size size);
static HASHSEGMENT seg_alloc(HTAB *hashp);
static bool element_alloc(HTAB *hashp, int nelem);
static bool dir_realloc(HTAB *hashp);
static bool expand_table(HTAB *hashp);
static void hdefault(HTAB *hashp);
static bool init_htab(HTAB *hashp, long nelem);
static void hash_corrupted(HTAB *hashp);

/*
 * memory allocation support
 */
static MemoryContext CurrentDynaHashCxt = NULL;

static void *
DynaHashAlloc(Size size)
{
	Assert(MemoryContextIsValid(CurrentDynaHashCxt));
	return MemoryContextAlloc(CurrentDynaHashCxt, size);
}

#if HASH_STATISTICS
static long hash_accesses,
			hash_collisions,
			hash_expansions;
#endif


/************************** CREATE ROUTINES **********************/

/*
 * hash_create -- create a new dynamic hash table
 *
 *	tabname: a name for the table (for debugging purposes)
 *	nelem: maximum number of elements expected
 *	*info: additional table parameters, as indicated by flags
 *	flags: bitmask indicating which parameters to take from *info
 *
 * Note: for a shared-memory hashtable, nelem needs to be a pretty good
 * estimate, since we can't expand the table on the fly.  But an unshared
 * hashtable can be expanded on-the-fly, so it's better for nelem to be
 * on the small side and let the table grow if it's exceeded.  An overly
 * large nelem will penalize hash_seq_search speed without buying much.
 */
HTAB *
hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
	HTAB	   *hashp;
	HASHHDR    *hctl;

	/*
	 * For shared hash tables, we have a local hash header (HTAB struct) that
	 * we allocate in TopMemoryContext; all else is in shared memory.
	 *
	 * For non-shared hash tables, everything including the hash header is in
	 * a memory context created specially for the hash table --- this makes
	 * hash_destroy very simple.  The memory context is made a child of either
	 * a context specified by the caller, or TopMemoryContext if nothing is
	 * specified.
	 */
	if (flags & HASH_SHARED_MEM)
	{
		/* Set up to allocate the hash header */
		CurrentDynaHashCxt = TopMemoryContext;
	}
	else
	{
		/* Create the hash table's private memory context */
		if (flags & HASH_CONTEXT)
			CurrentDynaHashCxt = info->hcxt;
		else
			CurrentDynaHashCxt = TopMemoryContext;
		CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt,
												   tabname,
												   ALLOCSET_DEFAULT_MINSIZE,
												   ALLOCSET_DEFAULT_INITSIZE,
												   ALLOCSET_DEFAULT_MAXSIZE);
	}

	/* Initialize the hash header, plus a copy of the table name */
	hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) + 1);
	MemSet(hashp, 0, sizeof(HTAB));

	hashp->tabname = (char *) (hashp + 1);
	strcpy(hashp->tabname, tabname);

	if (flags & HASH_FUNCTION)
		hashp->hash = info->hash;
	else
		hashp->hash = string_hash;		/* default hash function */

	/*
	 * If you don't specify a match function, it defaults to strncmp() if you
	 * used string_hash (either explicitly or by default) and to memcmp()
	 * otherwise.  (Prior to PostgreSQL 7.4, memcmp() was always used.)
	 */
	if (flags & HASH_COMPARE)
		hashp->match = info->match;
	else if (hashp->hash == string_hash)
		hashp->match = (HashCompareFunc) strncmp;
	else
		hashp->match = memcmp;

	/*
	 * Similarly, the key-copying function defaults to strncpy() or memcpy().
	 */
	if (flags & HASH_KEYCOPY)
		hashp->keycopy = info->keycopy;
	else if (hashp->hash == string_hash)
		hashp->keycopy = (HashCopyFunc) strncpy;
	else
		hashp->keycopy = memcpy;

	if (flags & HASH_ALLOC)
		hashp->alloc = info->alloc;
	else
		hashp->alloc = DynaHashAlloc;

	if (flags & HASH_SHARED_MEM)
	{
		/*
		 * ctl structure is preallocated for shared memory tables. Note that
		 * HASH_DIRSIZE and HASH_ALLOC had better be set as well.
		 */
		hashp->hctl = info->hctl;
		hashp->dir = info->dir;
		hashp->hcxt = NULL;
		hashp->isshared = true;

		/* hash table already exists, we're just attaching to it */
		if (flags & HASH_ATTACH)
			return hashp;
	}
	else
	{
		/* setup hash table defaults */
		hashp->hctl = NULL;
		hashp->dir = NULL;
		hashp->hcxt = CurrentDynaHashCxt;
		hashp->isshared = false;
	}

	if (!hashp->hctl)
	{
		hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR));
		if (!hashp->hctl)
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory")));
	}

	hdefault(hashp);

	hctl = hashp->hctl;
#ifdef HASH_STATISTICS
	hctl->accesses = hctl->collisions = 0;
#endif

	if (flags & HASH_SEGMENT)
	{
		hctl->ssize = info->ssize;
		hctl->sshift = my_log2(info->ssize);
		/* ssize had better be a power of 2 */
		Assert(hctl->ssize == (1L << hctl->sshift));
	}
	if (flags & HASH_FFACTOR)
		hctl->ffactor = info->ffactor;

	/*
	 * SHM hash tables have fixed directory size passed by the caller.
	 */
	if (flags & HASH_DIRSIZE)
	{
		hctl->max_dsize = info->max_dsize;
		hctl->dsize = info->dsize;
	}

	/*
	 * hash table now allocates space for key and data but you have to say how
	 * much space to allocate
	 */
	if (flags & HASH_ELEM)
	{
		Assert(info->entrysize >= info->keysize);
		hctl->keysize = info->keysize;
		hctl->entrysize = info->entrysize;
	}

	/* Build the hash directory structure */
	if (!init_htab(hashp, nelem))
	{
		hash_destroy(hashp);
		elog(ERROR, "failed to initialize hash table");
	}

	/*
	 * For a shared hash table, preallocate the requested number of elements.
	 * This reduces problems with run-time out-of-shared-memory conditions.
	 */
	if (flags & HASH_SHARED_MEM)
	{
		if (!element_alloc(hashp, (int) nelem))
		{
			hash_destroy(hashp);
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of memory")));
		}
	}

	return hashp;
}
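/*
 * Editorial sketch (not part of the original source): a minimal non-shared
 * table with string keys.  "WordCountEntry" and "example_create" are
 * hypothetical names.  The key must be the first field of the entry (see
 * ELEMENTKEY above), and HASH_ELEM tells hash_create to take keysize and
 * entrysize from the HASHCTL; string_hash, strncmp and strncpy are the
 * defaults, so no other flags are needed.  Entries would then be looked up
 * or entered with hash_search() (declared in utils/hsearch.h; the search
 * routines are on page 2 of this listing).
 */
#ifdef NOT_USED
typedef struct WordCountEntry
{
	char		word[32];		/* key: NUL-terminated string */
	long		count;			/* payload */
} WordCountEntry;

static HTAB *
example_create(void)
{
	HASHCTL		info;

	MemSet(&info, 0, sizeof(info));
	info.keysize = 32;			/* must match word[] above */
	info.entrysize = sizeof(WordCountEntry);

	/* nelem = 256 is just a starting estimate; the table can grow */
	return hash_create("word counts", 256, &info, HASH_ELEM);
}
#endif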
/*
 * Set default HASHHDR parameters.
 */
static void
hdefault(HTAB *hashp)
{
	HASHHDR    *hctl = hashp->hctl;

	MemSet(hctl, 0, sizeof(HASHHDR));

	hctl->ssize = DEF_SEGSIZE;
	hctl->sshift = DEF_SEGSIZE_SHIFT;
	hctl->dsize = DEF_DIRSIZE;
	hctl->ffactor = DEF_FFACTOR;

	hctl->nentries = 0;
	hctl->nsegs = 0;

	/* rather pointless defaults for key & entry size */
	hctl->keysize = sizeof(char *);
	hctl->entrysize = 2 * sizeof(char *);

	/* table has no fixed maximum size */
	hctl->max_dsize = NO_MAX_DSIZE;

	/* garbage collection for HASH_REMOVE */
	hctl->freeList = NULL;
}

static bool
init_htab(HTAB *hashp, long nelem)
{
	HASHHDR    *hctl = hashp->hctl;
	HASHSEGMENT *segp;
	long		lnbuckets;
	int			nbuckets;
	int			nsegs;

	/*
	 * Divide number of elements by the fill factor to determine a desired
	 * number of buckets.  Allocate space for the next greater power of two
	 * number of buckets
	 */
	lnbuckets = (nelem - 1) / hctl->ffactor + 1;

	nbuckets = 1 << my_log2(lnbuckets);

	hctl->max_bucket = hctl->low_mask = nbuckets - 1;
	hctl->high_mask = (nbuckets << 1) - 1;

	/*
	 * Figure number of directory segments needed, round up to a power of 2
	 */
	nsegs = (nbuckets - 1) / hctl->ssize + 1;
	nsegs = 1 << my_log2(nsegs);

	/*
	 * Make sure directory is big enough. If pre-allocated directory is too
	 * small, choke (caller screwed up).
	 */
	if (nsegs > hctl->dsize)
	{
		if (!(hashp->dir))
			hctl->dsize = nsegs;
		else
			return false;
	}

	/* Allocate a directory */
	if (!(hashp->dir))
	{
		CurrentDynaHashCxt = hashp->hcxt;
		hashp->dir = (HASHSEGMENT *)
			hashp->alloc(hctl->dsize * sizeof(HASHSEGMENT));
		if (!hashp->dir)
			return false;
	}

	/* Allocate initial segments */
	for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
	{
		*segp = seg_alloc(hashp);
		if (*segp == NULL)
			return false;
	}

	/* Choose number of entries to allocate at a time */
	hctl->nelem_alloc = (int) Min(nelem, HASHELEMENT_ALLOC_MAX);
	hctl->nelem_alloc = Max(hctl->nelem_alloc, 1);

#if HASH_DEBUG
	fprintf(stderr, "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n%s%ld\n",
			"TABLE POINTER   ", hashp,
			"DIRECTORY SIZE  ", hctl->dsize,
			"SEGMENT SIZE    ", hctl->ssize,
			"SEGMENT SHIFT   ", hctl->sshift,
			"FILL FACTOR     ", hctl->ffactor,
			"MAX BUCKET      ", hctl->max_bucket,
			"HIGH MASK       ", hctl->high_mask,
			"LOW  MASK       ", hctl->low_mask,
			"NSEGS           ", hctl->nsegs,
			"NENTRIES        ", hctl->nentries);
#endif

	return true;
}
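/*
 * Worked example (editorial note, not part of the original source):
 * assuming the usual defaults from utils/hsearch.h of DEF_FFACTOR = 1
 * and DEF_SEGSIZE = 256, a call with nelem = 1000 gives
 *
 *		lnbuckets  = (1000 - 1) / 1 + 1 = 1000
 *		nbuckets   = 1 << my_log2(1000) = 1024	(my_log2 = ceil(log2))
 *		max_bucket = low_mask = 1023, high_mask = 2047
 *		nsegs      = (1024 - 1) / 256 + 1 = 4	(already a power of 2)
 *
 * so the table starts with 4 directory segments of 256 buckets each.
 * The low_mask/high_mask pair supports linear hashing: a hash value is
 * first masked with high_mask, and re-masked with low_mask if the result
 * exceeds max_bucket, so buckets can be split one at a time as the table
 * grows.
 */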
/*
 * Estimate the space needed for a hashtable containing the given number
 * of entries of given size.
 * NOTE: this is used to estimate the footprint of hashtables in shared
 * memory; therefore it does not count HTAB which is in local memory.
 * NB: assumes that all hash structure parameters have default values!
 */
Size
hash_estimate_size(long num_entries, Size entrysize)
{
	Size		size;
	long		nBuckets,
				nSegments,
				nDirEntries,
				nElementAllocs,
				elementSize,
				elementAllocCnt;

	/* estimate number of buckets wanted */
	nBuckets = 1L << my_log2((num_entries - 1) / DEF_FFACTOR + 1);
	/* # of segments needed for nBuckets */
	nSegments = 1L << my_log2((nBuckets - 1) / DEF_SEGSIZE + 1);
	/* directory entries */
	nDirEntries = DEF_DIRSIZE;
	while (nDirEntries < nSegments)
		nDirEntries <<= 1;		/* dir_alloc doubles dsize at each call */

	/* fixed control info */
	size = MAXALIGN(sizeof(HASHHDR));	/* but not HTAB, per above */
	/* directory */
	size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
	/* segments */
	size = add_size(size, mul_size(nSegments,
								MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
	/* elements --- allocated in groups of up to HASHELEMENT_ALLOC_MAX */
	elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
	elementAllocCnt = Min(num_entries, HASHELEMENT_ALLOC_MAX);
	elementAllocCnt = Max(elementAllocCnt, 1);
	nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
	size = add_size(size,
					mul_size(nElementAllocs,
							 mul_size(elementAllocCnt, elementSize)));

	return size;
}

/*
 * Select an appropriate directory size for a hashtable with the given
 * maximum number of entries.
 * This is only needed for hashtables in shared memory, whose directories
 * cannot be expanded dynamically.
 * NB: assumes that all hash structure parameters have default values!
 *
 * XXX this had better agree with the behavior of init_htab()...
 */
long
hash_select_dirsize(long num_entries)
{
	long		nBuckets,
				nSegments,
				nDirEntries;

	/* estimate number of buckets wanted */
	nBuckets = 1L << my_log2((num_entries - 1) / DEF_FFACTOR + 1);
	/* # of segments needed for nBuckets */
	nSegments = 1L << my_log2((nBuckets - 1) / DEF_SEGSIZE + 1);
	/* directory entries */
	nDirEntries = DEF_DIRSIZE;
	while (nDirEntries < nSegments)
		nDirEntries <<= 1;		/* dir_alloc doubles dsize at each call */

	return nDirEntries;
}


/********************** DESTROY ROUTINES ************************/

void
hash_destroy(HTAB *hashp)
{
	if (hashp != NULL)
	{
		/* allocation method must be one we know how to free, too */
		Assert(hashp->alloc == DynaHashAlloc);
		/* so this hashtable must have its own context */
		Assert(hashp->hcxt != NULL);

		hash_stats("destroy", hashp);

		/*
		 * Free everything by destroying the hash table's memory context.
		 */
		MemoryContextDelete(hashp->hcxt);
	}
}

void
hash_stats(const char *where, HTAB *hashp)
{
#if HASH_STATISTICS
	fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
			where, hashp->hctl->accesses, hashp->hctl->collisions);
	fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
			hashp->hctl->nentries, hashp->hctl->keysize,
			hashp->hctl->max_bucket, hashp->hctl->nsegs);
	fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
			where, hash_accesses, hash_collisions);
	fprintf(stderr, "hash_stats: total expansions %ld\n",
			hash_expansions);
#endif
}

/******************************* SEARCH ROUTINES *****************************/
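/*
 * Worked example (editorial note, not part of the original source):
 * hash_estimate_size(1000, 64), again assuming DEF_FFACTOR = 1,
 * DEF_SEGSIZE = 256 and DEF_DIRSIZE = 256, computes
 *
 *		nBuckets    = 1L << my_log2((1000 - 1) / 1 + 1) = 1024
 *		nSegments   = 1L << my_log2((1024 - 1) / 256 + 1) = 4
 *		nDirEntries = 256	(DEF_DIRSIZE is already >= 4)
 *
 *		size = MAXALIGN(sizeof(HASHHDR))
 *			 + 256 * sizeof(HASHSEGMENT)				-- directory
 *			 + 4 * MAXALIGN(256 * sizeof(HASHBUCKET))	-- segments
 *			 + nElementAllocs * elementAllocCnt
 *				 * (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(64))
 *
 * where nElementAllocs * elementAllocCnt is 1000 rounded up to whole
 * allocation groups of Min(1000, HASHELEMENT_ALLOC_MAX) entries.  The
 * element term dominates, and the rounding means the estimate slightly
 * overstates the space actually consumed, which is the safe direction
 * for sizing shared memory.
 */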
