⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 dynahash.c

📁 PostgreSQL 8.1.4的源码 适用于Linux下的开源数据库系统
💻 C
📖 第 1 页 / 共 2 页
字号:
/*
 * Convert a hash value to a bucket number.
 *
 * Linear-hashing trick: mask with high_mask first; if that lands past the
 * highest bucket currently allocated, fold down with low_mask (the mask for
 * the previous power-of-two table size).  See expand_table below, which
 * relies on exactly this reduction when splitting a bucket.
 */
static inline uint32
calc_bucket(HASHHDR *hctl, uint32 hash_val)
{
	uint32		bucket;

	bucket = hash_val & hctl->high_mask;
	if (bucket > hctl->max_bucket)
		bucket = bucket & hctl->low_mask;

	return bucket;
}

/*----------
 * hash_search -- look up key in table and perform action
 *
 * action is one of:
 *		HASH_FIND: look up key in table
 *		HASH_ENTER: look up key in table, creating entry if not present
 *		HASH_ENTER_NULL: same, but return NULL if out of memory
 *		HASH_REMOVE: look up key in table, remove entry if present
 *
 * Return value is a pointer to the element found/entered/removed if any,
 * or NULL if no match was found.  (NB: in the case of the REMOVE action,
 * the result is a dangling pointer that shouldn't be dereferenced!)
 *
 * HASH_ENTER will normally ereport a generic "out of memory" error if
 * it is unable to create a new entry.	The HASH_ENTER_NULL operation is
 * the same except it will return NULL if out of memory.  Note that
 * HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
 * since palloc internally ereports on out-of-memory.
 *
 * If foundPtr isn't NULL, then *foundPtr is set TRUE if we found an
 * existing entry in the table, FALSE otherwise.  This is needed in the
 * HASH_ENTER case, but is redundant with the return value otherwise.
 *----------
 */
void *
hash_search(HTAB *hashp,
			const void *keyPtr,
			HASHACTION action,
			bool *foundPtr)
{
	HASHHDR    *hctl = hashp->hctl;
	Size		keysize = hctl->keysize;
	uint32		hashvalue;
	uint32		bucket;
	long		segment_num;
	long		segment_ndx;
	HASHSEGMENT segp;
	HASHBUCKET	currBucket;
	HASHBUCKET *prevBucketPtr;
	HashCompareFunc match;

#if HASH_STATISTICS
	hash_accesses++;
	hctl->accesses++;
#endif

	/*
	 * Do the initial lookup: hash the key, reduce to a bucket number, then
	 * locate the bucket's chain head through the segment directory.
	 */
	hashvalue = hashp->hash(keyPtr, keysize);
	bucket = calc_bucket(hctl, hashvalue);

	segment_num = bucket >> hctl->sshift;
	segment_ndx = MOD(bucket, hctl->ssize);

	segp = hashp->dir[segment_num];

	/* a computed bucket must always have a segment; otherwise the table is broken */
	if (segp == NULL)
		hash_corrupted(hashp);

	prevBucketPtr = &segp[segment_ndx];
	currBucket = *prevBucketPtr;

	/*
	 * Follow collision chain looking for matching key.  We keep prevBucketPtr
	 * pointing at the link we'd have to update if we unlink currBucket
	 * (needed for HASH_REMOVE and for inserting at the chain head).
	 */
	match = hashp->match;		/* save one fetch in inner loop */

	while (currBucket != NULL)
	{
		/* compare the cached full hash first; call the key comparator only on a hash match */
		if (currBucket->hashvalue == hashvalue &&
			match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
			break;
		prevBucketPtr = &(currBucket->link);
		currBucket = *prevBucketPtr;
#if HASH_STATISTICS
		hash_collisions++;
		hctl->collisions++;
#endif
	}

	if (foundPtr)
		*foundPtr = (bool) (currBucket != NULL);

	/*
	 * OK, now what?
	 */
	switch (action)
	{
		case HASH_FIND:
			if (currBucket != NULL)
				return (void *) ELEMENTKEY(currBucket);
			return NULL;

		case HASH_REMOVE:
			if (currBucket != NULL)
			{
				Assert(hctl->nentries > 0);
				hctl->nentries--;

				/* remove record from hash bucket's chain. */
				*prevBucketPtr = currBucket->link;

				/* add the record to the freelist for this table.  */
				currBucket->link = hctl->freeList;
				hctl->freeList = currBucket;

				/*
				 * better hope the caller is synchronizing access to this
				 * element, because someone else is going to reuse it the next
				 * time something is added to the table
				 */
				return (void *) ELEMENTKEY(currBucket);
			}
			return NULL;

		case HASH_ENTER_NULL:
			/* ENTER_NULL does not work with palloc-based allocator */
			Assert(hashp->alloc != DynaHashAlloc);
			/* FALL THRU */

		case HASH_ENTER:
			/* Return existing element if found, else create one */
			if (currBucket != NULL)
				return (void *) ELEMENTKEY(currBucket);

			/* get the next free element */
			currBucket = hctl->freeList;
			if (currBucket == NULL)
			{
				/* no free elements.  allocate another chunk of buckets */
				if (!element_alloc(hashp, hctl->nelem_alloc))
				{
					/* out of memory */
					if (action == HASH_ENTER_NULL)
						return NULL;
					/* report a generic message */
					if (hashp->isshared)
						ereport(ERROR,
								(errcode(ERRCODE_OUT_OF_MEMORY),
								 errmsg("out of shared memory")));
					else
						ereport(ERROR,
								(errcode(ERRCODE_OUT_OF_MEMORY),
								 errmsg("out of memory")));
				}
				currBucket = hctl->freeList;
				Assert(currBucket != NULL);
			}

			/* pop the element off the freelist */
			hctl->freeList = currBucket->link;

			/* link into hashbucket chain (prevBucketPtr is the chain tail here) */
			*prevBucketPtr = currBucket;
			currBucket->link = NULL;

			/* copy key into record */
			currBucket->hashvalue = hashvalue;
			hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);

			/* caller is expected to fill the data field on return */

			/*
			 * Check if it is time to split the segment: grow when the average
			 * chain length (entries per bucket) reaches the fill factor.
			 */
			if (++hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor)
			{
				/*
				 * NOTE: failure to expand table is not a fatal error, it just
				 * means we have to run at higher fill factor than we wanted.
				 */
				expand_table(hashp);
			}

			return (void *) ELEMENTKEY(currBucket);
	}

	elog(ERROR, "unrecognized hash action code: %d", (int) action);

	return NULL;				/* keep compiler quiet */
}

/*
 * hash_seq_init/_search
 *			Sequentially search through hash table and return
 *			all the elements one by one, return NULL when no more.
 *
 * NOTE: caller may delete the returned element before continuing the scan.
 * However, deleting any other element while the scan is in progress is
 * UNDEFINED (it might be the one that curIndex is pointing at!).  Also,
 * if elements are added to the table while the scan is in progress, it is
 * unspecified whether they will be visited by the scan or not.
 */
void
hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
{
	/* start at bucket 0 with no partially-scanned chain */
	status->hashp = hashp;
	status->curBucket = 0;
	status->curEntry = NULL;
}

void *
hash_seq_search(HASH_SEQ_STATUS *status)
{
	HTAB	   *hashp;
	HASHHDR    *hctl;
	uint32		max_bucket;
	long		ssize;
	long		segment_num;
	long		segment_ndx;
	HASHSEGMENT segp;
	uint32		curBucket;
	HASHELEMENT *curElem;

	/*
	 * If we are mid-chain, just return the saved element and advance the
	 * saved pointer (bumping curBucket when the chain is exhausted).
	 */
	if ((curElem = status->curEntry) != NULL)
	{
		/* Continuing scan of curBucket... */
		status->curEntry = curElem->link;
		if (status->curEntry == NULL)	/* end of this bucket */
			++status->curBucket;
		return (void *) ELEMENTKEY(curElem);
	}

	/*
	 * Search for next nonempty bucket starting at curBucket.
	 */
	curBucket = status->curBucket;
	hashp = status->hashp;
	hctl = hashp->hctl;
	ssize = hctl->ssize;
	max_bucket = hctl->max_bucket;

	if (curBucket > max_bucket)
		return NULL;			/* search is done */

	/*
	 * first find the right segment in the table directory.
	 */
	segment_num = curBucket >> hctl->sshift;
	segment_ndx = MOD(curBucket, ssize);

	segp = hashp->dir[segment_num];

	/*
	 * Pick up the first item in this bucket's chain.  If chain is not empty
	 * we can begin searching it.  Otherwise we have to advance to find the
	 * next nonempty bucket.  We try to optimize that case since searching a
	 * near-empty hashtable has to iterate this loop a lot.
	 */
	while ((curElem = segp[segment_ndx]) == NULL)
	{
		/* empty bucket, advance to next */
		if (++curBucket > max_bucket)
		{
			status->curBucket = curBucket;
			return NULL;		/* search is done */
		}
		if (++segment_ndx >= ssize)
		{
			/* stepped off the end of this segment; move to the next one */
			segment_num++;
			segment_ndx = 0;
			segp = hashp->dir[segment_num];
		}
	}

	/* Begin scan of curBucket... */
	status->curEntry = curElem->link;
	if (status->curEntry == NULL)	/* end of this bucket */
		++curBucket;
	status->curBucket = curBucket;
	return (void *) ELEMENTKEY(curElem);
}

/********************************* UTILITIES ************************/

/*
 * Expand the table by adding one more hash bucket.
 *
 * Returns false (without modifying the bucket structure) if a needed
 * allocation fails; the table then simply runs at a higher fill factor.
 */
static bool
expand_table(HTAB *hashp)
{
	HASHHDR    *hctl = hashp->hctl;
	HASHSEGMENT old_seg,
				new_seg;
	long		old_bucket,
				new_bucket;
	long		new_segnum,
				new_segndx;
	long		old_segnum,
				old_segndx;
	HASHBUCKET *oldlink,
			   *newlink;
	HASHBUCKET	currElement,
				nextElement;

#ifdef HASH_STATISTICS
	hash_expansions++;
#endif

	new_bucket = hctl->max_bucket + 1;
	new_segnum = new_bucket >> hctl->sshift;
	new_segndx = MOD(new_bucket, hctl->ssize);

	if (new_segnum >= hctl->nsegs)
	{
		/* Allocate new segment if necessary -- could fail if dir full */
		if (new_segnum >= hctl->dsize)
			if (!dir_realloc(hashp))
				return false;
		if (!(hashp->dir[new_segnum] = seg_alloc(hashp)))
			return false;
		hctl->nsegs++;
	}

	/* OK, we created a new bucket */
	hctl->max_bucket++;

	/*
	 * *Before* changing masks, find old bucket corresponding to same hash
	 * values; values in that bucket may need to be relocated to new bucket.
	 * Note that new_bucket is certainly larger than low_mask at this point,
	 * so we can skip the first step of the regular hash mask calc.
	 */
	old_bucket = (new_bucket & hctl->low_mask);

	/*
	 * If we crossed a power of 2, readjust masks.
	 */
	if ((uint32) new_bucket > hctl->high_mask)
	{
		hctl->low_mask = hctl->high_mask;
		hctl->high_mask = (uint32) new_bucket | hctl->low_mask;
	}

	/*
	 * Relocate records to the new bucket.	NOTE: because of the way the hash
	 * masking is done in calc_bucket, only one old bucket can need to be
	 * split at this point.  With a different way of reducing the hash value,
	 * that might not be true!
	 */
	old_segnum = old_bucket >> hctl->sshift;
	old_segndx = MOD(old_bucket, hctl->ssize);

	old_seg = hashp->dir[old_segnum];
	new_seg = hashp->dir[new_segnum];

	oldlink = &old_seg[old_segndx];
	newlink = &new_seg[new_segndx];

	/*
	 * Walk the old chain once, appending each element to whichever chain
	 * its (re-reduced) hash value now selects.
	 */
	for (currElement = *oldlink;
		 currElement != NULL;
		 currElement = nextElement)
	{
		nextElement = currElement->link;
		if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
		{
			*oldlink = currElement;
			oldlink = &currElement->link;
		}
		else
		{
			*newlink = currElement;
			newlink = &currElement->link;
		}
	}

	/* don't forget to terminate the rebuilt hash chains... */
	*oldlink = NULL;
	*newlink = NULL;

	return true;
}

/*
 * Double the size of the segment directory.
 *
 * Only permitted when the caller did not fix a maximum directory size.
 * Returns false on refusal or allocation failure (old directory intact).
 */
static bool
dir_realloc(HTAB *hashp)
{
	HASHSEGMENT *p;
	HASHSEGMENT *old_p;
	long		new_dsize;
	long		old_dirsize;
	long		new_dirsize;

	/* a fixed max_dsize means the directory may not grow */
	if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
		return false;

	/* Reallocate directory */
	new_dsize = hashp->hctl->dsize << 1;
	old_dirsize = hashp->hctl->dsize * sizeof(HASHSEGMENT);
	new_dirsize = new_dsize * sizeof(HASHSEGMENT);

	old_p = hashp->dir;
	CurrentDynaHashCxt = hashp->hcxt;
	p = (HASHSEGMENT *) hashp->alloc((Size) new_dirsize);

	if (p != NULL)
	{
		/* copy the live entries, zero the new tail */
		memcpy(p, old_p, old_dirsize);
		MemSet(((char *) p) + old_dirsize, 0, new_dirsize - old_dirsize);
		hashp->dir = p;
		hashp->hctl->dsize = new_dsize;

		/* XXX assume the allocator is palloc, so we know how to free */
		Assert(hashp->alloc == DynaHashAlloc);
		pfree(old_p);

		return true;
	}

	return false;
}

/*
 * Allocate one zero-initialized segment (an array of ssize bucket-chain
 * heads).  Returns NULL on allocation failure.
 */
static HASHSEGMENT
seg_alloc(HTAB *hashp)
{
	HASHSEGMENT segp;

	CurrentDynaHashCxt = hashp->hcxt;
	segp = (HASHSEGMENT) hashp->alloc(sizeof(HASHBUCKET) * hashp->hctl->ssize);

	if (!segp)
		return NULL;

	MemSet(segp, 0, sizeof(HASHBUCKET) * hashp->hctl->ssize);

	return segp;
}

/*
 * allocate some new elements and link them into the free list
 */
static bool
element_alloc(HTAB *hashp, int nelem)
{
	HASHHDR    *hctl = hashp->hctl;
	Size		elementSize;
	HASHELEMENT *tmpElement;
	int			i;

	/* Each element has a HASHELEMENT header plus user data. */
	elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);

	CurrentDynaHashCxt = hashp->hcxt;
	tmpElement = (HASHELEMENT *)
		hashp->alloc(nelem * elementSize);

	if (!tmpElement)
		return false;

	/* link all the new entries into the freelist */
	for (i = 0; i < nelem; i++)
	{
		tmpElement->link = hctl->freeList;
		hctl->freeList = tmpElement;
		/* step to the next element within the single allocated chunk */
		tmpElement = (HASHELEMENT *) (((char *) tmpElement) + elementSize);
	}

	return true;
}

/* complain when we have detected a corrupted hashtable */
static void
hash_corrupted(HTAB *hashp)
{
	/*
	 * If the corruption is in a shared hashtable, we'd better force a
	 * systemwide restart.	Otherwise, just shut down this one backend.
	 */
	if (hashp->isshared)
		elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
	else
		elog(FATAL, "hash table \"%s\" corrupted", hashp->tabname);
}

/* calculate ceil(log base 2) of num */
int
my_log2(long num)
{
	int			i;
	long		limit;

	/* smallest i such that 2^i >= num */
	for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
		;
	return i;
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -