⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 hash.c

📁 PostgreSQL 8.1.4的源码 适用于Linux下的开源数据库系统
💻 C
📖 第 1 页 / 共 2 页
字号:
}/* *	hashrescan() -- rescan an index relation */Datumhashrescan(PG_FUNCTION_ARGS){	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);	ScanKey		scankey = (ScanKey) PG_GETARG_POINTER(1);	HashScanOpaque so = (HashScanOpaque) scan->opaque;	Relation	rel = scan->indexRelation;	/* if we are called from beginscan, so is still NULL */	if (so)	{		/* release any pins we still hold */		if (BufferIsValid(so->hashso_curbuf))			_hash_dropbuf(rel, so->hashso_curbuf);		so->hashso_curbuf = InvalidBuffer;		if (BufferIsValid(so->hashso_mrkbuf))			_hash_dropbuf(rel, so->hashso_mrkbuf);		so->hashso_mrkbuf = InvalidBuffer;		/* release lock on bucket, too */		if (so->hashso_bucket_blkno)			_hash_droplock(rel, so->hashso_bucket_blkno, HASH_SHARE);		so->hashso_bucket_blkno = 0;	}	/* set positions invalid (this will cause _hash_first call) */	ItemPointerSetInvalid(&(scan->currentItemData));	ItemPointerSetInvalid(&(scan->currentMarkData));	/* Update scan key, if a new one is given */	if (scankey && scan->numberOfKeys > 0)	{		memmove(scan->keyData,				scankey,				scan->numberOfKeys * sizeof(ScanKeyData));		if (so)			so->hashso_bucket_valid = false;	}	PG_RETURN_VOID();}/* *	hashendscan() -- close down a scan */Datumhashendscan(PG_FUNCTION_ARGS){	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);	HashScanOpaque so = (HashScanOpaque) scan->opaque;	Relation	rel = scan->indexRelation;	/* don't need scan registered anymore */	_hash_dropscan(scan);	/* release any pins we still hold */	if (BufferIsValid(so->hashso_curbuf))		_hash_dropbuf(rel, so->hashso_curbuf);	so->hashso_curbuf = InvalidBuffer;	if (BufferIsValid(so->hashso_mrkbuf))		_hash_dropbuf(rel, so->hashso_mrkbuf);	so->hashso_mrkbuf = InvalidBuffer;	/* release lock on bucket, too */	if (so->hashso_bucket_blkno)		_hash_droplock(rel, so->hashso_bucket_blkno, HASH_SHARE);	so->hashso_bucket_blkno = 0;	/* be tidy */	ItemPointerSetInvalid(&(scan->currentItemData));	ItemPointerSetInvalid(&(scan->currentMarkData));	pfree(so);	
scan->opaque = NULL;	PG_RETURN_VOID();}/* *	hashmarkpos() -- save current scan position */Datumhashmarkpos(PG_FUNCTION_ARGS){	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);	HashScanOpaque so = (HashScanOpaque) scan->opaque;	Relation	rel = scan->indexRelation;	/* release pin on old marked data, if any */	if (BufferIsValid(so->hashso_mrkbuf))		_hash_dropbuf(rel, so->hashso_mrkbuf);	so->hashso_mrkbuf = InvalidBuffer;	ItemPointerSetInvalid(&(scan->currentMarkData));	/* bump pin count on currentItemData and copy to currentMarkData */	if (ItemPointerIsValid(&(scan->currentItemData)))	{		IncrBufferRefCount(so->hashso_curbuf);		so->hashso_mrkbuf = so->hashso_curbuf;		scan->currentMarkData = scan->currentItemData;	}	PG_RETURN_VOID();}/* *	hashrestrpos() -- restore scan to last saved position */Datumhashrestrpos(PG_FUNCTION_ARGS){	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);	HashScanOpaque so = (HashScanOpaque) scan->opaque;	Relation	rel = scan->indexRelation;	/* release pin on current data, if any */	if (BufferIsValid(so->hashso_curbuf))		_hash_dropbuf(rel, so->hashso_curbuf);	so->hashso_curbuf = InvalidBuffer;	ItemPointerSetInvalid(&(scan->currentItemData));	/* bump pin count on currentMarkData and copy to currentItemData */	if (ItemPointerIsValid(&(scan->currentMarkData)))	{		IncrBufferRefCount(so->hashso_mrkbuf);		so->hashso_curbuf = so->hashso_mrkbuf;		scan->currentItemData = scan->currentMarkData;	}	PG_RETURN_VOID();}/* * Bulk deletion of all index entries pointing to a set of heap tuples. * The set of target tuples is specified via a callback routine that tells * whether any given heap tuple (identified by ItemPointer) is being deleted. * * Result: a palloc'd struct containing statistical info for VACUUM displays. 
 */
Datum
hashbulkdelete(PG_FUNCTION_ARGS)
{
	Relation	rel = (Relation) PG_GETARG_POINTER(0);
	IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
	void	   *callback_state = (void *) PG_GETARG_POINTER(2);
	IndexBulkDeleteResult *result;
	BlockNumber num_pages;
	double		tuples_removed;		/* count of entries deleted by us */
	double		num_index_tuples;	/* count of entries we left in place */
	double		orig_ntuples;
	Bucket		orig_maxbucket;
	Bucket		cur_maxbucket;
	Bucket		cur_bucket;
	Buffer		metabuf;
	HashMetaPage metap;
	HashMetaPageData local_metapage;	/* private copy of the metapage */

	tuples_removed = 0;
	num_index_tuples = 0;

	/*
	 * Read the metapage to fetch original bucket and tuple counts.  Also, we
	 * keep a copy of the last-seen metapage so that we can use its
	 * hashm_spares[] values to compute bucket page addresses.	This is a bit
	 * hokey but perfectly safe, since the interesting entries in the spares
	 * array cannot change under us; and it beats rereading the metapage for
	 * each bucket.
	 */
	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
	metap = (HashMetaPage) BufferGetPage(metabuf);
	_hash_checkpage(rel, (Page) metap, LH_META_PAGE);
	orig_maxbucket = metap->hashm_maxbucket;
	orig_ntuples = metap->hashm_ntuples;
	memcpy(&local_metapage, metap, sizeof(local_metapage));
	_hash_relbuf(rel, metabuf);

	/* Scan the buckets that we know exist */
	cur_bucket = 0;
	cur_maxbucket = orig_maxbucket;

	/* re-entered below if a concurrent split added buckets while we scanned */
loop_top:
	while (cur_bucket <= cur_maxbucket)
	{
		BlockNumber bucket_blkno;
		BlockNumber blkno;
		bool		bucket_dirty = false;

		/* Get address of bucket's start page */
		bucket_blkno = BUCKET_TO_BLKNO(&local_metapage, cur_bucket);

		/* Exclusive-lock the bucket so we can shrink it */
		_hash_getlock(rel, bucket_blkno, HASH_EXCLUSIVE);

		/* Shouldn't have any active scans locally, either */
		if (_hash_has_active_scan(rel, cur_bucket))
			elog(ERROR, "hash index has active scan during VACUUM");

		/* Scan each page in bucket (primary page plus overflow chain) */
		blkno = bucket_blkno;
		while (BlockNumberIsValid(blkno))
		{
			Buffer		buf;
			Page		page;
			HashPageOpaque opaque;
			OffsetNumber offno;
			OffsetNumber maxoffno;
			bool		page_dirty = false;

			/* allow cancel/cost-based-delay between pages */
			vacuum_delay_point();

			buf = _hash_getbuf(rel, blkno, HASH_WRITE);
			page = BufferGetPage(buf);
			_hash_checkpage(rel, page, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
			opaque = (HashPageOpaque) PageGetSpecialPointer(page);
			Assert(opaque->hasho_bucket == cur_bucket);

			/* Scan each tuple in page */
			offno = FirstOffsetNumber;
			maxoffno = PageGetMaxOffsetNumber(page);
			while (offno <= maxoffno)
			{
				HashItem	hitem;
				ItemPointer htup;

				hitem = (HashItem) PageGetItem(page,
											   PageGetItemId(page, offno));
				htup = &(hitem->hash_itup.t_tid);
				/* callback says whether this heap TID is being deleted */
				if (callback(htup, callback_state))
				{
					/* delete the item from the page */
					PageIndexTupleDelete(page, offno);
					bucket_dirty = page_dirty = true;
					/*
					 * don't increment offno, instead decrement maxoffno:
					 * deletion shifted the remaining items down one slot
					 */
					maxoffno = OffsetNumberPrev(maxoffno);
					tuples_removed += 1;
				}
				else
				{
					offno = OffsetNumberNext(offno);
					num_index_tuples += 1;
				}
			}

			/*
			 * Write page if needed, advance to next page.  Read the next
			 * link before releasing the buffer.
			 */
			blkno = opaque->hasho_nextblkno;

			if (page_dirty)
				_hash_wrtbuf(rel, buf);
			else
				_hash_relbuf(rel, buf);
		}

		/* If we deleted anything, try to compact free space */
		if (bucket_dirty)
			_hash_squeezebucket(rel, cur_bucket, bucket_blkno);

		/* Release bucket lock */
		_hash_droplock(rel, bucket_blkno, HASH_EXCLUSIVE);

		/* Advance to next bucket */
		cur_bucket++;
	}

	/* Write-lock metapage and check for split since we started */
	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE);
	metap = (HashMetaPage) BufferGetPage(metabuf);
	_hash_checkpage(rel, (Page) metap, LH_META_PAGE);

	if (cur_maxbucket != metap->hashm_maxbucket)
	{
		/* There's been a split, so process the additional bucket(s) */
		cur_maxbucket = metap->hashm_maxbucket;
		memcpy(&local_metapage, metap, sizeof(local_metapage));
		_hash_relbuf(rel, metabuf);
		goto loop_top;
	}

	/* Okay, we're really done.  Update tuple count in metapage. */

	if (orig_maxbucket == metap->hashm_maxbucket &&
		orig_ntuples == metap->hashm_ntuples)
	{
		/*
		 * No one has split or inserted anything since start of scan, so
		 * believe our count as gospel.
		 */
		metap->hashm_ntuples = num_index_tuples;
	}
	else
	{
		/*
		 * Otherwise, our count is untrustworthy since we may have
		 * double-scanned tuples in split buckets.	Proceed by dead-reckoning.
		 */
		if (metap->hashm_ntuples > tuples_removed)
			metap->hashm_ntuples -= tuples_removed;
		else
			metap->hashm_ntuples = 0;
		num_index_tuples = metap->hashm_ntuples;
	}

	_hash_wrtbuf(rel, metabuf);

	/* return statistics */
	num_pages = RelationGetNumberOfBlocks(rel);

	result = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
	result->num_pages = num_pages;
	result->num_index_tuples = num_index_tuples;
	result->tuples_removed = tuples_removed;

	PG_RETURN_POINTER(result);
}

/*
 * WAL replay entry point for hash indexes.  WAL-logging is not implemented
 * for the hash AM in this release, so replay can never legitimately occur.
 */
void
hash_redo(XLogRecPtr lsn, XLogRecord *record)
{
	elog(PANIC, "hash_redo: unimplemented");
}

/*
 * WAL record description entry point: intentionally a no-op, since no hash
 * WAL records are ever emitted (see hash_redo above).
 */
void
hash_desc(char *buf, uint8 xl_info, char *rec)
{
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -