
📄 hashpage.c

📁 Relational databases: PostgreSQL 6.5.2
💻 C
				break;
			default:
				elog(ERROR, "_hash_unsetpagelock: invalid access (%d) on blk %x: %s",
					 access, blkno, RelationGetRelationName(rel));
				break;
		}
	}
}

void
_hash_pagedel(Relation rel, ItemPointer tid)
{
	Buffer		buf;
	Buffer		metabuf;
	Page		page;
	BlockNumber blkno;
	OffsetNumber offno;
	HashMetaPage metap;
	HashPageOpaque opaque;

	blkno = ItemPointerGetBlockNumber(tid);
	offno = ItemPointerGetOffsetNumber(tid);

	buf = _hash_getbuf(rel, blkno, HASH_WRITE);
	page = BufferGetPage(buf);
	_hash_checkpage(page, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
	opaque = (HashPageOpaque) PageGetSpecialPointer(page);

	PageIndexTupleDelete(page, offno);
	_hash_wrtnorelbuf(rel, buf);

	if (PageIsEmpty(page) && (opaque->hasho_flag & LH_OVERFLOW_PAGE))
	{
		buf = _hash_freeovflpage(rel, buf);
		if (BufferIsValid(buf))
			_hash_relbuf(rel, buf, HASH_WRITE);
	}
	else
		_hash_relbuf(rel, buf, HASH_WRITE);

	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE);
	metap = (HashMetaPage) BufferGetPage(metabuf);
	_hash_checkpage((Page) metap, LH_META_PAGE);
	++metap->hashm_nkeys;
	_hash_wrtbuf(rel, metabuf);
}

void
_hash_expandtable(Relation rel, Buffer metabuf)
{
	HashMetaPage metap;
	Bucket		old_bucket;
	Bucket		new_bucket;
	uint32		spare_ndx;

/*	  elog(DEBUG, "_hash_expandtable: expanding..."); */

	metap = (HashMetaPage) BufferGetPage(metabuf);
	_hash_checkpage((Page) metap, LH_META_PAGE);

	metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_READ, HASH_WRITE);
	new_bucket = ++metap->MAX_BUCKET;
	metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_WRITE, HASH_READ);
	old_bucket = (metap->MAX_BUCKET & metap->LOW_MASK);

	/*
	 * If the split point is increasing (MAX_BUCKET's log base 2
	 * increases), we need to copy the current contents of the spare split
	 * bucket to the next bucket.
	 */
	spare_ndx = _hash_log2(metap->MAX_BUCKET + 1);
	if (spare_ndx > metap->OVFL_POINT)
	{
		metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_READ, HASH_WRITE);
		metap->SPARES[spare_ndx] = metap->SPARES[metap->OVFL_POINT];
		metap->OVFL_POINT = spare_ndx;
		metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_WRITE, HASH_READ);
	}

	if (new_bucket > metap->HIGH_MASK)
	{
		/* Starting a new doubling */
		metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_READ, HASH_WRITE);
		metap->LOW_MASK = metap->HIGH_MASK;
		metap->HIGH_MASK = new_bucket | metap->LOW_MASK;
		metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_WRITE, HASH_READ);
	}

	/* Relocate records to the new bucket */
	_hash_splitpage(rel, metabuf, old_bucket, new_bucket);
}

/*
 * _hash_splitpage -- split 'obucket' into 'obucket' and 'nbucket'
 *
 * this routine is actually misnamed -- we are splitting a bucket that
 * consists of a base bucket page and zero or more overflow (bucket
 * chain) pages.
 */
static void
_hash_splitpage(Relation rel,
				Buffer metabuf,
				Bucket obucket,
				Bucket nbucket)
{
	Bucket		bucket;
	Buffer		obuf;
	Buffer		nbuf;
	Buffer		ovflbuf;
	BlockNumber oblkno;
	BlockNumber nblkno;
	bool		null;
	Datum		datum;
	HashItem	hitem;
	HashPageOpaque oopaque;
	HashPageOpaque nopaque;
	HashMetaPage metap;
	IndexTuple	itup;
	int			itemsz;
	OffsetNumber ooffnum;
	OffsetNumber noffnum;
	OffsetNumber omaxoffnum;
	Page		opage;
	Page		npage;
	TupleDesc	itupdesc;

/*	  elog(DEBUG, "_hash_splitpage: splitting %d into %d,%d",
		 obucket, obucket, nbucket);
*/

	metap = (HashMetaPage) BufferGetPage(metabuf);
	_hash_checkpage((Page) metap, LH_META_PAGE);

	/* get the buffers & pages */
	oblkno = BUCKET_TO_BLKNO(obucket);
	nblkno = BUCKET_TO_BLKNO(nbucket);
	obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
	nbuf = _hash_getbuf(rel, nblkno, HASH_WRITE);
	opage = BufferGetPage(obuf);
	npage = BufferGetPage(nbuf);

	/* initialize the new bucket */
	_hash_pageinit(npage, BufferGetPageSize(nbuf));
	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
	nopaque->hasho_prevblkno = InvalidBlockNumber;
	nopaque->hasho_nextblkno = InvalidBlockNumber;
	nopaque->hasho_flag = LH_BUCKET_PAGE;
	nopaque->hasho_oaddr = InvalidOvflAddress;
	nopaque->hasho_bucket = nbucket;
	_hash_wrtnorelbuf(rel, nbuf);

	/*
	 * make sure the old bucket isn't empty.  advance 'opage' and friends
	 * through the overflow bucket chain until we find a non-empty page.
	 *
	 * XXX we should only need this once, if we are careful to preserve the
	 * invariant that overflow pages are never empty.
	 */
	_hash_checkpage(opage, LH_BUCKET_PAGE);
	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
	if (PageIsEmpty(opage))
	{
		oblkno = oopaque->hasho_nextblkno;
		_hash_relbuf(rel, obuf, HASH_WRITE);
		if (!BlockNumberIsValid(oblkno))
		{
			/*
			 * the old bucket is completely empty; of course, the new
			 * bucket will be as well, but since it's a base bucket page
			 * we don't care.
			 */
			_hash_relbuf(rel, nbuf, HASH_WRITE);
			return;
		}
		obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
		opage = BufferGetPage(obuf);
		_hash_checkpage(opage, LH_OVERFLOW_PAGE);
		if (PageIsEmpty(opage))
			elog(ERROR, "_hash_splitpage: empty overflow page %d", oblkno);
		oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
	}

	/*
	 * we are now guaranteed that 'opage' is not empty.  partition the
	 * tuples in the old bucket between the old bucket and the new bucket,
	 * advancing along their respective overflow bucket chains and adding
	 * overflow pages as needed.
	 */
	ooffnum = FirstOffsetNumber;
	omaxoffnum = PageGetMaxOffsetNumber(opage);
	for (;;)
	{
		/*
		 * at each iteration through this loop, each of these variables
		 * should be up-to-date: obuf opage oopaque ooffnum omaxoffnum
		 */

		/* check if we're at the end of the page */
		if (ooffnum > omaxoffnum)
		{
			/* at end of page, but check for overflow page */
			oblkno = oopaque->hasho_nextblkno;
			if (BlockNumberIsValid(oblkno))
			{
				/*
				 * we ran out of tuples on this particular page, but we
				 * have more overflow pages; re-init values.
				 */
				_hash_wrtbuf(rel, obuf);
				obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
				opage = BufferGetPage(obuf);
				_hash_checkpage(opage, LH_OVERFLOW_PAGE);
				oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

				/* we're guaranteed that an ovfl page has at least 1 tuple */
				if (PageIsEmpty(opage))
				{
					elog(ERROR, "_hash_splitpage: empty ovfl page %d!",
						 oblkno);
				}
				ooffnum = FirstOffsetNumber;
				omaxoffnum = PageGetMaxOffsetNumber(opage);
			}
			else
			{
				/*
				 * we're at the end of the bucket chain, so now we're
				 * really done with everything.  before quitting, call
				 * _hash_squeezebucket to ensure the tuples in the bucket
				 * (including the overflow pages) are packed as tightly as
				 * possible.
				 */
				_hash_wrtbuf(rel, obuf);
				_hash_wrtbuf(rel, nbuf);
				_hash_squeezebucket(rel, metap, obucket);
				return;
			}
		}

		/* hash on the tuple */
		hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
		itup = &(hitem->hash_itup);
		itupdesc = RelationGetDescr(rel);
		datum = index_getattr(itup, 1, itupdesc, &null);
		bucket = _hash_call(rel, metap, datum);

		if (bucket == nbucket)
		{
			/*
			 * insert the tuple into the new bucket.  if it doesn't fit on
			 * the current page in the new bucket, we must allocate a new
			 * overflow page and place the tuple on that page instead.
			 */
			itemsz = IndexTupleDSize(hitem->hash_itup)
				+ (sizeof(HashItemData) - sizeof(IndexTupleData));

			itemsz = MAXALIGN(itemsz);

			if (PageGetFreeSpace(npage) < itemsz)
			{
				ovflbuf = _hash_addovflpage(rel, &metabuf, nbuf);
				_hash_wrtbuf(rel, nbuf);
				nbuf = ovflbuf;
				npage = BufferGetPage(nbuf);
				_hash_checkpage(npage, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
			}

			noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage));
			PageAddItem(npage, (Item) hitem, itemsz, noffnum, LP_USED);
			_hash_wrtnorelbuf(rel, nbuf);

			/*
			 * now delete the tuple from the old bucket.  after this
			 * section of code, 'ooffnum' will actually point to the
			 * ItemId to which we would point if we had advanced it before
			 * the deletion (PageIndexTupleDelete repacks the ItemId
			 * array).  this also means that 'omaxoffnum' is exactly one
			 * less than it used to be, so we really can just decrement it
			 * instead of calling PageGetMaxOffsetNumber.
			 */
			PageIndexTupleDelete(opage, ooffnum);
			_hash_wrtnorelbuf(rel, obuf);
			omaxoffnum = OffsetNumberPrev(omaxoffnum);

			/*
			 * tidy up.  if the old page was an overflow page and it is
			 * now empty, we must free it (we want to preserve the
			 * invariant that overflow pages cannot be empty).
			 */
			if (PageIsEmpty(opage) &&
				(oopaque->hasho_flag & LH_OVERFLOW_PAGE))
			{
				obuf = _hash_freeovflpage(rel, obuf);

				/* check that we're not through the bucket chain */
				if (BufferIsInvalid(obuf))
				{
					_hash_wrtbuf(rel, nbuf);
					_hash_squeezebucket(rel, metap, obucket);
					return;
				}

				/*
				 * re-init. again, we're guaranteed that an ovfl page has
				 * at least one tuple.
				 */
				opage = BufferGetPage(obuf);
				_hash_checkpage(opage, LH_OVERFLOW_PAGE);
				oblkno = BufferGetBlockNumber(obuf);
				oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
				if (PageIsEmpty(opage))
				{
					elog(ERROR, "_hash_splitpage: empty overflow page %d",
						 oblkno);
				}
				ooffnum = FirstOffsetNumber;
				omaxoffnum = PageGetMaxOffsetNumber(opage);
			}
		}
		else
		{
			/*
			 * the tuple stays on this page.  we didn't move anything, so
			 * we didn't delete anything and therefore we don't have to
			 * change 'omaxoffnum'.
			 *
			 * XXX any hash value from [0, nbucket-1] will map to this
			 * bucket, which doesn't make sense to me.
			 */
			ooffnum = OffsetNumberNext(ooffnum);
		}
	}
	/* NOTREACHED */
}
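The bucket arithmetic above (MAX_BUCKET, LOW_MASK, HIGH_MASK) is the heart of the linear-hashing scheme: a key's hash value is first masked with HIGH_MASK, and if that lands on a bucket that has not been split off yet (one greater than MAX_BUCKET), it is masked down with LOW_MASK instead. Since the old and new bucket numbers differ only in the newly significant bit, the bucket that donates tuples to new_bucket is new_bucket & LOW_MASK, which is why _hash_splitpage only has to re-hash a single bucket chain per expansion. A minimal standalone sketch of this arithmetic follows; the ToyMeta struct and toy_* functions are illustrative stand-ins, not the PostgreSQL API.

#include <stdio.h>

/*
 * Toy model of the linear-hashing arithmetic used by _hash_expandtable
 * and _hash_call above.  All names here are illustrative, not the
 * actual PostgreSQL 6.5.2 data structures.
 */
typedef struct
{
	unsigned	max_bucket;		/* highest bucket in use (cf. MAX_BUCKET) */
	unsigned	high_mask;		/* mask for the current table doubling */
	unsigned	low_mask;		/* mask for the previous doubling */
} ToyMeta;

/* map a hash value to a bucket (cf. _hash_call) */
static unsigned
toy_hash_to_bucket(const ToyMeta *meta, unsigned hashval)
{
	unsigned	bucket = hashval & meta->high_mask;

	/* target bucket not split off yet?  fall back to the smaller table */
	if (bucket > meta->max_bucket)
		bucket &= meta->low_mask;
	return bucket;
}

/* grow the table by one bucket (cf. _hash_expandtable) */
static void
toy_expand(ToyMeta *meta, unsigned *old_bucket, unsigned *new_bucket)
{
	*new_bucket = ++meta->max_bucket;

	/* the donor bucket differs from the new one only in the top bit */
	*old_bucket = meta->max_bucket & meta->low_mask;

	if (*new_bucket > meta->high_mask)
	{
		/* starting a new doubling */
		meta->low_mask = meta->high_mask;
		meta->high_mask = *new_bucket | meta->low_mask;
	}
}

int
main(void)
{
	ToyMeta		meta = {3, 0x3, 0x1};	/* 4 buckets in use */
	unsigned	ob,
				nb;

	toy_expand(&meta, &ob, &nb);
	printf("split bucket %u into buckets %u and %u\n", ob, ob, nb);
	printf("hash 12 (0b1100) -> bucket %u\n",
		   toy_hash_to_bucket(&meta, 12));	/* 12 & 0x7 = 4, already split */
	printf("hash 13 (0b1101) -> bucket %u\n",
		   toy_hash_to_bucket(&meta, 13));	/* 13 & 0x7 = 5 > 4, so 5 & 0x3 = 1 */
	return 0;
}

Compiled and run, the sketch shows that expanding a 4-bucket table splits bucket 0 into buckets 0 and 4: a key whose low three hash bits are 100 now maps to bucket 4, while one whose bits are 101 exceeds MAX_BUCKET and falls back through LOW_MASK to bucket 1.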
