
📄 heapam.c

📁 PostgreSQL 8.3.4 source code, an open-source database
💻 C
📖 Page 1 of 5
		/*
		 * advance to next/prior page and detect end of scan
		 */
		if (backward)
		{
			finished = (page == scan->rs_startblock);
			if (page == 0)
				page = scan->rs_nblocks;
			page--;
		}
		else
		{
			page++;
			if (page >= scan->rs_nblocks)
				page = 0;
			finished = (page == scan->rs_startblock);

			/*
			 * Report our new scan position for synchronization purposes. We
			 * don't do that when moving backwards, however. That would just
			 * mess up any other forward-moving scanners.
			 *
			 * Note: we do this before checking for end of scan so that the
			 * final state of the position hint is back at the start of the
			 * rel.  That's not strictly necessary, but otherwise when you run
			 * the same query multiple times the starting position would shift
			 * a little bit backwards on every invocation, which is confusing.
			 * We don't guarantee any specific ordering in general, though.
			 */
			if (scan->rs_syncscan)
				ss_report_location(scan->rs_rd, page);
		}

		/*
		 * return NULL if we've exhausted all the pages
		 */
		if (finished)
		{
			if (BufferIsValid(scan->rs_cbuf))
				ReleaseBuffer(scan->rs_cbuf);
			scan->rs_cbuf = InvalidBuffer;
			scan->rs_cblock = InvalidBlockNumber;
			tuple->t_data = NULL;
			scan->rs_inited = false;
			return;
		}

		heapgetpage(scan, page);

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = PageGetMaxOffsetNumber((Page) dp);
		linesleft = lines;
		if (backward)
		{
			lineoff = lines;
			lpp = PageGetItemId(dp, lines);
		}
		else
		{
			lineoff = FirstOffsetNumber;
			lpp = PageGetItemId(dp, FirstOffsetNumber);
		}
	}
}

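/*
 * Editor's sketch (not part of the original heapam.c): heapgettup() above is
 * the internal workhorse behind the public sequential-scan interface.  The
 * hypothetical helper below shows how a caller typically drives a forward
 * scan; the function name, the caller-supplied "relid" and "snapshot", and
 * the use of AccessShareLock are illustrative assumptions, not code from
 * this file.
 */
#ifdef HEAPAM_USAGE_SKETCH
static void
scan_whole_relation(Oid relid, Snapshot snapshot)
{
	Relation	rel;
	HeapScanDesc scan;
	HeapTuple	tuple;

	rel = heap_open(relid, AccessShareLock);

	/* no scan keys: every tuple visible to the snapshot is returned */
	scan = heap_beginscan(rel, snapshot, 0, NULL);

	/* each heap_getnext() call lands in heapgettup()/heapgettup_pagemode() */
	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		/* process tuple->t_data / tuple->t_self here */
	}

	heap_endscan(scan);
	heap_close(rel, AccessShareLock);
}
#endif   /* HEAPAM_USAGE_SKETCH */
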
/* ----------------
 *		heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
 *
 *		Same API as heapgettup, but used in page-at-a-time mode
 *
 * The internal logic is much the same as heapgettup's too, but there are some
 * differences: we do not take the buffer content lock (that only needs to
 * happen inside heapgetpage), and we iterate through just the tuples listed
 * in rs_vistuples[] rather than all tuples on the page.  Notice that
 * lineindex is 0-based, where the corresponding loop variable lineoff in
 * heapgettup is 1-based.
 * ----------------
 */
static void
heapgettup_pagemode(HeapScanDesc scan,
					ScanDirection dir,
					int nkeys,
					ScanKey key)
{
	HeapTuple	tuple = &(scan->rs_ctup);
	bool		backward = ScanDirectionIsBackward(dir);
	BlockNumber page;
	bool		finished;
	Page		dp;
	int			lines;
	int			lineindex;
	OffsetNumber lineoff;
	int			linesleft;
	ItemId		lpp;

	/*
	 * calculate next starting lineindex, given scan direction
	 */
	if (ScanDirectionIsForward(dir))
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}
			page = scan->rs_startblock; /* first page */
			heapgetpage(scan, page);
			lineindex = 0;
			scan->rs_inited = true;
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
			lineindex = scan->rs_cindex + 1;
		}

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = scan->rs_ntuples;
		/* page and lineindex now reference the next visible tid */

		linesleft = lines - lineindex;
	}
	else if (backward)
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}

			/*
			 * Disable reporting to syncscan logic in a backwards scan; it's
			 * not very likely anyone else is doing the same thing at the same
			 * time, and much more likely that we'll just bollix things for
			 * forward scanners.
			 */
			scan->rs_syncscan = false;
			/* start from last page of the scan */
			if (scan->rs_startblock > 0)
				page = scan->rs_startblock - 1;
			else
				page = scan->rs_nblocks - 1;
			heapgetpage(scan, page);
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
		}

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = scan->rs_ntuples;

		if (!scan->rs_inited)
		{
			lineindex = lines - 1;
			scan->rs_inited = true;
		}
		else
		{
			lineindex = scan->rs_cindex - 1;
		}
		/* page and lineindex now reference the previous visible tid */

		linesleft = lineindex + 1;
	}
	else
	{
		/*
		 * ``no movement'' scan direction: refetch prior tuple
		 */
		if (!scan->rs_inited)
		{
			Assert(!BufferIsValid(scan->rs_cbuf));
			tuple->t_data = NULL;
			return;
		}

		page = ItemPointerGetBlockNumber(&(tuple->t_self));
		if (page != scan->rs_cblock)
			heapgetpage(scan, page);

		/* Since the tuple was previously fetched, needn't lock page here */
		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
		lpp = PageGetItemId(dp, lineoff);
		Assert(ItemIdIsNormal(lpp));

		tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
		tuple->t_len = ItemIdGetLength(lpp);

		/* check that rs_cindex is in sync */
		Assert(scan->rs_cindex < scan->rs_ntuples);
		Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);

		return;
	}

	/*
	 * advance the scan until we find a qualifying tuple or run out of stuff
	 * to scan
	 */
	for (;;)
	{
		while (linesleft > 0)
		{
			lineoff = scan->rs_vistuples[lineindex];
			lpp = PageGetItemId(dp, lineoff);
			Assert(ItemIdIsNormal(lpp));

			tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
			tuple->t_len = ItemIdGetLength(lpp);
			ItemPointerSet(&(tuple->t_self), page, lineoff);

			/*
			 * if current tuple qualifies, return it.
			 */
			if (key != NULL)
			{
				bool		valid;

				HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
							nkeys, key, valid);
				if (valid)
				{
					scan->rs_cindex = lineindex;
					return;
				}
			}
			else
			{
				scan->rs_cindex = lineindex;
				return;
			}

			/*
			 * otherwise move to the next item on the page
			 */
			--linesleft;
			if (backward)
				--lineindex;
			else
				++lineindex;
		}

		/*
		 * if we get here, it means we've exhausted the items on this page and
		 * it's time to move to the next.
		 */
		if (backward)
		{
			finished = (page == scan->rs_startblock);
			if (page == 0)
				page = scan->rs_nblocks;
			page--;
		}
		else
		{
			page++;
			if (page >= scan->rs_nblocks)
				page = 0;
			finished = (page == scan->rs_startblock);

			/*
			 * Report our new scan position for synchronization purposes. We
			 * don't do that when moving backwards, however. That would just
			 * mess up any other forward-moving scanners.
			 *
			 * Note: we do this before checking for end of scan so that the
			 * final state of the position hint is back at the start of the
			 * rel.  That's not strictly necessary, but otherwise when you run
			 * the same query multiple times the starting position would shift
			 * a little bit backwards on every invocation, which is confusing.
			 * We don't guarantee any specific ordering in general, though.
			 */
			if (scan->rs_syncscan)
				ss_report_location(scan->rs_rd, page);
		}

		/*
		 * return NULL if we've exhausted all the pages
		 */
		if (finished)
		{
			if (BufferIsValid(scan->rs_cbuf))
				ReleaseBuffer(scan->rs_cbuf);
			scan->rs_cbuf = InvalidBuffer;
			scan->rs_cblock = InvalidBlockNumber;
			tuple->t_data = NULL;
			scan->rs_inited = false;
			return;
		}

		heapgetpage(scan, page);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = scan->rs_ntuples;
		linesleft = lines;
		if (backward)
			lineindex = lines - 1;
		else
			lineindex = 0;
	}
}

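/*
 * Editor's sketch (not part of the original heapam.c): the "key != NULL"
 * branch above is where scan keys supplied to heap_beginscan() are checked
 * via HeapKeyTest().  A minimal sketch of building one key follows; the
 * attribute number (1), the int4 equality procedure, and the constant 42 are
 * assumptions chosen purely for illustration.
 */
#ifdef HEAPAM_USAGE_SKETCH
#include "utils/fmgroids.h"		/* for F_INT4EQ */

static void
scan_with_one_key(Relation rel, Snapshot snapshot)
{
	HeapScanDesc scan;
	HeapTuple	tuple;
	ScanKeyData key;

	/*
	 * Qualify on attribute 1: "attr1 = 42", tested with int4eq.  Heap scans
	 * only call the comparison procedure, so the strategy number is merely
	 * conventional here.
	 */
	ScanKeyInit(&key,
				1,
				BTEqualStrategyNumber, F_INT4EQ,
				Int32GetDatum(42));

	scan = heap_beginscan(rel, snapshot, 1, &key);
	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		/* only tuples passing HeapKeyTest() reach this point */
	}
	heap_endscan(scan);
}
#endif   /* HEAPAM_USAGE_SKETCH */
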
#if defined(DISABLE_COMPLEX_MACRO)
/*
 * This is formatted so oddly so that the correspondence to the macro
 * definition in access/heapam.h is maintained.
 */
Datum
fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
			bool *isnull)
{
	return (
			(attnum) > 0 ?
			(
			 ((isnull) ? (*(isnull) = false) : (dummyret) NULL),
			 HeapTupleNoNulls(tup) ?
			 (
			  (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
			  (
			   fetchatt((tupleDesc)->attrs[(attnum) - 1],
						(char *) (tup)->t_data + (tup)->t_data->t_hoff +
						(tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
			   )
			  :
			  nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
			  )
			 :
			 (
			  att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
			  (
			   ((isnull) ? (*(isnull) = true) : (dummyret) NULL),
			   (Datum) NULL
			   )
			  :
			  (
			   nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
			   )
			  )
			 )
			:
			(
			 (Datum) NULL
			 )
		);
}
#endif   /* defined(DISABLE_COMPLEX_MACRO) */

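/*
 * Editor's sketch (not part of the original heapam.c): fastgetattr() above
 * (normally the macro of the same name in access/heapam.h) extracts a single
 * user attribute from a heap tuple.  The hypothetical helper below assumes
 * the tuple's first attribute is an int4; the names and the NULL handling
 * are illustrative only.
 */
#ifdef HEAPAM_USAGE_SKETCH
static int32
first_attribute_as_int4(HeapTuple tuple, TupleDesc tupdesc)
{
	bool		isnull;
	Datum		value;

	/* attnum is 1-based; fastgetattr handles only user attributes (> 0) */
	value = fastgetattr(tuple, 1, tupdesc, &isnull);
	if (isnull)
		return 0;				/* caller decides how NULLs should be treated */

	return DatumGetInt32(value);
}
#endif   /* HEAPAM_USAGE_SKETCH */
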
/* ----------------------------------------------------------------
 *					 heap access method interface
 * ----------------------------------------------------------------
 */

/* ----------------
 *		relation_open - open any relation by relation OID
 *
 *		If lockmode is not "NoLock", the specified kind of lock is
 *		obtained on the relation.  (Generally, NoLock should only be
 *		used if the caller knows it has some appropriate lock on the
 *		relation already.)
 *
 *		An error is raised if the relation does not exist.
 *
 *		NB: a "relation" is anything with a pg_class entry.  The caller is
 *		expected to check whether the relkind is something it can handle.
 * ----------------
 */
Relation
relation_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* Get the lock before trying to open the relcache entry */
	if (lockmode != NoLock)
		LockRelationOid(relationId, lockmode);

	/* The relcache does all the real work... */
	r = RelationIdGetRelation(relationId);

	if (!RelationIsValid(r))
		elog(ERROR, "could not open relation with OID %u", relationId);

	/* Make note that we've accessed a temporary relation */
	if (r->rd_istemp)
		MyXactAccessedTempRel = true;

	pgstat_initstats(r);

	return r;
}

/* ----------------
 *		try_relation_open - open any relation by relation OID
 *
 *		Same as relation_open, except return NULL instead of failing
 *		if the relation does not exist.
 * ----------------
 */
Relation
try_relation_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* Get the lock first */
	if (lockmode != NoLock)
		LockRelationOid(relationId, lockmode);

	/*
	 * Now that we have the lock, probe to see if the relation really exists
	 * or not.
	 */
	if (!SearchSysCacheExists(RELOID,
							  ObjectIdGetDatum(relationId),
							  0, 0, 0))
	{
		/* Release useless lock */
		if (lockmode != NoLock)
			UnlockRelationOid(relationId, lockmode);

		return NULL;
	}

	/* Should be safe to do a relcache load */
	r = RelationIdGetRelation(relationId);

	if (!RelationIsValid(r))
		elog(ERROR, "could not open relation with OID %u", relationId);

	/* Make note that we've accessed a temporary relation */
	if (r->rd_istemp)
		MyXactAccessedTempRel = true;

	pgstat_initstats(r);

	return r;
}

/* ----------------
 *		relation_open_nowait - open but don't wait for lock
 *
 *		Same as relation_open, except throw an error instead of waiting
 *		when the requested lock is not immediately obtainable.
 * ----------------
