⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 heapam.c

📁 PostgreSQL 8.1.4的源码 适用于Linux下的开源数据库系统
💻 C
📖 第 1 页 / 共 5 页
字号:
		if (infomask & HEAP_XMAX_IS_MULTI)		{			/* wait for multixact */			MultiXactIdWait((MultiXactId) xwait);			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);			/*			 * If xwait had just locked the tuple then some other xact could			 * update this tuple before we get to this point.  Check for xmax			 * change, and start over if so.			 */			if (!(tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||				!TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),									 xwait))				goto l1;			/*			 * You might think the multixact is necessarily done here, but not			 * so: it could have surviving members, namely our own xact or			 * other subxacts of this backend.	It is legal for us to delete			 * the tuple in either case, however (the latter case is			 * essentially a situation of upgrading our former shared lock to			 * exclusive).	We don't bother changing the on-disk hint bits			 * since we are about to overwrite the xmax altogether.			 */		}		else		{			/* wait for regular transaction to end */			XactLockTableWait(xwait);			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);			/*			 * xwait is done, but if xwait had just locked the tuple then some			 * other xact could update this tuple before we get to this point.			 * Check for xmax change, and start over if so.			 */			if ((tp.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||				!TransactionIdEquals(HeapTupleHeaderGetXmax(tp.t_data),									 xwait))				goto l1;			/* Otherwise we can mark it committed or aborted */			if (!(tp.t_data->t_infomask & (HEAP_XMAX_COMMITTED |										   HEAP_XMAX_INVALID)))			{				if (TransactionIdDidCommit(xwait))					tp.t_data->t_infomask |= HEAP_XMAX_COMMITTED;				else					tp.t_data->t_infomask |= HEAP_XMAX_INVALID;				SetBufferCommitInfoNeedsSave(buffer);			}		}		/*		 * We may overwrite if previous xmax aborted, or if it committed but		 * only locked the tuple without updating it.		 
*/		if (tp.t_data->t_infomask & (HEAP_XMAX_INVALID |									 HEAP_IS_LOCKED))			result = HeapTupleMayBeUpdated;		else			result = HeapTupleUpdated;	}	if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)	{		/* Perform additional check for serializable RI updates */		if (!HeapTupleSatisfiesSnapshot(tp.t_data, crosscheck, buffer))			result = HeapTupleUpdated;	}	if (result != HeapTupleMayBeUpdated)	{		Assert(result == HeapTupleSelfUpdated ||			   result == HeapTupleUpdated ||			   result == HeapTupleBeingUpdated);		Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));		*ctid = tp.t_data->t_ctid;		*update_xmax = HeapTupleHeaderGetXmax(tp.t_data);		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);		ReleaseBuffer(buffer);		if (have_tuple_lock)			UnlockTuple(relation, &(tp.t_self), ExclusiveLock);		return result;	}	START_CRIT_SECTION();	/* store transaction information of xact deleting the tuple */	tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |							   HEAP_XMAX_INVALID |							   HEAP_XMAX_IS_MULTI |							   HEAP_IS_LOCKED |							   HEAP_MOVED);	HeapTupleHeaderSetXmax(tp.t_data, xid);	HeapTupleHeaderSetCmax(tp.t_data, cid);	/* Make sure there is no forward chain link in t_ctid */	tp.t_data->t_ctid = tp.t_self;	/* XLOG stuff */	if (!relation->rd_istemp)	{		xl_heap_delete xlrec;		XLogRecPtr	recptr;		XLogRecData rdata[2];		xlrec.target.node = relation->rd_node;		xlrec.target.tid = tp.t_self;		rdata[0].data = (char *) &xlrec;		rdata[0].len = SizeOfHeapDelete;		rdata[0].buffer = InvalidBuffer;		rdata[0].next = &(rdata[1]);		rdata[1].data = NULL;		rdata[1].len = 0;		rdata[1].buffer = buffer;		rdata[1].buffer_std = true;		rdata[1].next = NULL;		recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE, rdata);		PageSetLSN(dp, recptr);		PageSetTLI(dp, ThisTimeLineID);	}	else	{		/* No XLOG record, but still need to flag that XID exists on disk */		MyXactMadeTempRelUpdate = true;	}	END_CRIT_SECTION();	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);	/*	 * If the tuple has toasted 
out-of-line attributes, we need to delete
	 * those items too.  We have to do this before WriteBuffer because we need
	 * to look at the contents of the tuple, but it's OK to release the
	 * context lock on the buffer first.
	 */
	if (HeapTupleHasExternal(&tp))
		toast_delete(relation, &tp);

	/*
	 * Mark tuple for invalidation from system caches at next command
	 * boundary. We have to do this before WriteBuffer because we need to look
	 * at the contents of the tuple, so we need to hold our refcount on the
	 * buffer.
	 */
	CacheInvalidateHeapTuple(relation, &tp);

	/*
	 * Mark the buffer dirty and give up our refcount (the buffer content
	 * lock was already released above; per the comment above, WriteBuffer
	 * drops the pin as well).
	 */
	WriteBuffer(buffer);

	/*
	 * Release the lmgr tuple lock, if we had it.
	 */
	if (have_tuple_lock)
		UnlockTuple(relation, &(tp.t_self), ExclusiveLock);

	/* Count the delete in this relation's statistics. */
	pgstat_count_heap_delete(&relation->pgstat_info);

	return HeapTupleMayBeUpdated;
}

/*
 *	simple_heap_delete - delete a tuple
 *
 * This routine may be used to delete a tuple when concurrent updates of
 * the target tuple are not expected (for example, because we have a lock
 * on the relation associated with the tuple).	Any failure is reported
 * via ereport().
 */
void
simple_heap_delete(Relation relation, ItemPointer tid)
{
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	/*
	 * Delegate to heap_delete, waiting for any conflicting transaction to
	 * end; update_ctid/update_xmax are only meaningful on failure, and all
	 * failures are promoted to errors below.
	 */
	result = heap_delete(relation, tid,
						 &update_ctid, &update_xmax,
						 GetCurrentCommandId(), InvalidSnapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* Tuple was already updated in current command? */
			elog(ERROR, "tuple already updated by self");
			break;

		case HeapTupleMayBeUpdated:
			/* done successfully */
			break;

		case HeapTupleUpdated:
			elog(ERROR, "tuple concurrently updated");
			break;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			break;
	}
}

/*
 *	heap_update - replace a tuple
 *
 * NB: do not call this directly unless you are prepared to deal with
 * concurrent-update conditions.  Use simple_heap_update instead.
* *	relation - table to be modified (caller must hold suitable lock) *	otid - TID of old tuple to be replaced *	newtup - newly constructed tuple data to store *	ctid - output parameter, used only for failure case (see below) *	update_xmax - output parameter, used only for failure case (see below) *	cid - update command ID (used for visibility test, and stored into *		cmax/cmin if successful) *	crosscheck - if not InvalidSnapshot, also check old tuple against this *	wait - true if should wait for any conflicting update to commit/abort * * Normal, successful return value is HeapTupleMayBeUpdated, which * actually means we *did* update it.  Failure return codes are * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated * (the last only possible if wait == false). * * On success, the header fields of *newtup are updated to match the new * stored tuple; in particular, newtup->t_self is set to the TID where the * new tuple was inserted.	However, any TOAST changes in the new tuple's * data are not reflected into *newtup. * * In the failure cases, the routine returns the tuple's t_ctid and t_xmax. * If t_ctid is the same as otid, the tuple was deleted; if different, the * tuple was updated, and t_ctid is the location of the replacement tuple. * (t_xmax is needed to verify that the replacement tuple matches.) 
*/HTSU_Resultheap_update(Relation relation, ItemPointer otid, HeapTuple newtup,			ItemPointer ctid, TransactionId *update_xmax,			CommandId cid, Snapshot crosscheck, bool wait){	HTSU_Result result;	TransactionId xid = GetCurrentTransactionId();	ItemId		lp;	HeapTupleData oldtup;	HeapTuple	heaptup;	PageHeader	dp;	Buffer		buffer,				newbuf;	bool		need_toast,				already_marked;	Size		newtupsize,				pagefree;	bool		have_tuple_lock = false;	Assert(ItemPointerIsValid(otid));	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(otid));	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);	dp = (PageHeader) BufferGetPage(buffer);	lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(otid));	oldtup.t_datamcxt = NULL;	oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);	oldtup.t_len = ItemIdGetLength(lp);	oldtup.t_self = *otid;	/*	 * Note: beyond this point, use oldtup not otid to refer to old tuple.	 * otid may very well point at newtup->t_self, which we will overwrite	 * with the new tuple's location, so there's great risk of confusion if we	 * use otid anymore.	 */l2:	result = HeapTupleSatisfiesUpdate(oldtup.t_data, cid, buffer);	if (result == HeapTupleInvisible)	{		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);		ReleaseBuffer(buffer);		elog(ERROR, "attempted to update invisible tuple");	}	else if (result == HeapTupleBeingUpdated && wait)	{		TransactionId xwait;		uint16		infomask;		/* must copy state data before unlocking buffer */		xwait = HeapTupleHeaderGetXmax(oldtup.t_data);		infomask = oldtup.t_data->t_infomask;		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);		/*		 * Acquire tuple lock to establish our priority for the tuple (see		 * heap_lock_tuple).  LockTuple will release us when we are		 * next-in-line for the tuple.		 *		 * If we are forced to "start over" below, we keep the tuple lock;		 * this arranges that we stay at the head of the line while rechecking		 * tuple state.		 
*/		if (!have_tuple_lock)		{			LockTuple(relation, &(oldtup.t_self), ExclusiveLock);			have_tuple_lock = true;		}		/*		 * Sleep until concurrent transaction ends.  Note that we don't care		 * if the locker has an exclusive or shared lock, because we need		 * exclusive.		 */		if (infomask & HEAP_XMAX_IS_MULTI)		{			/* wait for multixact */			MultiXactIdWait((MultiXactId) xwait);			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);			/*			 * If xwait had just locked the tuple then some other xact could			 * update this tuple before we get to this point.  Check for xmax			 * change, and start over if so.			 */			if (!(oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||				!TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),									 xwait))				goto l2;			/*			 * You might think the multixact is necessarily done here, but not			 * so: it could have surviving members, namely our own xact or			 * other subxacts of this backend.	It is legal for us to update			 * the tuple in either case, however (the latter case is			 * essentially a situation of upgrading our former shared lock to			 * exclusive).	We don't bother changing the on-disk hint bits			 * since we are about to overwrite the xmax altogether.			 */		}		else		{			/* wait for regular transaction to end */			XactLockTableWait(xwait);			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);			/*			 * xwait is done, but if xwait had just locked the tuple then some			 * other xact could update this tuple before we get to this point.			 * Check for xmax change, and start over if so.			 
*/			if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||				!TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),									 xwait))				goto l2;			/* Otherwise we can mark it committed or aborted */			if (!(oldtup.t_data->t_infomask & (HEAP_XMAX_COMMITTED |											   HEAP_XMAX_INVALID)))			{				if (TransactionIdDidCommit(xwait))					oldtup.t_data->t_infomask |= HEAP_XMAX_COMMITTED;				else					oldtup.t_data->t_infomask |= HEAP_XMAX_INVALID;				SetBufferCommitInfoNeedsSave(buffer);			}		}		/*		 * We may overwrite if previous xmax aborted, or if it committed but		 * only locked the tuple without updating it.		 */		if (oldtup.t_data->t_infomask & (HEAP_XMAX_INVALID |										 HEAP_IS_LOCKED))			result = HeapTupleMayBeUpdated;		else			result = HeapTupleUpdated;	}	if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)	{		/* Perform additional check for serializable RI updates */		if (!HeapTupleSatisfiesSnapshot(oldtup.t_data, crosscheck, buffer))			result = HeapTupleUpdated;	}	if (result != HeapTupleMayBeUpdated)	{		Assert(result == HeapTupleSelfUpdated ||			   result == HeapTupleUpdated ||			   result == HeapTupleBeingUpdated);		Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));		*ctid = oldtup.t_data->t_ctid;		*update_xmax = HeapTupleHeaderGetXmax(oldtup.t_data);		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);		ReleaseBuffer(buffer);		if (have_tuple_lock)			UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);		return result;	}	/* Fill in OID and transaction status data for newtup */	if (relation->rd_rel->relhasoids)	{#ifdef NOT_USED		/* this is redundant with an Assert in HeapTupleSetOid */		Assert(newtup->t_data->t_infomask & HEAP_HASOID);#endif		HeapTupleSetOid(newtup, HeapTupleGetOid(&oldtup));	}	else	{		/* check there is not space for an OID */		Assert(!(newtup->t_data->t_infomask & HEAP_HASOID));	}	newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);	newtup->t_data->t_infomask |= (HEAP_XMAX_INVALID | HEAP_UPDATED);	
HeapTupleHeaderSetXmin(newtup->t_data, xid);	HeapTupleHeaderSetCmin(newtup->t_data, cid);	HeapTupleHeaderSetXmax(newtup->t_data, 0);	/* zero out Datum fields */	HeapTupleHeaderSetCmax(newtup->t_data, 0);	/* for cleanliness */	/*	 * If the toaster needs to be activated, OR if the new tuple will not fit	 * on the same page as the old, then we need to release the context lock	 * (but not the pin!) on the old tuple's buffer while we are off doing	 * TOAST and/or table-file-extension work.	We must mark the old tuple to	 * show that it's already being updated, else other processes may try to	 * update it themselves.	 *	 * We need to invoke the toaster if there are already any out-of-line	 * toasted values present, or if the new tuple is over-threshold.	 */	newtupsize = MAXALIGN(newtup->t_len);	need_toast = (HeapTupleHasExternal(&oldtup) ||				  HeapTupleHasExternal(newtup) ||				  newtupsize > TOAST_TUPLE_THRESHOLD);	pagefree = PageGetFreeSpace((Page) dp);	if (need_toast || newtupsize > pagefree)	{		oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |									   HEAP_XMAX_INVALID |									   HEAP_XMAX_IS_MULTI |

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -