heapam.c
来自「PostgreSQL7.4.6 for Linux」· C语言 代码 · 共 2,410 行 · 第 1/5 页
C
2,410 行
 * version of the row).
 * On success, newtup->t_self is set to the TID where the new tuple
 * was inserted.
 *
 * Returns HeapTupleMayBeUpdated on success; otherwise one of the
 * HeapTupleSatisfiesUpdate result codes, with *ctid set to the t_ctid of
 * the old tuple so the caller can chase the update chain.
 */
int
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
            ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
{
    ItemId      lp;
    HeapTupleData oldtup;
    PageHeader  dp;
    Buffer      buffer,
                newbuf;
    bool        need_toast,
                already_marked;
    Size        newtupsize,
                pagefree;
    int         result;
    uint16      sv_infomask;

    Assert(ItemPointerIsValid(otid));

    /* Pin and exclusively lock the page holding the old tuple */
    buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(otid));
    if (!BufferIsValid(buffer))
        elog(ERROR, "ReadBuffer failed");
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

    dp = (PageHeader) BufferGetPage(buffer);
    lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(otid));

    oldtup.t_datamcxt = NULL;
    oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
    oldtup.t_len = ItemIdGetLength(lp);
    oldtup.t_self = *otid;

    /*
     * Note: beyond this point, use oldtup not otid to refer to old tuple.
     * otid may very well point at newtup->t_self, which we will overwrite
     * with the new tuple's location, so there's great risk of confusion
     * if we use otid anymore.
     */

l2:
    /*
     * HeapTupleSatisfiesUpdate may update hint bits in t_infomask; if it
     * did, tell the buffer manager the page is dirty in the "hint only"
     * sense.
     */
    sv_infomask = oldtup.t_data->t_infomask;
    result = HeapTupleSatisfiesUpdate(oldtup.t_data, cid);
    if (sv_infomask != oldtup.t_data->t_infomask)
        SetBufferCommitInfoNeedsSave(buffer);

    if (result == HeapTupleInvisible)
    {
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
        ReleaseBuffer(buffer);
        elog(ERROR, "attempted to update invisible tuple");
    }
    else if (result == HeapTupleBeingUpdated && wait)
    {
        TransactionId xwait = HeapTupleHeaderGetXmax(oldtup.t_data);

        /* sleep until concurrent transaction ends */
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
        XactLockTableWait(xwait);

        /*
         * If xwait aborted, re-run the visibility check from scratch:
         * the tuple may now be updatable by us.
         */
        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
        if (!TransactionIdDidCommit(xwait))
            goto l2;

        /*
         * xwait is committed but if xwait had just marked the tuple for
         * update then some other xaction could update this tuple before
         * we got to this point.
         */
        if (!TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
                                 xwait))
            goto l2;
        if (!(oldtup.t_data->t_infomask & HEAP_XMAX_COMMITTED))
        {
            oldtup.t_data->t_infomask |= HEAP_XMAX_COMMITTED;
            SetBufferCommitInfoNeedsSave(buffer);
        }
        /* if tuple was marked for update but not updated... */
        if (oldtup.t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
            result = HeapTupleMayBeUpdated;
        else
            result = HeapTupleUpdated;
    }

    if (crosscheck != SnapshotAny && result == HeapTupleMayBeUpdated)
    {
        /* Perform additional check for serializable RI updates */
        sv_infomask = oldtup.t_data->t_infomask;
        if (!HeapTupleSatisfiesSnapshot(oldtup.t_data, crosscheck))
            result = HeapTupleUpdated;
        if (sv_infomask != oldtup.t_data->t_infomask)
            SetBufferCommitInfoNeedsSave(buffer);
    }

    if (result != HeapTupleMayBeUpdated)
    {
        Assert(result == HeapTupleSelfUpdated ||
               result == HeapTupleUpdated ||
               result == HeapTupleBeingUpdated);
        /* hand back the forward link so the caller can follow the chain */
        *ctid = oldtup.t_data->t_ctid;
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
        ReleaseBuffer(buffer);
        return result;
    }

    /* Fill in OID and transaction status data for newtup */
    if (relation->rd_rel->relhasoids)
    {
#ifdef NOT_USED
        /* this is redundant with an Assert in HeapTupleSetOid */
        Assert(newtup->t_data->t_infomask & HEAP_HASOID);
#endif
        HeapTupleSetOid(newtup, HeapTupleGetOid(&oldtup));
    }
    else
    {
        /* check there is not space for an OID */
        Assert(!(newtup->t_data->t_infomask & HEAP_HASOID));
    }

    newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
    newtup->t_data->t_infomask |= (HEAP_XMAX_INVALID | HEAP_UPDATED);
    HeapTupleHeaderSetXmin(newtup->t_data, GetCurrentTransactionId());
    HeapTupleHeaderSetCmin(newtup->t_data, cid);

    /*
     * If the toaster needs to be activated, OR if the new tuple will not
     * fit on the same page as the old, then we need to release the
     * context lock (but not the pin!) on the old tuple's buffer while we
     * are off doing TOAST and/or table-file-extension work.  We must mark
     * the old tuple to show that it's already being updated, else other
     * processes may try to update it themselves.  To avoid second XLOG log
     * record, we use xact mgr hook to unlock old tuple without reading
     * log if xact will abort before update is logged.  In the event of
     * crash prior to logging, TQUAL routines will see HEAP_XMAX_UNLOGGED
     * flag...
     *
     * NOTE: this trick is useless currently but saved for future when we'll
     * implement UNDO and will re-use transaction IDs after postmaster
     * startup.
     *
     * We need to invoke the toaster if there are already any toasted values
     * present, or if the new tuple is over-threshold.
     */
    need_toast = (HeapTupleHasExtended(&oldtup) ||
                  HeapTupleHasExtended(newtup) ||
                  (MAXALIGN(newtup->t_len) > TOAST_TUPLE_THRESHOLD));

    newtupsize = MAXALIGN(newtup->t_len);
    pagefree = PageGetFreeSpace((Page) dp);

    if (need_toast || newtupsize > pagefree)
    {
        /*
         * Register a rollback hook so the old tuple's xmax marking is
         * undone if the transaction aborts before the update is logged.
         */
        _locked_tuple_.node = relation->rd_node;
        _locked_tuple_.tid = oldtup.t_self;
        XactPushRollback(_heap_unlock_tuple, (void *) &_locked_tuple_);

        oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                                       HEAP_XMAX_INVALID |
                                       HEAP_MARKED_FOR_UPDATE |
                                       HEAP_MOVED);
        oldtup.t_data->t_infomask |= HEAP_XMAX_UNLOGGED;
        HeapTupleHeaderSetXmax(oldtup.t_data, GetCurrentTransactionId());
        HeapTupleHeaderSetCmax(oldtup.t_data, cid);
        already_marked = true;
        /* drop the content lock, but keep the pin, while toasting */
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

        /* Let the toaster do its thing */
        if (need_toast)
        {
            heap_tuple_toast_attrs(relation, newtup, &oldtup);
            newtupsize = MAXALIGN(newtup->t_len);
        }

        /*
         * Now, do we need a new page for the tuple, or not?  This is a
         * bit tricky since someone else could have added tuples to the
         * page while we weren't looking.  We have to recheck the
         * available space after reacquiring the buffer lock.  But don't
         * bother to do that if the former amount of free space is still
         * not enough; it's unlikely there's more free now than before.
         *
         * What's more, if we need to get a new page, we will need to acquire
         * buffer locks on both old and new pages.  To avoid deadlock
         * against some other backend trying to get the same two locks in
         * the other order, we must be consistent about the order we get
         * the locks in.  We use the rule "lock the lower-numbered page of
         * the relation first".  To implement this, we must do
         * RelationGetBufferForTuple while not holding the lock on the old
         * page, and we must rely on it to get the locks on both pages in
         * the correct order.
         */
        if (newtupsize > pagefree)
        {
            /* Assume there's no chance to put newtup on same page. */
            newbuf = RelationGetBufferForTuple(relation, newtup->t_len,
                                               buffer);
        }
        else
        {
            /* Re-acquire the lock on the old tuple's page. */
            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
            /* Re-check using the up-to-date free space */
            pagefree = PageGetFreeSpace((Page) dp);
            if (newtupsize > pagefree)
            {
                /*
                 * Rats, it doesn't fit anymore.  We must now unlock and
                 * relock to avoid deadlock.  Fortunately, this path
                 * should seldom be taken.
                 */
                LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
                newbuf = RelationGetBufferForTuple(relation, newtup->t_len,
                                                   buffer);
            }
            else
            {
                /* OK, it fits here, so we're done. */
                newbuf = buffer;
            }
        }
    }
    else
    {
        /* No TOAST work needed, and it'll fit on same page */
        already_marked = false;
        newbuf = buffer;
    }

    pgstat_count_heap_update(&relation->pgstat_info);

    /*
     * At this point newbuf and buffer are both pinned and locked, and
     * newbuf has enough space for the new tuple.  If they are the same
     * buffer, only one pin is held.
     */

    /* NO EREPORT(ERROR) from here till changes are logged */
    START_CRIT_SECTION();

    RelationPutHeapTuple(relation, newbuf, newtup); /* insert new tuple */

    if (already_marked)
    {
        /* xmax was set before toasting; just clear the unlogged flag */
        oldtup.t_data->t_infomask &= ~HEAP_XMAX_UNLOGGED;
        XactPopRollback();
    }
    else
    {
        oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                                       HEAP_XMAX_INVALID |
                                       HEAP_MARKED_FOR_UPDATE |
                                       HEAP_MOVED);
        HeapTupleHeaderSetXmax(oldtup.t_data, GetCurrentTransactionId());
        HeapTupleHeaderSetCmax(oldtup.t_data, cid);
    }

    /* record address of new tuple in t_ctid of old one */
    oldtup.t_data->t_ctid = newtup->t_self;

    /* XLOG stuff */
    if (!relation->rd_istemp)
    {
        XLogRecPtr  recptr = log_heap_update(relation, buffer, oldtup.t_self,
                                             newbuf, newtup, false);

        if (newbuf != buffer)
        {
            PageSetLSN(BufferGetPage(newbuf), recptr);
            PageSetSUI(BufferGetPage(newbuf), ThisStartUpID);
        }
        PageSetLSN(BufferGetPage(buffer), recptr);
        PageSetSUI(BufferGetPage(buffer), ThisStartUpID);
    }
    else
    {
        /* No XLOG record, but still need to flag that XID exists on disk */
        MyXactMadeTempRelUpdate = true;
    }

    END_CRIT_SECTION();

    if (newbuf != buffer)
        LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

    /*
     * Mark old tuple for invalidation from system caches at next command
     * boundary.  We have to do this before WriteBuffer because we need to
     * look at the contents of the tuple, so we need to hold our refcount.
     */
    CacheInvalidateHeapTuple(relation, &oldtup);

    if (newbuf != buffer)
        WriteBuffer(newbuf);
    WriteBuffer(buffer);

    /*
     * If new tuple is cachable, mark it for invalidation from the caches
     * in case we abort.  Note it is OK to do this after WriteBuffer
     * releases the buffer, because the "newtup" data structure is all in
     * local memory, not in the shared buffer.
     */
    CacheInvalidateHeapTuple(relation, newtup);

    return HeapTupleMayBeUpdated;
}

/*
 * simple_heap_update - replace a tuple
 *
 * This routine may be used to update a tuple when concurrent updates of
 * the target tuple are not expected (for example, because we have a lock
 * on the relation associated with the tuple).  Any failure is reported
 * via ereport().
 */
void
simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
{
    ItemPointerData ctid;
    int         result;

    result = heap_update(relation, otid, tup,
                         &ctid,
                         GetCurrentCommandId(), SnapshotAny,
                         true /* wait for commit */ );
    switch (result)
    {
        case HeapTupleSelfUpdated:
            /* Tuple was already updated in current command? */
            elog(ERROR, "tuple already updated by self");
            break;

        case HeapTupleMayBeUpdated:
            /* done successfully */
            break;

        case HeapTupleUpdated:
            elog(ERROR, "tuple concurrently updated");
            break;

        default:
            elog(ERROR, "unrecognized heap_update status: %u", result);
            break;
    }
}

/*
 * heap_mark4update - mark a tuple for update
 *
 * Sets xmax/cmax and HEAP_MARKED_FOR_UPDATE on the tuple (SELECT FOR
 * UPDATE semantics) without replacing it.  On success returns
 * HeapTupleMayBeUpdated with *buffer pinned (caller must release);
 * on failure returns the HeapTupleSatisfiesUpdate result code with
 * tuple->t_self set to the next tuple in the update chain.
 */
int
heap_mark4update(Relation relation, HeapTuple tuple, Buffer *buffer,
                 CommandId cid)
{
    ItemPointer tid = &(tuple->t_self);
    ItemId      lp;
    PageHeader  dp;
    int         result;
    uint16      sv_infomask;

    *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
    if (!BufferIsValid(*buffer))
        elog(ERROR, "ReadBuffer failed");

    LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);

    dp = (PageHeader) BufferGetPage(*buffer);
    lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(tid));
    tuple->t_datamcxt = NULL;
    tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
    tuple->t_len = ItemIdGetLength(lp);

l3:
    /* as in heap_update: save hint-bit changes made by the tqual check */
    sv_infomask = tuple->t_data->t_infomask;
    result = HeapTupleSatisfiesUpdate(tuple->t_data, cid);
    if (sv_infomask != tuple->t_data->t_infomask)
        SetBufferCommitInfoNeedsSave(*buffer);

    if (result == HeapTupleInvisible)
    {
        LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
        ReleaseBuffer(*buffer);
        elog(ERROR, "attempted to mark4update invisible tuple");
    }
    else if (result == HeapTupleBeingUpdated)
    {
        TransactionId xwait = HeapTupleHeaderGetXmax(tuple->t_data);

        /* sleep until concurrent transaction ends */
        LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
        XactLockTableWait(xwait);

        LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
        if (!TransactionIdDidCommit(xwait))
            goto l3;

        /*
         * xwait is committed but if xwait had just marked the tuple for
         * update then some other xaction could update this tuple before
         * we got to this point.
         */
        if (!TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
                                 xwait))
            goto l3;
        if (!(tuple->t_data->t_infomask & HEAP_XMAX_COMMITTED))
        {
            tuple->t_data->t_infomask |= HEAP_XMAX_COMMITTED;
            SetBufferCommitInfoNeedsSave(*buffer);
        }
        /* if tuple was marked for update but not updated... */
        if (tuple->t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
            result = HeapTupleMayBeUpdated;
        else
            result = HeapTupleUpdated;
    }
    if (result != HeapTupleMayBeUpdated)
    {
        Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated);
        /* follow the forward link for the caller */
        tuple->t_self = tuple->t_data->t_ctid;
        LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
        return result;
    }

    /*
     * XLOG stuff: no logging is required as long as we have no
     * savepoints.  For savepoints private log could be used...
     */
    ((PageHeader) BufferGetPage(*buffer))->pd_sui = ThisStartUpID;

    /* store transaction information of xact marking the tuple */
    tuple->t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                                   HEAP_XMAX_INVALID | HEAP_MOVED);
    tuple->t_data->t_infomask |= HEAP_MARKED_FOR_UPDATE;
    HeapTupleHeaderSetXmax(tuple->t_data, GetCurrentTransactionId());
    HeapTupleHeaderSetCmax(tuple->t_data, cid);
    /* Make sure there is no forward chain link in t_ctid */
    tuple->t_data->t_ctid = *tid;

    LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);

    WriteNoReleaseBuffer(*buffer);

    return HeapTupleMayBeUpdated;
}

/* ----------------
 *      heap_markpos    - mark scan position
 *
 * Note:
 *      Should only one mark be maintained per scan at one time.
 * Check if this can be done generally--say calls to get the
 * next/previous tuple and NEVER pass struct scandesc to the
 * user AM's.  Now, the mark is sent to the executor for safekeeping.
 * Probably can store this info into a GENERAL scan structure.
 *
 * May be best to change this call to store the marked position
 * (up to 2?) in the scan structure itself.
 * Fix to use the proper caching structure.
 * ----------------
 */
void
heap_markpos(HeapScanDesc scan)
{
    /* Note: no locking manipulations needed */

    /* remember the TID of the current tuple, or mark the saved TID invalid */
    if (scan->rs_ctup.t_data != NULL)
        scan->rs_mctid = scan->rs_ctup.t_self;
    else
        ItemPointerSetInvalid(&scan->rs_mctid);
}

/* ----------------
 *      heap_restrpos   - restore position to marked location
 *
 * Note: there are bad side effects here.  If we were past the end
 * of a relation when heapmarkpos is called, then if the relation is
 * extended via insert, then the next call to heaprestrpos will set
 * cause the added tuples to be visible when the scan continues.
 * Problems also arise if the TID's are rearranged!!!
 *
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?