📄 inval.c
			break;
		case TWOPHASE_INFO_FILE_AFTER:
			RelationCacheInitFileInvalidate(false);
			break;
		default:
			Assert(false);
			break;
	}
}

/*
 * AtEOXact_Inval
 *		Process queued-up invalidation messages at end of main transaction.
 *
 * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
 * to the shared invalidation message queue. Note that these will be read
 * not only by other backends, but also by our own backend at the next
 * transaction start (via AcceptInvalidationMessages). This means that
 * we can skip immediate local processing of anything that's still in
 * CurrentCmdInvalidMsgs, and just send that list out too.
 *
 * If not isCommit, we are aborting, and must locally process the messages
 * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
 * since they'll not have seen our changed tuples anyway. We can forget
 * about CurrentCmdInvalidMsgs too, since those changes haven't touched
 * the caches yet.
 *
 * In any case, reset the various lists to empty. We need not physically
 * free memory here, since TopTransactionContext is about to be emptied
 * anyway.
 *
 * Note:
 *		This should be called as the last step in processing a transaction.
 */
void
AtEOXact_Inval(bool isCommit)
{
	if (isCommit)
	{
		/* Must be at top of stack */
		Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);

		/*
		 * Relcache init file invalidation requires processing both before
		 * and after we send the SI messages. However, we need not do
		 * anything unless we committed.
		 */
		if (transInvalInfo->RelcacheInitFileInval)
			RelationCacheInitFileInvalidate(true);

		AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
								   &transInvalInfo->CurrentCmdInvalidMsgs);

		ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
									SendSharedInvalidMessage);

		if (transInvalInfo->RelcacheInitFileInval)
			RelationCacheInitFileInvalidate(false);
	}
	else if (transInvalInfo != NULL)
	{
		/* Must be at top of stack */
		Assert(transInvalInfo->parent == NULL);

		ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
									LocalExecuteInvalidationMessage);
	}

	/* Need not free anything explicitly */
	transInvalInfo = NULL;
}
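
/*
 * Illustrative sketch (not part of this file): the comment above notes that
 * the messages sent here are read back via AcceptInvalidationMessages() at
 * the next transaction start. Assuming the sinval reader entry point
 * ReceiveSharedInvalidMessages(), the consuming side looks roughly like:
 *
 *		void
 *		AcceptInvalidationMessages(void)
 *		{
 *			ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
 *										 InvalidateSystemCaches);
 *		}
 *
 * Each queued message is applied locally through the first function; if the
 * shared queue overflowed before we read it, the second function is called
 * instead to reset all caches wholesale.
 */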

/*
 * AtEOSubXact_Inval
 *		Process queued-up invalidation messages at end of subtransaction.
 *
 * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
 * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
 * parent's PriorCmdInvalidMsgs list.
 *
 * If not isCommit, we are aborting, and must locally process the messages
 * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
 * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
 * touched the caches yet.
 *
 * In any case, pop the transaction stack. We need not physically free memory
 * here, since CurTransactionContext is about to be emptied anyway
 * (if aborting). Beware of the possibility of aborting the same nesting
 * level twice, though.
 */
void
AtEOSubXact_Inval(bool isCommit)
{
	int			my_level = GetCurrentTransactionNestLevel();
	TransInvalidationInfo *myInfo = transInvalInfo;

	if (isCommit)
	{
		/* Must be at non-top of stack */
		Assert(myInfo != NULL && myInfo->parent != NULL);
		Assert(myInfo->my_level == my_level);

		/* If CurrentCmdInvalidMsgs still has anything, fix it */
		CommandEndInvalidationMessages();

		/* Pass up my inval messages to parent */
		AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
								   &myInfo->PriorCmdInvalidMsgs);

		/* Pending relcache inval becomes parent's problem too */
		if (myInfo->RelcacheInitFileInval)
			myInfo->parent->RelcacheInitFileInval = true;

		/* Pop the transaction state stack */
		transInvalInfo = myInfo->parent;

		/* Need not free anything else explicitly */
		pfree(myInfo);
	}
	else if (myInfo != NULL && myInfo->my_level == my_level)
	{
		/* Must be at non-top of stack */
		Assert(myInfo->parent != NULL);

		ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
									LocalExecuteInvalidationMessage);

		/* Pop the transaction state stack */
		transInvalInfo = myInfo->parent;

		/* Need not free anything else explicitly */
		pfree(myInfo);
	}
}

/*
 * CommandEndInvalidationMessages
 *		Process queued-up invalidation messages at end of one command
 *		in a transaction.
 *
 * Here, we send no messages to the shared queue, since we don't know yet if
 * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
 * list, so as to flush our caches of any entries we have outdated in the
 * current command. We then move the current-cmd list over to become part
 * of the prior-cmds list.
 *
 * Note:
 *		This should be called during CommandCounterIncrement(),
 *		after we have advanced the command ID.
 */
void
CommandEndInvalidationMessages(void)
{
	/*
	 * You might think this shouldn't be called outside any transaction, but
	 * bootstrap does it, and also ABORT issued when not in a transaction. So
	 * just quietly return if no state to work on.
	 */
	if (transInvalInfo == NULL)
		return;

	ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
								LocalExecuteInvalidationMessage);
	AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
							   &transInvalInfo->CurrentCmdInvalidMsgs);
}
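
/*
 * Illustrative sketch (not part of this file): the net effect of the
 * two-list scheme in a multi-command transaction. Hypothetical caller
 * sequence; the invals themselves are queued by the CacheInvalidate*
 * routines further below:
 *
 *		update a catalog tuple			-- queued in CurrentCmdInvalidMsgs
 *		CommandCounterIncrement();		-- calls the function above: flush
 *										-- our own caches, then move the list
 *										-- over to PriorCmdInvalidMsgs
 *		run another command				-- sees rebuilt cache entries
 *		AtEOXact_Inval(true);			-- commit: send everything to the
 *										-- shared invalidation queue
 */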

/*
 * BeginNonTransactionalInvalidation
 *		Prepare for invalidation messages for nontransactional updates.
 *
 * A nontransactional invalidation is one that must be sent whether or not
 * the current transaction eventually commits. We arrange for all invals
 * queued between this call and EndNonTransactionalInvalidation() to be sent
 * immediately when the latter is called.
 *
 * Currently, this is only used by heap_page_prune(), and only when it is
 * invoked during VACUUM FULL's first pass over a table. We expect therefore
 * that we are not inside a subtransaction and there are no already-pending
 * invalidations. This could be relaxed by setting up a new nesting level of
 * invalidation data, but for now there's no need. Note that heap_page_prune
 * knows that this function does not change any state, and therefore there's
 * no need to worry about cleaning up if there's an elog(ERROR) before
 * reaching EndNonTransactionalInvalidation (the invals will just be thrown
 * away if that happens).
 */
void
BeginNonTransactionalInvalidation(void)
{
	/* Must be at top of stack */
	Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);

	/* Must not have any previously-queued activity */
	Assert(transInvalInfo->PriorCmdInvalidMsgs.cclist == NULL);
	Assert(transInvalInfo->PriorCmdInvalidMsgs.rclist == NULL);
	Assert(transInvalInfo->CurrentCmdInvalidMsgs.cclist == NULL);
	Assert(transInvalInfo->CurrentCmdInvalidMsgs.rclist == NULL);
	Assert(transInvalInfo->RelcacheInitFileInval == false);
}

/*
 * EndNonTransactionalInvalidation
 *		Process queued-up invalidation messages for nontransactional updates.
 *
 * We expect to find messages in CurrentCmdInvalidMsgs only (else there
 * was a CommandCounterIncrement within the "nontransactional" update).
 * We must process them locally and send them out to the shared invalidation
 * message queue.
 *
 * We must also reset the lists to empty and explicitly free memory (we can't
 * rely on end-of-transaction cleanup for that).
 */
void
EndNonTransactionalInvalidation(void)
{
	InvalidationChunk *chunk;
	InvalidationChunk *next;

	/* Must be at top of stack */
	Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);

	/* Must not have any prior-command messages */
	Assert(transInvalInfo->PriorCmdInvalidMsgs.cclist == NULL);
	Assert(transInvalInfo->PriorCmdInvalidMsgs.rclist == NULL);

	/*
	 * At present, this function is only used for CTID-changing updates;
	 * since the relcache init file doesn't store any tuple CTIDs, we
	 * don't have to invalidate it. That might not be true forever
	 * though, in which case we'd need code similar to AtEOXact_Inval.
	 */

	/* Send out the invals */
	ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
								LocalExecuteInvalidationMessage);
	ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
								SendSharedInvalidMessage);

	/* Clean up and release memory */
	for (chunk = transInvalInfo->CurrentCmdInvalidMsgs.cclist;
		 chunk != NULL;
		 chunk = next)
	{
		next = chunk->next;
		pfree(chunk);
	}
	for (chunk = transInvalInfo->CurrentCmdInvalidMsgs.rclist;
		 chunk != NULL;
		 chunk = next)
	{
		next = chunk->next;
		pfree(chunk);
	}
	transInvalInfo->CurrentCmdInvalidMsgs.cclist = NULL;
	transInvalInfo->CurrentCmdInvalidMsgs.rclist = NULL;
	transInvalInfo->RelcacheInitFileInval = false;
}
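
/*
 * Illustrative sketch (not part of this file): the intended pairing of the
 * two functions above, per their comments, inside heap_page_prune() when
 * invoked from VACUUM FULL. Invals queued in between are sent immediately,
 * whether or not the surrounding transaction later commits:
 *
 *		BeginNonTransactionalInvalidation();
 *		... queue invals for the CTID-changing updates, with no
 *			intervening CommandCounterIncrement() ...
 *		EndNonTransactionalInvalidation();	-- process locally and send
 *											-- to the shared queue right now
 */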

/*
 * CacheInvalidateHeapTuple
 *		Register the given tuple for invalidation at end of command
 *		(ie, current command is creating or outdating this tuple).
 */
void
CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple)
{
	PrepareForTupleInvalidation(relation, tuple);
}

/*
 * CacheInvalidateRelcache
 *		Register invalidation of the specified relation's relcache entry
 *		at end of command.
 *
 * This is used in places that need to force relcache rebuild but aren't
 * changing any of the tuples recognized as contributors to the relcache
 * entry by PrepareForTupleInvalidation. (An example is dropping an index.)
 * We assume in particular that relfilenode/reltablespace aren't changing
 * (so the rd_node value is still good).
 *
 * XXX most callers of this probably don't need to force an smgr flush.
 */
void
CacheInvalidateRelcache(Relation relation)
{
	Oid			databaseId;
	Oid			relationId;

	relationId = RelationGetRelid(relation);
	if (relation->rd_rel->relisshared)
		databaseId = InvalidOid;
	else
		databaseId = MyDatabaseId;

	RegisterRelcacheInvalidation(databaseId, relationId);
	RegisterSmgrInvalidation(relation->rd_node);
}

/*
 * CacheInvalidateRelcacheByTuple
 *		As above, but relation is identified by passing its pg_class tuple.
 */
void
CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
{
	Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
	Oid			databaseId;
	Oid			relationId;
	RelFileNode rnode;

	relationId = HeapTupleGetOid(classTuple);
	if (classtup->relisshared)
		databaseId = InvalidOid;
	else
		databaseId = MyDatabaseId;
	if (classtup->reltablespace)
		rnode.spcNode = classtup->reltablespace;
	else
		rnode.spcNode = MyDatabaseTableSpace;
	rnode.dbNode = databaseId;
	rnode.relNode = classtup->relfilenode;

	RegisterRelcacheInvalidation(databaseId, relationId);
	RegisterSmgrInvalidation(rnode);
}

/*
 * CacheInvalidateRelcacheByRelid
 *		As above, but relation is identified by passing its OID.
 *		This is the least efficient of the three options; use one of
 *		the above routines if you have a Relation or pg_class tuple.
 */
void
CacheInvalidateRelcacheByRelid(Oid relid)
{
	HeapTuple	tup;

	tup = SearchSysCache(RELOID,
						 ObjectIdGetDatum(relid),
						 0, 0, 0);
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for relation %u", relid);
	CacheInvalidateRelcacheByTuple(tup);
	ReleaseSysCache(tup);
}

/*
 * CacheRegisterSyscacheCallback
 *		Register the specified function to be called for all future
 *		invalidation events in the specified cache.
 *
 * NOTE: currently, the OID argument to the callback routine is not
 * provided for syscache callbacks; the routine doesn't really get any
 * useful info as to exactly what changed. It should treat every call
 * as a "cache flush" request.
 */
void
CacheRegisterSyscacheCallback(int cacheid,
							  CacheCallbackFunction func,
							  Datum arg)
{
	if (cache_callback_count >= MAX_CACHE_CALLBACKS)
		elog(FATAL, "out of cache_callback_list slots");

	cache_callback_list[cache_callback_count].id = cacheid;
	cache_callback_list[cache_callback_count].function = func;
	cache_callback_list[cache_callback_count].arg = arg;

	++cache_callback_count;
}

/*
 * CacheRegisterRelcacheCallback
 *		Register the specified function to be called for all future
 *		relcache invalidation events. The OID of the relation being
 *		invalidated will be passed to the function.
 *
 * NOTE: InvalidOid will be passed if a cache reset request is received.
 * In this case the called routines should flush all cached state.
 */
void
CacheRegisterRelcacheCallback(CacheCallbackFunction func,
							  Datum arg)
{
	if (cache_callback_count >= MAX_CACHE_CALLBACKS)
		elog(FATAL, "out of cache_callback_list slots");

	cache_callback_list[cache_callback_count].id = SHAREDINVALRELCACHE_ID;
	cache_callback_list[cache_callback_count].function = func;
	cache_callback_list[cache_callback_count].arg = arg;

	++cache_callback_count;
}
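
/*
 * Illustrative sketch (not part of this file): registering a relcache
 * callback, e.g. from a hypothetical module's initialization code. Per the
 * note above, the callback must treat relid == InvalidOid as a request to
 * flush all of its cached state; my_relcache_callback and the helpers it
 * calls are made-up names:
 *
 *		static void
 *		my_relcache_callback(Datum arg, Oid relid)
 *		{
 *			if (relid == InvalidOid)
 *				clear_my_whole_cache();			-- hypothetical helper
 *			else
 *				forget_my_entry_for(relid);		-- hypothetical helper
 *		}
 *
 *		...
 *		CacheRegisterRelcacheCallback(my_relcache_callback, (Datum) 0);
 */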