inval.c
/*
 * Execute the given function for all the messages in an invalidation list.
 * The list is not altered.
 *
 * catcache entries are processed first, for reasons mentioned above.
 */
static void
ProcessInvalidationMessages(InvalidationListHeader *hdr,
                            void (*func) (SharedInvalidationMessage *msg))
{
    ProcessMessageList(hdr->cclist, func(msg));
    ProcessMessageList(hdr->rclist, func(msg));
}

/* ----------------------------------------------------------------
 *                  private support functions
 * ----------------------------------------------------------------
 */

/*
 * RegisterCatcacheInvalidation
 *
 * Register an invalidation event for a catcache tuple entry.
 */
static void
RegisterCatcacheInvalidation(int cacheId,
                             uint32 hashValue,
                             ItemPointer tuplePtr,
                             Oid dbId)
{
    AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
                                   cacheId, hashValue, tuplePtr, dbId);
}

/*
 * RegisterRelcacheInvalidation
 *
 * As above, but register a relcache invalidation event.
 */
static void
RegisterRelcacheInvalidation(Oid dbId, Oid relId)
{
    AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
                                   dbId, relId);

    /*
     * If the relation being invalidated is one of those cached in the
     * relcache init file, mark that we need to zap that file at commit.
     */
    if (RelationIdIsInInitFile(relId))
        transInvalInfo->RelcacheInitFileInval = true;
}

/*
 * RegisterSmgrInvalidation
 *
 * As above, but register an smgr invalidation event.
 */
static void
RegisterSmgrInvalidation(RelFileNode rnode)
{
    AddSmgrInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
                               rnode);
}

/*
 * LocalExecuteInvalidationMessage
 *
 * Process a single invalidation message (which could be of any type).
 * Only the local caches are flushed; this does not transmit the message
 * to other backends.
 */
static void
LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
{
    int         i;

    if (msg->id >= 0)
    {
        if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == 0)
        {
            CatalogCacheIdInvalidate(msg->cc.id,
                                     msg->cc.hashValue,
                                     &msg->cc.tuplePtr);

            for (i = 0; i < cache_callback_count; i++)
            {
                struct CACHECALLBACK *ccitem = cache_callback_list + i;

                if (ccitem->id == msg->cc.id)
                    (*ccitem->function) (ccitem->arg, InvalidOid);
            }
        }
    }
    else if (msg->id == SHAREDINVALRELCACHE_ID)
    {
        if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
        {
            RelationCacheInvalidateEntry(msg->rc.relId);

            for (i = 0; i < cache_callback_count; i++)
            {
                struct CACHECALLBACK *ccitem = cache_callback_list + i;

                if (ccitem->id == SHAREDINVALRELCACHE_ID)
                    (*ccitem->function) (ccitem->arg, msg->rc.relId);
            }
        }
    }
    else if (msg->id == SHAREDINVALSMGR_ID)
    {
        /*
         * We could have smgr entries for relations of other databases, so no
         * short-circuit test is possible here.
         */
        smgrclosenode(msg->sm.rnode);
    }
    else
        elog(FATAL, "unrecognized SI message id: %d", msg->id);
}
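/*
 * Illustrative sketch: how the callback loops above get populated.  A module
 * registers a callback through the registration functions declared in
 * utils/inval.h; the signatures assumed below (CacheCallbackFunction taking a
 * Datum and an Oid, CacheRegisterSyscacheCallback/CacheRegisterRelcacheCallback)
 * are believed to match this vintage of the code, but treat this as an
 * assumption-laden example, not verbatim PostgreSQL source.  The guard macro,
 * function names, and the choice of PROCOID are purely hypothetical.
 */
#ifdef INVAL_CALLBACK_EXAMPLE       /* hypothetical guard; not compiled */
static void
ExampleCacheResetCallback(Datum arg, Oid relid)
{
    /*
     * Invoked from LocalExecuteInvalidationMessage() above: relid names the
     * invalidated relation for relcache events, or is InvalidOid for syscache
     * events and whole-cache resets.
     */
    /* ... drop any locally cached state that depends on relid here ... */
}

static void
ExampleModuleInit(void)
{
    /* React to every relcache invalidation (id == SHAREDINVALRELCACHE_ID). */
    CacheRegisterRelcacheCallback(ExampleCacheResetCallback, (Datum) 0);
    /* React to invalidations hitting one particular syscache (id == cacheid). */
    CacheRegisterSyscacheCallback(PROCOID, ExampleCacheResetCallback, (Datum) 0);
}
#endif   /* INVAL_CALLBACK_EXAMPLE */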
/*
 * InvalidateSystemCaches
 *
 *      This blows away all tuples in the system catalog caches and
 *      all the cached relation descriptors and smgr cache entries.
 *      Relation descriptors that have positive refcounts are then rebuilt.
 *
 *      We call this when we see a shared-inval-queue overflow signal,
 *      since that tells us we've lost some shared-inval messages and hence
 *      don't know what needs to be invalidated.
 */
static void
InvalidateSystemCaches(void)
{
    int         i;

    ResetCatalogCaches();
    RelationCacheInvalidate();  /* gets smgr cache too */

    for (i = 0; i < cache_callback_count; i++)
    {
        struct CACHECALLBACK *ccitem = cache_callback_list + i;

        (*ccitem->function) (ccitem->arg, InvalidOid);
    }
}

/*
 * PrepareForTupleInvalidation
 *      Detect whether invalidation of this tuple implies invalidation
 *      of catalog/relation cache entries; if so, register inval events.
 */
static void
PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
{
    Oid         tupleRelId;
    Oid         databaseId;
    Oid         relationId;

    /* Do nothing during bootstrap */
    if (IsBootstrapProcessingMode())
        return;

    /*
     * We only need to worry about invalidation for tuples that are in system
     * relations; user-relation tuples are never in catcaches and can't
     * affect the relcache either.
     */
    if (!IsSystemRelation(relation))
        return;

    /*
     * TOAST tuples can likewise be ignored here.  Note that TOAST tables are
     * considered system relations so they are not filtered by the above test.
     */
    if (IsToastRelation(relation))
        return;

    /*
     * First let the catcache do its thing
     */
    PrepareToInvalidateCacheTuple(relation, tuple,
                                  RegisterCatcacheInvalidation);

    /*
     * Now, is this tuple one of the primary definers of a relcache entry?
     */
    tupleRelId = RelationGetRelid(relation);

    if (tupleRelId == RelationRelationId)
    {
        Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
        RelFileNode rnode;

        relationId = HeapTupleGetOid(tuple);
        if (classtup->relisshared)
            databaseId = InvalidOid;
        else
            databaseId = MyDatabaseId;

        /*
         * We need to send out an smgr inval as well as a relcache inval.
         * This is needed because other backends might possibly possess smgr
         * cache but not relcache entries for the target relation.
         *
         * Note: during a pg_class row update that assigns a new relfilenode
         * or reltablespace value, we will be called on both the old and new
         * tuples, and thus will broadcast invalidation messages showing both
         * the old and new RelFileNode values.  This ensures that other
         * backends will close smgr references to the old file.
         *
         * XXX possible future cleanup: it might be better to trigger smgr
         * flushes explicitly, rather than indirectly from pg_class updates.
         */
        if (classtup->reltablespace)
            rnode.spcNode = classtup->reltablespace;
        else
            rnode.spcNode = MyDatabaseTableSpace;
        rnode.dbNode = databaseId;
        rnode.relNode = classtup->relfilenode;
        RegisterSmgrInvalidation(rnode);
    }
    else if (tupleRelId == AttributeRelationId)
    {
        Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);

        relationId = atttup->attrelid;

        /*
         * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
         * even if the rel in question is shared (which we can't easily tell).
         * This essentially means that only backends in this same database
         * will react to the relcache flush request.  This is in fact
         * appropriate, since only those backends could see our pg_attribute
         * change anyway.  It looks a bit ugly though.
         */
        databaseId = MyDatabaseId;
    }
    else
        return;

    /*
     * Yes.  We need to register a relcache invalidation event.
     */
    RegisterRelcacheInvalidation(databaseId, relationId);
}
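/*
 * Illustrative sketch of the calling pattern (an assumption, not code taken
 * from this excerpt): heap-modification paths are expected to funnel into the
 * public wrapper CacheInvalidateHeapTuple(), which in turn calls
 * PrepareForTupleInvalidation() above.  For an UPDATE this is assumed to
 * happen for both tuple versions, which is what makes the "old and new
 * RelFileNode" broadcast described in the pg_class branch possible.  The
 * guard macro and function name below are hypothetical, and the two-argument
 * CacheInvalidateHeapTuple() form is assumed for this vintage of the code.
 */
#ifdef INVAL_UPDATE_EXAMPLE         /* hypothetical guard; illustration only */
static void
ExampleUpdateInvalidation(Relation relation,
                          HeapTuple oldtup,
                          HeapTuple newtup)
{
    /* Register invalidations for the tuple version being replaced ... */
    CacheInvalidateHeapTuple(relation, oldtup);
    /* ... and for the replacement version, before end of command. */
    CacheInvalidateHeapTuple(relation, newtup);
}
#endif   /* INVAL_UPDATE_EXAMPLE */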
/* ----------------------------------------------------------------
 *                  public functions
 * ----------------------------------------------------------------
 */

/*
 * AcceptInvalidationMessages
 *      Read and process invalidation messages from the shared invalidation
 *      message queue.
 *
 * Note:
 *      This should be called as the first step in processing a transaction.
 */
void
AcceptInvalidationMessages(void)
{
    ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
                                 InvalidateSystemCaches);

    /*
     * Test code to force cache flushes anytime a flush could happen.
     *
     * If used with CLOBBER_FREED_MEMORY, CLOBBER_CACHE_ALWAYS provides a
     * fairly thorough test that the system contains no cache-flush hazards.
     * However, it also makes the system unbelievably slow --- the regression
     * tests take about 100 times longer than normal.
     *
     * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY.
     * This slows things by at least a factor of 10000, so I wouldn't suggest
     * trying to run the entire regression tests that way.  It's useful to
     * try a few simple tests, to make sure that cache reload isn't subject
     * to internal cache-flush hazards, but after you've done a few thousand
     * recursive reloads it's unlikely you'll learn more.
     */
#if defined(CLOBBER_CACHE_ALWAYS)
    {
        static bool in_recursion = false;

        if (!in_recursion)
        {
            in_recursion = true;
            InvalidateSystemCaches();
            in_recursion = false;
        }
    }
#elif defined(CLOBBER_CACHE_RECURSIVELY)
    InvalidateSystemCaches();
#endif
}

/*
 * AtStart_Inval
 *      Initialize inval lists at start of a main transaction.
 */
void
AtStart_Inval(void)
{
    Assert(transInvalInfo == NULL);
    transInvalInfo = (TransInvalidationInfo *)
        MemoryContextAllocZero(TopTransactionContext,
                               sizeof(TransInvalidationInfo));
    transInvalInfo->my_level = GetCurrentTransactionNestLevel();
}

/*
 * AtPrepare_Inval
 *      Save the inval lists state at 2PC transaction prepare.
 *
 * In this phase we just generate 2PC records for all the pending invalidation
 * work.
 */
void
AtPrepare_Inval(void)
{
    /* Must be at top of stack */
    Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);

    /*
     * Relcache init file invalidation requires processing both before and
     * after we send the SI messages.
     */
    if (transInvalInfo->RelcacheInitFileInval)
        RegisterTwoPhaseRecord(TWOPHASE_RM_INVAL_ID, TWOPHASE_INFO_FILE_BEFORE,
                               NULL, 0);

    AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
                               &transInvalInfo->CurrentCmdInvalidMsgs);

    ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
                                PersistInvalidationMessage);

    if (transInvalInfo->RelcacheInitFileInval)
        RegisterTwoPhaseRecord(TWOPHASE_RM_INVAL_ID, TWOPHASE_INFO_FILE_AFTER,
                               NULL, 0);
}

/*
 * PostPrepare_Inval
 *      Clean up after successful PREPARE.
 *
 * Here, we want to act as though the transaction aborted, so that we will
 * undo any syscache changes it made, thereby bringing us into sync with the
 * outside world, which doesn't believe the transaction committed yet.
 *
 * If the prepared transaction is later aborted, there is nothing more to
 * do; if it commits, we will receive the consequent inval messages just
 * like everyone else.
 */
void