📄 execmain.c
/*
 *
 * *tid is also an output parameter: it's modified to hold the TID of the
 * latest version of the tuple (note this may be changed even on failure)
 *
 * Returns a slot containing the new candidate update/delete tuple, or
 * NULL if we determine we shouldn't process the row.
 */
TupleTableSlot *
EvalPlanQual(EState *estate, Index rti,
             ItemPointer tid, TransactionId priorXmax, CommandId curCid)
{
    evalPlanQual *epq;
    EState     *epqstate;
    Relation    relation;
    HeapTupleData tuple;
    HeapTuple   copyTuple = NULL;
    bool        endNode;

    Assert(rti != 0);

    /*
     * find relation containing target tuple
     */
    if (estate->es_result_relation_info != NULL &&
        estate->es_result_relation_info->ri_RangeTableIndex == rti)
        relation = estate->es_result_relation_info->ri_RelationDesc;
    else
    {
        ListCell   *l;

        relation = NULL;
        foreach(l, estate->es_rowMarks)
        {
            if (((execRowMark *) lfirst(l))->rti == rti)
            {
                relation = ((execRowMark *) lfirst(l))->relation;
                break;
            }
        }
        if (relation == NULL)
            elog(ERROR, "could not find RowMark for RT index %u", rti);
    }

    /*
     * fetch tid tuple
     *
     * Loop here to deal with updated or busy tuples
     */
    tuple.t_self = *tid;
    for (;;)
    {
        Buffer      buffer;

        if (heap_fetch(relation, SnapshotDirty, &tuple, &buffer, true, NULL))
        {
            /*
             * If xmin isn't what we're expecting, the slot must have been
             * recycled and reused for an unrelated tuple.  This implies
             * that the latest version of the row was deleted, so we need do
             * nothing.  (Should be safe to examine xmin without getting
             * buffer's content lock, since xmin never changes in an
             * existing tuple.)
             */
            if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
                                     priorXmax))
            {
                ReleaseBuffer(buffer);
                return NULL;
            }

            /* otherwise xmin should not be dirty... */
            if (TransactionIdIsValid(SnapshotDirty->xmin))
                elog(ERROR, "t_xmin is uncommitted in tuple to be updated");

            /*
             * If the tuple is being updated by another transaction then we
             * have to wait for its commit/abort.
             */
            if (TransactionIdIsValid(SnapshotDirty->xmax))
            {
                ReleaseBuffer(buffer);
                XactLockTableWait(SnapshotDirty->xmax);
                continue;       /* loop back to repeat heap_fetch */
            }

            /*
             * If the tuple was inserted by our own transaction, we have to
             * check cmin against curCid: cmin >= curCid means our command
             * cannot see the tuple, so we should ignore it.  Without this
             * we are open to the "Halloween problem" of indefinitely
             * re-updating the same tuple.  (We need not check cmax because
             * HeapTupleSatisfiesDirty will consider a tuple deleted by our
             * transaction dead, regardless of cmax.)  We just checked that
             * priorXmax == xmin, so we can test that variable instead of
             * doing HeapTupleHeaderGetXmin again.
             */
            if (TransactionIdIsCurrentTransactionId(priorXmax) &&
                HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
            {
                ReleaseBuffer(buffer);
                return NULL;
            }

            /*
             * We got the tuple - now copy it for use by the recheck query.
             */
            copyTuple = heap_copytuple(&tuple);
            ReleaseBuffer(buffer);
            break;
        }

        /*
         * If the referenced slot was actually empty, the latest version of
         * the row must have been deleted, so we need do nothing.
         */
        if (tuple.t_data == NULL)
        {
            ReleaseBuffer(buffer);
            return NULL;
        }

        /*
         * As above, if xmin isn't what we're expecting, do nothing.
         */
        if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
                                 priorXmax))
        {
            ReleaseBuffer(buffer);
            return NULL;
        }

        /*
         * If we get here, the tuple was found but failed SnapshotDirty.
         * Assuming the xmin is either a committed xact or our own xact (as
         * it certainly should be if we're trying to modify the tuple), this
         * must mean that the row was updated or deleted by either a
         * committed xact or our own xact.  If it was deleted, we can ignore
         * it; if it was updated then chain up to the next version and
         * repeat the whole test.
         *
         * As above, it should be safe to examine xmax and t_ctid without
         * the buffer content lock, because they can't be changing.
         */
        if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid))
        {
            /* deleted, so forget about it */
            ReleaseBuffer(buffer);
            return NULL;
        }

        /* updated, so look at the updated row */
        tuple.t_self = tuple.t_data->t_ctid;
        /* updated row should have xmin matching this xmax */
        priorXmax = HeapTupleHeaderGetXmax(tuple.t_data);
        ReleaseBuffer(buffer);
        /* loop back to fetch next in chain */
    }

    /*
     * For UPDATE/DELETE we have to return the tid of the actual row we're
     * executing PQ for.
     */
    *tid = tuple.t_self;
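    /*
     * Editorial aside: the loop above has chased the t_ctid update chain to
     * its end, and *tid has just been set to the newest version's TID.  A
     * worked example (TIDs are hypothetical): if the version at (0,1) was
     * updated into (0,2), and (0,2) into (0,3), by committed transactions,
     * then a caller passing *tid = (0,2) reaches this point with
     * tuple.t_self = (0,3), *tid = (0,3), and copyTuple holding that
     * newest version.
     */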
    /*
     * Need to run a recheck subquery.  Find or create a PQ stack entry.
     */
    epq = estate->es_evalPlanQual;
    endNode = true;

    if (epq != NULL && epq->rti == 0)
    {
        /* Top PQ stack entry is idle, so re-use it */
        Assert(!(estate->es_useEvalPlan) && epq->next == NULL);
        epq->rti = rti;
        endNode = false;
    }

    /*
     * If this is a request for another RTE, Ra, then we have to check
     * whether PlanQual was already requested for Ra.  If so, Ra's row was
     * updated again, so we have to restart the old execution for Ra and
     * discard everything done after Ra was suspended.
     */
    if (epq != NULL && epq->rti != rti &&
        epq->estate->es_evTuple[rti - 1] != NULL)
    {
        do
        {
            evalPlanQual *oldepq;

            /* stop execution */
            EvalPlanQualStop(epq);
            /* pop previous PlanQual from the stack */
            oldepq = epq->next;
            Assert(oldepq && oldepq->rti != 0);
            /* push current PQ to freePQ stack */
            oldepq->free = epq;
            epq = oldepq;
            estate->es_evalPlanQual = epq;
        } while (epq->rti != rti);
    }

    /*
     * If this request is for another RTE, we have to suspend execution of
     * the current PlanQual and start execution for the new one.
     */
    if (epq == NULL || epq->rti != rti)
    {
        /* try to reuse a plan used previously */
        evalPlanQual *newepq = (epq != NULL) ? epq->free : NULL;

        if (newepq == NULL)     /* first call or freePQ stack is empty */
        {
            newepq = (evalPlanQual *) palloc0(sizeof(evalPlanQual));
            newepq->free = NULL;
            newepq->estate = NULL;
            newepq->planstate = NULL;
        }
        else
        {
            /* recycle previously used PlanQual */
            Assert(newepq->estate == NULL);
            epq->free = NULL;
        }
        /* push current PQ to the stack */
        newepq->next = epq;
        epq = newepq;
        estate->es_evalPlanQual = epq;
        epq->rti = rti;
        endNode = false;
    }

    Assert(epq->rti == rti);

    /*
     * OK - we're requested for the same RTE.  Unfortunately we still have
     * to end and restart execution of the plan, because ExecReScan wouldn't
     * ensure that upper plan nodes would reset themselves.  We could make
     * that work if insertion of the target tuple were integrated with the
     * Param mechanism somehow, so that the upper plan nodes know that their
     * children's outputs have changed.
     *
     * Note that the stack of free evalPlanQual nodes is quite useless at
     * the moment, since it only saves us from pallocing/releasing the
     * evalPlanQual nodes themselves.  But it will be useful once we
     * implement ReScan instead of end/restart for re-using PlanQual nodes.
     */
    if (endNode)
    {
        /* stop execution */
        EvalPlanQualStop(epq);
    }

    /*
     * Initialize the new recheck query.
     *
     * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
     * instead copy down changeable state from the top plan (including
     * es_result_relation_info and es_junkFilter) and reset locally
     * changeable state in the epq (including es_param_exec_vals and
     * es_evTupleNull).
     */
    EvalPlanQualStart(epq, estate, epq->next);

    /*
     * Free the old RTE's tuple, if any, and store the target tuple where
     * the relation's scan node will see it.
     */
    epqstate = epq->estate;
    if (epqstate->es_evTuple[rti - 1] != NULL)
        heap_freetuple(epqstate->es_evTuple[rti - 1]);
    epqstate->es_evTuple[rti - 1] = copyTuple;

    return EvalPlanQualNext(estate);
}
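/*
 * Editorial aside (not part of the original execMain.c): as the function
 * above shows, estate->es_evalPlanQual acts as a stack of recheck levels,
 * and entries popped off it are parked on the new top's "free" list so a
 * later push can recycle them instead of palloc'ing afresh.  Below is a
 * minimal self-contained sketch of that push/pop-with-recycling discipline,
 * using a hypothetical ToyPQ type in place of evalPlanQual; the
 * EPQ_EXAMPLE_ONLY guard is likewise hypothetical and keeps the sketch out
 * of any real build.
 */
#ifdef EPQ_EXAMPLE_ONLY
#include <stdlib.h>

typedef struct ToyPQ
{
    int         rti;            /* 0 means "idle" */
    struct ToyPQ *next;         /* next (older) stack entry */
    struct ToyPQ *free;         /* parked entries, kept for reuse */
} ToyPQ;

/* Push a recheck level for 'rti', recycling a parked node when possible. */
static ToyPQ *
toy_pq_push(ToyPQ *top, int rti)
{
    ToyPQ      *newpq = (top != NULL) ? top->free : NULL;

    if (newpq == NULL)          /* first call, or free list is empty */
        newpq = (ToyPQ *) calloc(1, sizeof(ToyPQ));
    else
        top->free = NULL;       /* detach the recycled node */
    newpq->next = top;
    newpq->rti = rti;
    return newpq;
}

/* Pop the top level; the oldest entry is only marked idle, never freed. */
static ToyPQ *
toy_pq_pop(ToyPQ *top)
{
    ToyPQ      *oldpq = top->next;

    if (oldpq == NULL)
    {
        top->rti = 0;           /* bottom of stack: mark as reusable */
        return top;
    }
    oldpq->free = top;          /* park it for the next push */
    return oldpq;
}
#endif   /* EPQ_EXAMPLE_ONLY */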
static TupleTableSlot *
EvalPlanQualNext(EState *estate)
{
    evalPlanQual *epq = estate->es_evalPlanQual;
    MemoryContext oldcontext;
    TupleTableSlot *slot;

    Assert(epq->rti != 0);

lpqnext:;
    oldcontext = MemoryContextSwitchTo(epq->estate->es_query_cxt);
    slot = ExecProcNode(epq->planstate);
    MemoryContextSwitchTo(oldcontext);

    /*
     * No more tuples for this PQ.  Continue the previous one.
     */
    if (TupIsNull(slot))
    {
        evalPlanQual *oldepq;

        /* stop execution */
        EvalPlanQualStop(epq);
        /* pop old PQ from the stack */
        oldepq = epq->next;
        if (oldepq == NULL)
        {
            /* this is the first (oldest) PQ - mark it as free */
            epq->rti = 0;
            estate->es_useEvalPlan = false;
            /* and continue Query execution */
            return NULL;
        }
        Assert(oldepq->rti != 0);
        /* push current PQ to freePQ stack */
        oldepq->free = epq;
        epq = oldepq;
        estate->es_evalPlanQual = epq;
        goto lpqnext;
    }

    return slot;
}

static void
EndEvalPlanQual(EState *estate)
{
    evalPlanQual *epq = estate->es_evalPlanQual;

    if (epq->rti == 0)          /* plans already shut down */
    {
        Assert(epq->next == NULL);
        return;
    }

    for (;;)
    {
        evalPlanQual *oldepq;

        /* stop execution */
        EvalPlanQualStop(epq);
        /* pop old PQ from the stack */
        oldepq = epq->next;
        if (oldepq == NULL)
        {
            /* this is the first (oldest) PQ - mark it as free */
            epq->rti = 0;
            estate->es_useEvalPlan = false;
            break;
        }
        Assert(oldepq->rti != 0);
        /* push current PQ to freePQ stack */
        oldepq->free = epq;
        epq = oldepq;
        estate->es_evalPlanQual = epq;
    }
}

/*
 * Start execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 */
static void
EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
{
    EState     *epqstate;
    int         rtsize;
    MemoryContext oldcontext;

    rtsize = list_length(estate->es_range_table);

    epq->estate = epqstate = CreateExecutorState();

    oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

    /*
     * The epqstates share the top query's copy of unchanging state such as
     * the snapshot, rangetable, result-rel info, and external Param info.
     * They need their own copies of local state, including a tuple table,
     * es_param_exec_vals, etc.
     */
    epqstate->es_direction = ForwardScanDirection;
    epqstate->es_snapshot = estate->es_snapshot;
    epqstate->es_crosscheck_snapshot = estate->es_crosscheck_snapshot;
    epqstate->es_range_table = estate->es_range_table;
    epqstate->es_result_relations = estate->es_result_relations;
    epqstate->es_num_result_relations = estate->es_num_result_relations;
    epqstate->es_result_relation_info = estate->es_result_relation_info;
    epqstate->es_junkFilter = estate->es_junkFilter;
    epqstate->es_into_relation_descriptor = estate->es_into_relation_descriptor;
    epqstate->es_into_relation_use_wal = estate->es_into_relation_use_wal;
    epqstate->es_param_list_info = estate->es_param_list_info;
    if (estate->es_topPlan->nParamExec > 0)
        epqstate->es_param_exec_vals = (ParamExecData *)
            palloc0(estate->es_topPlan->nParamExec * sizeof(ParamExecData));
    epqstate->es_rowMarks = estate->es_rowMarks;
    epqstate->es_forUpdate = estate->es_forUpdate;
    epqstate->es_rowNoWait = estate->es_rowNoWait;
    epqstate->es_instrument = estate->es_instrument;
    epqstate->es_select_into = estate->es_select_into;
    epqstate->es_into_oids = estate->es_into_oids;
    epqstate->es_topPlan = estate->es_topPlan;

    /*
     * Each epqstate must have its own es_evTupleNull state, but all the
     * stack entries share es_evTuple state.  This allows sub-rechecks to
     * inherit the value being examined by an outer recheck.
     */
    epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
    if (priorepq == NULL)
        /* first PQ stack entry */
        epqstate->es_evTuple = (HeapTuple *)
            palloc0(rtsize * sizeof(HeapTuple));
    else
        /* later stack entries share the same storage */
        epqstate->es_evTuple = priorepq->estate->es_evTuple;

    epqstate->es_tupleTable =
        ExecCreateTupleTable(estate->es_tupleTable->size);

    epq->planstate = ExecInitNode(estate->es_topPlan, epqstate);

    MemoryContextSwitchTo(oldcontext);
}
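/*
 * Editorial aside (not part of the original execMain.c): in
 * EvalPlanQualStart above, each stack level gets private es_evTupleNull
 * flags while every level aliases the bottom level's es_evTuple array, so
 * an inner recheck inherits the tuple an outer recheck is examining.  A
 * minimal sketch of that sharing, with hypothetical names and the same
 * hypothetical EPQ_EXAMPLE_ONLY guard as above:
 */
#ifdef EPQ_EXAMPLE_ONLY
#include <stdbool.h>
#include <stdlib.h>

typedef struct ToyLevel
{
    void      **evTuple;        /* storage shared by all stack levels */
    bool       *evTupleNull;    /* private per-level flags */
} ToyLevel;

static void
toy_level_start(ToyLevel *level, const ToyLevel *prior, int rtsize)
{
    level->evTupleNull = (bool *) calloc(rtsize, sizeof(bool));
    if (prior == NULL)          /* bottom level owns the storage */
        level->evTuple = (void **) calloc(rtsize, sizeof(void *));
    else                        /* later levels alias the same array */
        level->evTuple = prior->evTuple;
}
#endif   /* EPQ_EXAMPLE_ONLY */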
/*
 * End execution of one level of PlanQual.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).
 */
static void
EvalPlanQualStop(evalPlanQual *epq)
{
    EState     *epqstate = epq->estate;
    MemoryContext oldcontext;

    oldcontext = MemoryContextSwitchTo(epqstate->es_query_cxt);

    ExecEndNode(epq->planstate);

    ExecDropTupleTable(epqstate->es_tupleTable, true);
    epqstate->es_tupleTable = NULL;

    if (epqstate->es_evTuple[epq->rti - 1] != NULL)
    {
        heap_freetuple(epqstate->es_evTuple[epq->rti - 1]);
        epqstate->es_evTuple[epq->rti - 1] = NULL;
    }

    MemoryContextSwitchTo(oldcontext);

    FreeExecutorState(epqstate);

    epq->estate = NULL;
    epq->planstate = NULL;
}
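/*
 * Editorial usage note: the expected call pattern, from an updating caller
 * such as ExecUpdate or ExecDelete under READ COMMITTED rules, is roughly
 * this: when a concurrent transaction has already updated or deleted the
 * target row, pass the stale version's t_ctid and xmax to EvalPlanQual.  A
 * non-NULL result slot means the newest version of the row still satisfies
 * the query's quals, so the caller should retry its update/delete against
 * the TID now stored in *tid; a NULL result means the row was deleted or no
 * longer qualifies, and the caller should simply skip it.
 */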