📄 sinval.c
}

/*----------
 * GetSnapshotData -- returns information about running transactions.
 *
 * The returned snapshot includes xmin (lowest still-running xact ID),
 * xmax (next xact ID to be assigned), and a list of running xact IDs
 * in the range xmin <= xid < xmax.  It is used as follows:
 *      All xact IDs < xmin are considered finished.
 *      All xact IDs >= xmax are considered still running.
 *      For an xact ID xmin <= xid < xmax, consult list to see whether
 *      it is considered running or not.
 * This ensures that the set of transactions seen as "running" by the
 * current xact will not change after it takes the snapshot.
 *
 * We also compute the current global xmin (oldest xmin across all running
 * transactions) and save it in RecentGlobalXmin.  This is the same
 * computation done by GetOldestXmin(TRUE).  The xmin value is also stored
 * into RecentXmin.
 *----------
 */
Snapshot
GetSnapshotData(Snapshot snapshot, bool serializable)
{
    SISeg      *segP = shmInvalBuffer;
    ProcState  *stateP = segP->procState;
    TransactionId xmin;
    TransactionId xmax;
    TransactionId globalxmin;
    int         index;
    int         count = 0;

    Assert(snapshot != NULL);

    /*
     * Allocating space for MaxBackends xids is usually overkill;
     * lastBackend would be sufficient.  But it seems better to do the
     * malloc while not holding the lock, so we can't look at lastBackend.
     *
     * This does open a possibility for avoiding repeated malloc/free:
     * since MaxBackends does not change at runtime, we can simply reuse
     * the previous xip array if any.  (This relies on the fact that all
     * calls pass static SnapshotData structs.)
     */
    if (snapshot->xip == NULL)
    {
        /*
         * First call for this snapshot
         */
        snapshot->xip = (TransactionId *)
            malloc(MaxBackends * sizeof(TransactionId));
        if (snapshot->xip == NULL)
            ereport(ERROR,
                    (errcode(ERRCODE_OUT_OF_MEMORY),
                     errmsg("out of memory")));
    }

    globalxmin = xmin = GetCurrentTransactionId();

    /*
     * If we are going to set MyProc->xmin then we'd better get exclusive
     * lock; if not, this is a read-only operation so it can be shared.
     */
    LWLockAcquire(SInvalLock, serializable ? LW_EXCLUSIVE : LW_SHARED);

    /*--------------------
     * Unfortunately, we have to call ReadNewTransactionId() after acquiring
     * SInvalLock above.  It's not good because ReadNewTransactionId() does
     * LWLockAcquire(XidGenLock), but *necessary*.  We need to be sure that
     * no transactions exit the set of currently-running transactions
     * between the time we fetch xmax and the time we finish building our
     * snapshot.  Otherwise we could have a situation like this:
     *
     *      1. Tx Old is running (in Read Committed mode).
     *      2. Tx S reads new transaction ID into xmax, then
     *         is swapped out before acquiring SInvalLock.
     *      3. Tx New gets new transaction ID (>= S' xmax),
     *         makes changes and commits.
     *      4. Tx Old changes some row R changed by Tx New and commits.
     *      5. Tx S finishes getting its snapshot data.  It sees Tx Old as
     *         done, but sees Tx New as still running (since New >= xmax).
     *
     * Now S will see R changed by both Tx Old and Tx New, *but* does not
     * see other changes made by Tx New.  If S is supposed to be in
     * Serializable mode, this is wrong.
     *
     * By locking SInvalLock before we read xmax, we ensure that TX Old
     * cannot exit the set of running transactions seen by Tx S.  Therefore
     * both Old and New will be seen as still running => no inconsistency.
     *--------------------
     */
    xmax = ReadNewTransactionId();

    for (index = 0; index < segP->lastBackend; index++)
    {
        SHMEM_OFFSET pOffset = stateP[index].procStruct;

        if (pOffset != INVALID_OFFSET)
        {
            PGPROC     *proc = (PGPROC *) MAKE_PTR(pOffset);

            /* Fetch xid just once - see GetNewTransactionId */
            TransactionId xid = proc->xid;

            /*
             * Ignore my own proc (dealt with my xid above), procs not
             * running a transaction, and xacts started since we read the
             * next transaction ID.  There's no need to store XIDs above
             * what we got from ReadNewTransactionId, since we'll treat
             * them as running anyway.  We also assume that such xacts
             * can't compute an xmin older than ours, so they needn't be
             * considered in computing globalxmin.
             */
            if (proc == MyProc ||
                !TransactionIdIsNormal(xid) ||
                TransactionIdFollowsOrEquals(xid, xmax))
                continue;

            if (TransactionIdPrecedes(xid, xmin))
                xmin = xid;
            snapshot->xip[count] = xid;
            count++;

            /* Update globalxmin to be the smallest valid xmin */
            xid = proc->xmin;
            if (TransactionIdIsNormal(xid))
                if (TransactionIdPrecedes(xid, globalxmin))
                    globalxmin = xid;
        }
    }

    if (serializable)
        MyProc->xmin = xmin;

    LWLockRelease(SInvalLock);

    /* Serializable snapshot must be computed before any other... */
    Assert(TransactionIdIsValid(MyProc->xmin));

    /*
     * Update globalxmin to include actual process xids.  This is a
     * slightly different way of computing it than GetOldestXmin uses, but
     * should give the same result.
     */
    if (TransactionIdPrecedes(xmin, globalxmin))
        globalxmin = xmin;

    /* Update globals for use by VACUUM */
    RecentGlobalXmin = globalxmin;
    RecentXmin = xmin;

    snapshot->xmin = xmin;
    snapshot->xmax = xmax;
    snapshot->xcnt = count;

    snapshot->curcid = GetCurrentCommandId();

    return snapshot;
}
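/*
 * Illustrative sketch (not part of the original sinval.c): how a caller
 * could apply the visibility rule documented above GetSnapshotData.  The
 * helper name XidIsRunningInSnapshot and its linear scan over xip[] are
 * assumptions for illustration; the backend's real tuple-visibility tests
 * live elsewhere (tqual.c) and are more involved.
 */
static bool
XidIsRunningInSnapshot(TransactionId xid, Snapshot snapshot)
{
    int         i;

    /* Any xact ID below xmin is known to have finished */
    if (TransactionIdPrecedes(xid, snapshot->xmin))
        return false;

    /* Any xact ID at or above xmax is treated as still running */
    if (TransactionIdFollowsOrEquals(xid, snapshot->xmax))
        return true;

    /* In between, it is running only if it appears in the xip list */
    for (i = 0; i < snapshot->xcnt; i++)
    {
        if (TransactionIdEquals(xid, snapshot->xip[i]))
            return true;
    }

    return false;
}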
/*
 * CountActiveBackends --- count backends (other than myself) that are in
 *      active transactions.  This is used as a heuristic to decide if
 *      a pre-XLOG-flush delay is worthwhile during commit.
 *
 * An active transaction is something that has written at least one XLOG
 * record; read-only transactions don't count.  Also, do not count backends
 * that are blocked waiting for locks, since they are not going to get to
 * run until someone else commits.
 */
int
CountActiveBackends(void)
{
    SISeg      *segP = shmInvalBuffer;
    ProcState  *stateP = segP->procState;
    int         count = 0;
    int         index;

    /*
     * Note: for speed, we don't acquire SInvalLock.  This is a little bit
     * bogus, but since we are only testing xrecoff for zero or nonzero,
     * it should be OK.  The result is only used for heuristic purposes
     * anyway...
     */
    for (index = 0; index < segP->lastBackend; index++)
    {
        SHMEM_OFFSET pOffset = stateP[index].procStruct;

        if (pOffset != INVALID_OFFSET)
        {
            PGPROC     *proc = (PGPROC *) MAKE_PTR(pOffset);

            if (proc == MyProc)
                continue;       /* do not count myself */
            if (proc->logRec.xrecoff == 0)
                continue;       /* do not count if not in a transaction */
            if (proc->waitLock != NULL)
                continue;       /* do not count if blocked on a lock */
            count++;
        }
    }

    return count;
}
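/*
 * Illustrative sketch (not part of the original sinval.c): the kind of
 * commit-path caller that the comment on CountActiveBackends refers to.
 * The wrapper name MaybeDelayBeforeCommitFlush is hypothetical; CommitDelay
 * and CommitSiblings are assumed to be the backend's GUC settings for the
 * pre-XLOG-flush delay, and pg_usleep the microsecond sleep primitive.
 */
static void
MaybeDelayBeforeCommitFlush(void)
{
    /*
     * Sleep before flushing XLOG only if enough other backends are in
     * active (and unblocked) transactions, i.e. likely to commit soon
     * and share the cost of the same flush.
     */
    if (CommitDelay > 0 && CountActiveBackends() >= CommitSiblings)
        pg_usleep(CommitDelay);
}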
/*
 * GetUndoRecPtr -- returns oldest PGPROC->logRec.
 */
XLogRecPtr
GetUndoRecPtr(void)
{
    SISeg      *segP = shmInvalBuffer;
    ProcState  *stateP = segP->procState;
    XLogRecPtr  urec = {0, 0};
    XLogRecPtr  tempr;
    int         index;

    LWLockAcquire(SInvalLock, LW_SHARED);

    for (index = 0; index < segP->lastBackend; index++)
    {
        SHMEM_OFFSET pOffset = stateP[index].procStruct;

        if (pOffset != INVALID_OFFSET)
        {
            PGPROC     *proc = (PGPROC *) MAKE_PTR(pOffset);

            tempr = proc->logRec;
            if (tempr.xrecoff == 0)
                continue;
            if (urec.xrecoff != 0 && XLByteLT(urec, tempr))
                continue;
            urec = tempr;
        }
    }

    LWLockRelease(SInvalLock);

    return (urec);
}

/*
 * BackendIdGetProc - given a BackendId, find its PGPROC structure
 *
 * This is a trivial lookup in the ProcState array.  We assume that the caller
 * knows that the backend isn't going to go away, so we do not bother with
 * locking.
 */
struct PGPROC *
BackendIdGetProc(BackendId procId)
{
    SISeg      *segP = shmInvalBuffer;

    if (procId > 0 && procId <= segP->lastBackend)
    {
        ProcState  *stateP = &segP->procState[procId - 1];
        SHMEM_OFFSET pOffset = stateP->procStruct;

        if (pOffset != INVALID_OFFSET)
        {
            PGPROC     *proc = (PGPROC *) MAKE_PTR(pOffset);

            return proc;
        }
    }

    return NULL;
}

/*
 * CountEmptyBackendSlots - count empty slots in backend process table
 *
 * We don't actually need to count, since sinvaladt.c maintains a
 * freeBackends counter in the SI segment.
 *
 * Acquiring the lock here is almost certainly overkill, but just in
 * case fetching an int is not atomic on your machine ...
 */
int
CountEmptyBackendSlots(void)
{
    int         count;

    LWLockAcquire(SInvalLock, LW_SHARED);

    count = shmInvalBuffer->freeBackends;

    LWLockRelease(SInvalLock);

    return count;
}
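/*
 * Illustrative sketch (not part of the original sinval.c): using
 * BackendIdGetProc to peek at another backend's transaction ID.  As the
 * comment on BackendIdGetProc notes, this is only safe when the caller
 * already knows the target backend cannot exit concurrently.  The helper
 * name BackendIdGetXid is hypothetical.
 */
static TransactionId
BackendIdGetXid(BackendId procId)
{
    PGPROC     *proc = BackendIdGetProc(procId);

    if (proc == NULL)
        return InvalidTransactionId;    /* no backend in that slot */

    return proc->xid;
}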