/*
 * pgstat.c -- PostgreSQL statistics collector (excerpt).
 * (Recovered from a web code viewer; the original page header line
 * "字号:" means "font size" and is site chrome, not source text.)
 */
/*
 * NOTE(review): this is the tail of an enclosing function whose opening
 * lines are above this excerpt; only comments and line layout are changed.
 *
 * If the slot contains the PID of this backend, everything is fine and we
 * have nothing to do. Note that all the slots are zero'd out when the
 * collector is started. We assume that a slot is "empty" iff procpid ==
 * 0.
 */
	if (beentry->procpid > 0 && beentry->procpid == msg->m_procpid)
		return 0;

	/*
	 * Lookup if this backend is known to be dead. This can be caused due to
	 * messages arriving in the wrong order - e.g. postmaster's BETERM message
	 * might have arrived before we received all the backends stats messages,
	 * or even a new backend with the same backendid was faster in sending his
	 * BESTART.
	 *
	 * If the backend is known to be dead, we ignore this add.
	 */
	deadbe = (PgStat_StatBeDead *) hash_search(pgStatBeDead,
											   (void *) &(msg->m_procpid),
											   HASH_FIND, NULL);
	if (deadbe)
		return 1;

	/*
	 * Backend isn't known to be dead. If its slot is currently used, we have
	 * to kick out the old backend.
	 */
	if (beentry->procpid > 0)
		pgstat_sub_backend(beentry->procpid);

	/* Must be able to distinguish between empty and non-empty slots */
	Assert(msg->m_procpid > 0);

	/* Put this new backend into the slot */
	beentry->procpid = msg->m_procpid;
	beentry->start_timestamp = GetCurrentTimestamp();
	beentry->activity_start_timestamp = 0;
	beentry->activity[0] = '\0';

	/*
	 * We can't initialize the rest of the data in this slot until we see the
	 * BESTART message. Therefore, we set the database and user to sentinel
	 * values, to indicate "undefined". There is no easy way to do this for
	 * the client address, so make sure to check that the database or user are
	 * defined before accessing the client address.
	 */
	beentry->userid = InvalidOid;
	beentry->databaseid = InvalidOid;

	return 0;
}

/*
 * Lookup the hash table entry for the specified database. If no hash
 * table entry exists, initialize it, if the create parameter is true.
 * Else, return NULL.
 */
static PgStat_StatDBEntry *
pgstat_get_db_entry(Oid databaseid, bool create)
{
	PgStat_StatDBEntry *result;
	bool		found;

	/* HASH_ENTER both finds and creates; HASH_FIND only looks up */
	HASHACTION	action = (create ? HASH_ENTER : HASH_FIND);

	/* Lookup or create the hash table entry for this database */
	result = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
												&databaseid,
												action, &found);

	if (!create && !found)
		return NULL;

	/* If not found, initialize the new one. */
	if (!found)
	{
		HASHCTL		hash_ctl;

		result->tables = NULL;
		result->n_xact_commit = 0;
		result->n_xact_rollback = 0;
		result->n_blocks_fetched = 0;
		result->n_blocks_hit = 0;
		result->destroy = 0;
		result->last_autovac_time = 0;

		/* Create the per-database table-stats hash, keyed by table OID */
		memset(&hash_ctl, 0, sizeof(hash_ctl));
		hash_ctl.keysize = sizeof(Oid);
		hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
		hash_ctl.hash = oid_hash;
		result->tables = hash_create("Per-database table",
									 PGSTAT_TAB_HASH_SIZE,
									 &hash_ctl,
									 HASH_ELEM | HASH_FUNCTION);
	}

	return result;
}

/* ----------
 * pgstat_sub_backend() -
 *
 *	Remove a backend from the actual backends list.
 * ----------
 */
static void
pgstat_sub_backend(int procpid)
{
	int			i;
	PgStat_StatBeDead *deadbe;
	bool		found;

	/*
	 * Search in the known-backends table for the slot containing this PID.
	 */
	for (i = 0; i < MaxBackends; i++)
	{
		if (pgStatBeTable[i].procpid == procpid)
		{
			/*
			 * That's him. Add an entry to the known to be dead backends. Due
			 * to possible misorder in the arrival of UDP packets it's
			 * possible that even if we know the backend is dead, there could
			 * still be messages queued that arrive later. Those messages must
			 * not cause our number of backends statistics to get screwed up,
			 * so we remember for a couple of seconds that this PID is dead
			 * and ignore them (only the counting of backends, not the table
			 * access stats they sent).
			 */
			deadbe = (PgStat_StatBeDead *) hash_search(pgStatBeDead,
													   (void *) &procpid,
													   HASH_ENTER,
													   &found);
			if (!found)
			{
				deadbe->backendid = i + 1;
				deadbe->destroy = PGSTAT_DESTROY_COUNT;
			}

			/*
			 * Declare the backend slot empty.
			 */
			pgStatBeTable[i].procpid = 0;
			return;
		}
	}

	/*
	 * No big problem if not found. This can happen if UDP messages arrive out
	 * of order here.
 */
}

/* ----------
 * pgstat_write_statsfile() -
 *
 *	Tell the news.  (That is: dump the collected statistics out to the
 *	stats file, replacing the previous copy via rename of a temp file.)
 * ----------
 */
static void
pgstat_write_statsfile(void)
{
	HASH_SEQ_STATUS hstat;
	HASH_SEQ_STATUS tstat;
	PgStat_StatDBEntry *dbentry;
	PgStat_StatTabEntry *tabentry;
	PgStat_StatBeDead *deadbe;
	FILE	   *fpout;
	int			i;
	int32		format_id;

	/*
	 * Open the statistics temp file to write out the current values.
	 */
	fpout = fopen(PGSTAT_STAT_TMPFILE, PG_BINARY_W);
	if (fpout == NULL)
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not open temporary statistics file \"%s\": %m",
						PGSTAT_STAT_TMPFILE)));
		return;
	}

	/*
	 * Write the file header --- currently just a format ID.
	 */
	format_id = PGSTAT_FILE_FORMAT_ID;
	fwrite(&format_id, sizeof(format_id), 1, fpout);

	/*
	 * Walk through the database table.
	 */
	hash_seq_init(&hstat, pgStatDBHash);
	while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
	{
		/*
		 * If this database is marked destroyed, count down and do so if it
		 * reaches 0.
		 */
		if (dbentry->destroy > 0)
		{
			if (--(dbentry->destroy) == 0)
			{
				if (dbentry->tables != NULL)
					hash_destroy(dbentry->tables);

				if (hash_search(pgStatDBHash,
								(void *) &(dbentry->databaseid),
								HASH_REMOVE, NULL) == NULL)
					ereport(ERROR,
							(errmsg("database hash table corrupted "
									"during cleanup --- abort")));
			}

			/*
			 * Don't include statistics for it.
			 */
			continue;
		}

		/*
		 * Write out the DB entry including the number of live backends.
		 * We don't write the tables pointer since it's of no use to any
		 * other process.
		 */
		fputc('D', fpout);
		fwrite(dbentry, offsetof(PgStat_StatDBEntry, tables), 1, fpout);

		/*
		 * Walk through the database's access stats per table.
		 */
		hash_seq_init(&tstat, dbentry->tables);
		while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&tstat)) != NULL)
		{
			/*
			 * If table entry marked for destruction, same as above for the
			 * database entry.
			 */
			if (tabentry->destroy > 0)
			{
				if (--(tabentry->destroy) == 0)
				{
					if (hash_search(dbentry->tables,
									(void *) &(tabentry->tableid),
									HASH_REMOVE, NULL) == NULL)
						ereport(ERROR,
								(errmsg("tables hash table for "
										"database %u corrupted during "
										"cleanup --- abort",
										dbentry->databaseid)));
				}
				continue;
			}

			/*
			 * At least we think this is still a live table. Emit its access
			 * stats.
			 */
			fputc('T', fpout);
			fwrite(tabentry, sizeof(PgStat_StatTabEntry), 1, fpout);
		}

		/*
		 * Mark the end of this DB
		 */
		fputc('d', fpout);
	}

	/*
	 * Write out the known running backends to the stats file.
	 */
	i = MaxBackends;
	fputc('M', fpout);
	fwrite(&i, sizeof(i), 1, fpout);

	for (i = 0; i < MaxBackends; i++)
	{
		PgStat_StatBeEntry *beentry = &pgStatBeTable[i];

		if (beentry->procpid > 0)
		{
			int			len;

			/* only the fixed prefix plus the used part of activity[] */
			len = offsetof(PgStat_StatBeEntry, activity) +
				strlen(beentry->activity) + 1;
			fputc('B', fpout);
			fwrite(&len, sizeof(len), 1, fpout);
			fwrite(beentry, len, 1, fpout);
		}
	}

	/*
	 * No more output to be done. Close the temp file and replace the old
	 * pgstat.stat with it. The ferror() check replaces testing for error
	 * after each individual fputc or fwrite above.
	 */
	fputc('E', fpout);

	if (ferror(fpout))
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not write temporary statistics file \"%s\": %m",
						PGSTAT_STAT_TMPFILE)));
		fclose(fpout);
		unlink(PGSTAT_STAT_TMPFILE);
	}
	else if (fclose(fpout) < 0)
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not close temporary statistics file \"%s\": %m",
						PGSTAT_STAT_TMPFILE)));
		unlink(PGSTAT_STAT_TMPFILE);
	}
	else if (rename(PGSTAT_STAT_TMPFILE, PGSTAT_STAT_FILENAME) < 0)
	{
		ereport(LOG,
				(errcode_for_file_access(),
				 errmsg("could not rename temporary statistics file \"%s\" to \"%s\": %m",
						PGSTAT_STAT_TMPFILE, PGSTAT_STAT_FILENAME)));
		unlink(PGSTAT_STAT_TMPFILE);
	}

	/*
	 * Clear out the dead backends table
	 */
	hash_seq_init(&hstat, pgStatBeDead);
	while ((deadbe = (PgStat_StatBeDead *) hash_seq_search(&hstat)) != NULL)
	{
		/*
		 * Count down the destroy delay and remove entries where it reaches 0.
		 */
		if (--(deadbe->destroy) <= 0)
		{
			if (hash_search(pgStatBeDead,
							(void *) &(deadbe->procpid),
							HASH_REMOVE, NULL) == NULL)
				ereport(ERROR,
						(errmsg("dead-server-process hash table corrupted "
								"during cleanup --- abort")));
		}
	}
}

/* ----------
 * pgstat_read_statsfile() -
 *
 *	Reads in an existing statistics collector and initializes the
 *	databases' hash table (whose entries point to the tables' hash tables)
 *	and the current backend table.
 * ----------
 */
static void
pgstat_read_statsfile(HTAB **dbhash, Oid onlydb, PgStat_StatBeEntry **betab,
					  int *numbackends)
{
	PgStat_StatDBEntry *dbentry;
	PgStat_StatDBEntry dbbuf;
	PgStat_StatTabEntry *tabentry;
	PgStat_StatTabEntry tabbuf;
	PgStat_StatBeEntry *beentry;
	HASHCTL		hash_ctl;
	HTAB	   *tabhash = NULL;
	FILE	   *fpin;
	int32		format_id;
	int			len;
	int			maxbackends = 0;
	int			havebackends = 0;
	bool		found;
	bool		check_pids;
	MemoryContext use_mcxt;
	int			mcxt_flags;

	/*
	 * If running in the collector or the autovacuum process, we use the
	 * DynaHashCxt memory context.
If running in a backend, we use the * TopTransactionContext instead, so the caller must only know the last * XactId when this call happened to know if his tables are still valid or * already gone! * * Also, if running in a regular backend, we check backend entries against * the PGPROC array so that we can detect stale entries. This lets us * discard entries whose BETERM message got lost for some reason. */ if (pgStatRunningInCollector || IsAutoVacuumProcess()) { use_mcxt = NULL; mcxt_flags = 0; check_pids = false; } else { use_mcxt = TopTransactionContext; mcxt_flags = HASH_CONTEXT; check_pids = true; } /* * Create the DB hashtable */ memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(Oid); hash_ctl.entrysize = sizeof(PgStat_StatDBEntry); hash_ctl.hash = oid_hash; hash_ctl.hcxt = use_mcxt; *dbhash = hash_create("Databases hash", PGSTAT_DB_HASH_SIZE, &hash_ctl, HASH_ELEM | HASH_FUNCTION | mcxt_flags); /* * Initialize the number of known backends to zero, just in case we do a * silent error return below. */ if (numbackends != NULL) *numbackends = 0; if (betab != NULL) *betab = NULL; /* * Try to open the status file. If it doesn't exist, the backends simply * return zero for anything and the collector simply starts from scratch * with empty counters. */ if ((fpin = AllocateFile(PGSTAT_STAT_FILENAME, PG_BINARY_R)) == NULL) return; /* * Verify it's of the expected format. */ if (fread(&format_id, 1, sizeof(format_id), fpin) != sizeof(format_id) || format_id != PGSTAT_FILE_FORMAT_ID) { ereport(pgStatRunningInCollector ? LOG : WARNING, (errmsg("corrupted pgstat.stat file"))); goto done; } /* * We found an existing collector stats file. Read it and put all the * hashtable entries into place. */ for (;;) { switch (fgetc(fpin)) { /* * 'D' A PgStat_StatDBEntry struct describing a database * follows. Subsequently, zero to many 'T' entries will follow * until a 'd' is encountered. 
*/ case 'D': if (fread(&dbbuf, 1, offsetof(PgStat_StatDBEntry, tables), fpin) != offsetof(PgStat_StatDBEntry, tables)) { ereport(pgStatRunningInCollector ? LOG : WARNING, (errmsg("corrupted pgstat.stat file"))); goto done; } /* * Add to the DB hash */ dbentry = (PgStat_StatDBEntry *) hash_search(*dbhash, (void *) &dbbuf.databaseid, HASH_ENTER, &found); if (found) { ereport(pgStatRunningInCollector ? LOG : WARNING, (errmsg("corrupted pgstat.stat file"))); goto done; } memcpy(dbentry, &dbbuf, sizeof(PgStat_StatDBEntry)); dbentry->tables = NULL; dbentry->destroy = 0; dbentry->n_backends = 0; /* * Don't collect tables if not the requested DB (or the * shared-table info) */ if (onlydb != InvalidOid) { if (dbbuf.databaseid != onlydb && dbbuf.databaseid != InvalidOid) break; } memset(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = sizeof(Oid); hash_ctl.entrysize = sizeof(PgStat_StatTabEntry); hash_ctl.hash = oid_hash; hash_ctl.hcxt = use_mcxt; dbentry->tables = hash_create("Per-database table", PGSTAT_TAB_HASH_SIZE, &hash_ctl, HASH_ELEM | HASH_FUNCTION | mcxt_flags); /* * Arrange that following 'T's add entries to this database's * tables hash table. */ tabhash = dbentry->tables; break; /* * 'd' End of this database. */ case 'd': tabhash = NULL; break; /* * 'T' A PgStat_StatTabEntry follows. */ case 'T': if (fread(&tabbuf, 1, sizeof(PgStat_StatTabEntry), fpin) != sizeof(PgStat_StatTabEntry)) { ereport(pgStatRunningInCollector ? LOG : WARNING, (errmsg("corrupted pgstat.stat file")));
/*
 * (Residue from the web code viewer this file was copied from -- its
 * keyboard-shortcut help panel: copy code Ctrl+C, search code Ctrl+F,
 * full-screen mode F11, toggle theme Ctrl+Shift+D, show shortcuts ?,
 * increase font size Ctrl+=, decrease font size Ctrl+-.
 * Not part of pgstat.c.)
 */