📄 pg_dump.c
        for (tuple = 0; tuple < PQntuples(res); tuple++)
        {
            archprintf(fout, "INSERT INTO %s ", fmtId(classname));
            if (nfields == 0)
            {
                /* corner case for zero-column table */
                archprintf(fout, "DEFAULT VALUES;\n");
                continue;
            }
            if (attrNames == true)
            {
                resetPQExpBuffer(q);
                appendPQExpBuffer(q, "(");
                for (field = 0; field < nfields; field++)
                {
                    if (field > 0)
                        appendPQExpBuffer(q, ", ");
                    appendPQExpBuffer(q, fmtId(PQfname(res, field)));
                }
                appendPQExpBuffer(q, ") ");
                archprintf(fout, "%s", q->data);
            }
            archprintf(fout, "VALUES (");
            for (field = 0; field < nfields; field++)
            {
                if (field > 0)
                    archprintf(fout, ", ");
                if (PQgetisnull(res, tuple, field))
                {
                    archprintf(fout, "NULL");
                    continue;
                }

                /* XXX This code is partially duplicated in ruleutils.c */
                switch (PQftype(res, field))
                {
                    case INT2OID:
                    case INT4OID:
                    case INT8OID:
                    case OIDOID:
                    case FLOAT4OID:
                    case FLOAT8OID:
                    case NUMERICOID:
                        {
                            /*
                             * These types are printed without quotes
                             * unless they contain values that aren't
                             * accepted by the scanner unquoted (e.g.,
                             * 'NaN').  Note that strtod() and friends
                             * might accept NaN, so we can't use that to
                             * test.
                             *
                             * In reality we only need to defend against
                             * infinity and NaN, so we need not get too
                             * crazy about pattern matching here.
                             */
                            const char *s = PQgetvalue(res, tuple, field);

                            if (strspn(s, "0123456789 +-eE.") == strlen(s))
                                archprintf(fout, "%s", s);
                            else
                                archprintf(fout, "'%s'", s);
                        }
                        break;

                    case BITOID:
                    case VARBITOID:
                        archprintf(fout, "B'%s'",
                                   PQgetvalue(res, tuple, field));
                        break;

                    case BOOLOID:
                        if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
                            archprintf(fout, "true");
                        else
                            archprintf(fout, "false");
                        break;

                    default:
                        /* All other types are printed as string literals. */
                        resetPQExpBuffer(q);
                        appendStringLiteral(q, PQgetvalue(res, tuple, field), false);
                        archprintf(fout, "%s", q->data);
                        break;
                }
            }
            archprintf(fout, ");\n");
        }
    } while (PQntuples(res) > 0);

    archprintf(fout, "\n\n");

    PQclear(res);

    res = PQexec(g_conn, "CLOSE _pg_dump_cursor");
    if (!res ||
        PQresultStatus(res) != PGRES_COMMAND_OK)
    {
        write_msg(NULL, "dumpClasses(): SQL command failed\n");
        write_msg(NULL, "Error message from server: %s", PQerrorMessage(g_conn));
        write_msg(NULL, "The command was: CLOSE _pg_dump_cursor\n");
        exit_nicely();
    }
    PQclear(res);

    destroyPQExpBuffer(q);
    return 1;
}

/*
 * DumpClasses -
 *    dump the contents of all the classes.
 */
static void
dumpClasses(const TableInfo *tblinfo, const int numTables, Archive *fout,
            const bool oids)
{
    PQExpBuffer copyBuf = createPQExpBuffer();
    DataDumperPtr dumpFn;
    DumpContext *dumpCtx;
    char       *copyStmt;
    int         i;

    for (i = 0; i < numTables; i++)
    {
        const char *classname = tblinfo[i].relname;

        /* Skip VIEW relations */
        if (tblinfo[i].relkind == RELKIND_VIEW)
            continue;

        if (tblinfo[i].relkind == RELKIND_SEQUENCE)     /* already dumped */
            continue;

        if (tblinfo[i].dump)
        {
            if (g_verbose)
                write_msg(NULL, "preparing to dump the contents of table %s\n",
                          classname);

            dumpCtx = (DumpContext *) calloc(1, sizeof(DumpContext));
            dumpCtx->tblinfo = (TableInfo *) tblinfo;
            dumpCtx->tblidx = i;
            dumpCtx->oids = oids;

            if (!dumpData)
            {
                /* Dump/restore using COPY */
                dumpFn = dumpClasses_nodumpData;
                resetPQExpBuffer(copyBuf);
                /* must use 2 steps here 'cause fmtId is nonreentrant */
                appendPQExpBuffer(copyBuf, "COPY %s ",
                                  fmtId(tblinfo[i].relname));
                appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
                                  fmtCopyColumnList(&(tblinfo[i])),
                                  (oids && tblinfo[i].hasoids) ? "WITH OIDS " : "");
                copyStmt = copyBuf->data;
            }
            else
            {
                /* Restore using INSERT */
                dumpFn = dumpClasses_dumpData;
                copyStmt = NULL;
            }

            ArchiveEntry(fout, tblinfo[i].oid, tblinfo[i].relname,
                         tblinfo[i].relnamespace->nspname,
                         tblinfo[i].usename, "TABLE DATA", NULL,
                         "", "", copyStmt, dumpFn, dumpCtx);
        }
    }

    destroyPQExpBuffer(copyBuf);
}

/*
 * dumpDatabase:
 *    dump the database definition
 */
static int
dumpDatabase(Archive *AH)
{
    PQExpBuffer dbQry = createPQExpBuffer();
    PQExpBuffer delQry = createPQExpBuffer();
    PQExpBuffer creaQry = createPQExpBuffer();
    PGresult   *res;
    int         ntups;
    int         i_dba,
                i_encoding,
                i_datpath;
    const char *datname,
               *dba,
               *encoding,
               *datpath;

    datname = PQdb(g_conn);

    if (g_verbose)
        write_msg(NULL, "saving database definition\n");

    /* Make sure we are in proper schema */
    selectSourceSchema("pg_catalog");

    /* Get the database owner and parameters from pg_database */
    if (g_fout->remoteVersion >= 70100)
    {
        appendPQExpBuffer(dbQry, "SELECT "
                          "(SELECT usename FROM pg_user WHERE usesysid = datdba) as dba, "
                          "pg_encoding_to_char(encoding) as encoding, "
                          "datpath "
                          "FROM pg_database "
                          "WHERE datname = ");
        appendStringLiteral(dbQry, datname, true);
    }
    else
    {
        /*
         * In 7.0, datpath is either the same as datname, or the user-given
         * location with "/" and the datname appended.  We must strip this
         * junk off to produce a correct LOCATION value.
         */
        appendPQExpBuffer(dbQry, "SELECT "
                          "(SELECT usename FROM pg_user WHERE usesysid = datdba) as dba, "
                          "pg_encoding_to_char(encoding) as encoding, "
                          "CASE WHEN length(datpath) > length(datname) THEN "
                          "substr(datpath,1,length(datpath)-length(datname)-1) "
                          "ELSE '' END as datpath "
                          "FROM pg_database "
                          "WHERE datname = ");
        appendStringLiteral(dbQry, datname, true);
    }

    res = PQexec(g_conn, dbQry->data);
    if (!res ||
        PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        write_msg(NULL, "SQL command failed\n");
        write_msg(NULL, "Error message from server: %s", PQerrorMessage(g_conn));
        write_msg(NULL, "The command was: %s\n", dbQry->data);
        exit_nicely();
    }

    ntups = PQntuples(res);

    if (ntups <= 0)
    {
        write_msg(NULL, "missing pg_database entry for database \"%s\"\n",
                  datname);
        exit_nicely();
    }

    if (ntups != 1)
    {
        write_msg(NULL, "query returned more than one (%d) pg_database entry for database \"%s\"\n",
                  ntups, datname);
        exit_nicely();
    }

    i_dba = PQfnumber(res, "dba");
    i_encoding = PQfnumber(res, "encoding");
    i_datpath = PQfnumber(res, "datpath");
    dba = PQgetvalue(res, 0, i_dba);
    encoding = PQgetvalue(res, 0, i_encoding);
    datpath = PQgetvalue(res, 0, i_datpath);

    appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
                      fmtId(datname));
    if (strlen(datpath) > 0)
    {
        appendPQExpBuffer(creaQry, " LOCATION = ");
        appendStringLiteral(creaQry, datpath, true);
    }
    if (strlen(encoding) > 0)
    {
        appendPQExpBuffer(creaQry, " ENCODING = ");
        appendStringLiteral(creaQry, encoding, true);
    }
    appendPQExpBuffer(creaQry, ";\n");

    appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
                      fmtId(datname));

    ArchiveEntry(AH,
                 "0",            /* OID */
                 datname,        /* Name */
                 NULL,           /* Namespace */
                 dba,            /* Owner */
                 "DATABASE",     /* Desc */
                 NULL,           /* Deps */
                 creaQry->data,  /* Create */
                 delQry->data,   /* Del */
                 NULL,           /* Copy */
                 NULL,           /* Dumper */
                 NULL);          /* Dumper Arg */

    PQclear(res);

    destroyPQExpBuffer(dbQry);
    destroyPQExpBuffer(delQry);
    destroyPQExpBuffer(creaQry);

    return 1;
}

/*
 * dumpEncoding: put the correct encoding into the archive
 */
static void
dumpEncoding(Archive *AH)
{
    PQExpBuffer qry;
    PGresult   *res;

    /* Can't read the encoding from pre-7.3 servers (SHOW isn't a query) */
    if (AH->remoteVersion < 70300)
        return;

    if (g_verbose)
        write_msg(NULL, "saving encoding\n");

    qry = createPQExpBuffer();

    appendPQExpBuffer(qry, "SHOW client_encoding");

    res = PQexec(g_conn, qry->data);

    if (!res ||
        PQresultStatus(res) != PGRES_TUPLES_OK ||
        PQntuples(res) != 1)
    {
        write_msg(NULL, "SQL command failed\n");
        write_msg(NULL, "Error message from server: %s", PQerrorMessage(g_conn));
        write_msg(NULL, "The command was: %s\n", qry->data);
        exit_nicely();
    }

    resetPQExpBuffer(qry);

    appendPQExpBuffer(qry, "SET client_encoding = ");
    appendStringLiteral(qry, PQgetvalue(res, 0, 0), true);
    appendPQExpBuffer(qry, ";\n");

    ArchiveEntry(AH,
                 "0",            /* OID */
                 "ENCODING",     /* Name */
                 NULL,           /* Namespace */
                 "",             /* Owner */
                 "ENCODING",     /* Desc */
                 NULL,           /* Deps */
                 qry->data,      /* Create */
                 "",             /* Del */
                 NULL,           /* Copy */
                 NULL,           /* Dumper */
                 NULL);          /* Dumper Arg */

    PQclear(res);

    destroyPQExpBuffer(qry);
}

/*
 * dumpBlobs:
 *    dump all blobs
 *
 */

#define loBufSize 16384
#define loFetchSize 1000

static int
dumpBlobs(Archive *AH, char *junkOid, void *junkVal)
{
    PQExpBuffer oidQry = createPQExpBuffer();
    PQExpBuffer oidFetchQry = createPQExpBuffer();
    PGresult   *res;
    int         i;
    int         loFd;
    char        buf[loBufSize];
    int         cnt;
    Oid         blobOid;

    if (g_verbose)
        write_msg(NULL, "saving large objects\n");

    /* Make sure we are in proper schema */
    selectSourceSchema("pg_catalog");

    /* Cursor to get all BLOB tables */
    if (AH->remoteVersion >= 70100)
        appendPQExpBuffer(oidQry, "Declare blobOid Cursor for SELECT DISTINCT loid FROM pg_largeobject");
    else
        appendPQExpBuffer(oidQry, "Declare blobOid Cursor for SELECT oid from pg_class where relkind = 'l'");

    res = PQexec(g_conn, oidQry->data);
    if (!res ||
        PQresultStatus(res) != PGRES_COMMAND_OK)
    {
        write_msg(NULL, "dumpBlobs(): cursor declaration failed: %s", PQerrorMessage(g_conn));
        exit_nicely();
    }

    /* Fetch for cursor */
    appendPQExpBuffer(oidFetchQry, "Fetch %d in blobOid", loFetchSize);

    do
    {
        /* Do a fetch */
        PQclear(res);

        res = PQexec(g_conn, oidFetchQry->data);

        if (!res ||
            PQresultStatus(res) != PGRES_TUPLES_OK)
        {
            write_msg(NULL, "dumpBlobs(): fetch from cursor failed: %s", PQerrorMessage(g_conn));
            exit_nicely();
        }

        /* Process the tuples, if any */
        for (i = 0; i < PQntuples(res); i++)
        {
            blobOid = atooid(PQgetvalue(res, i, 0));

            /* Open the BLOB */
            loFd = lo_open(g_conn, blobOid, INV_READ);
            if (loFd == -1)
            {
                write_msg(NULL, "dumpBlobs(): could not open large object: %s",
                          PQerrorMessage(g_conn));
                exit_nicely();
            }

            StartBlob(AH, blobOid);

            /* Now read it in chunks, sending data to archive */
            do
            {
                cnt = lo_read(g_conn, loFd, buf, loBufSize);
                if (cnt < 0)
                {
                    write_msg(NULL, "dumpBlobs(): error reading large object: %s",
                              PQerrorMessage(g_conn));
                    exit_nicely();
                }

                WriteData(AH, buf, cnt);
            } while (cnt > 0);

            lo_close(g_conn, loFd);

            EndBlob(AH, blobOid);
        }
    } while (PQntuples(res) > 0);

    destroyPQExpBuffer(oidQry);
    destroyPQExpBuffer(oidFetchQry);

    return 1;
}

/*
 * getNamespaces:
 *    read all namespaces in the system catalogs and return them in the
 * NamespaceInfo* structure
 *
 *    numNamespaces is set to the number of namespaces read in
 */
NamespaceInfo *
getNamespaces(int *numNamespaces)
{
    PGresult   *res;
    int         ntups;
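
As a reading aid only: for a hypothetical table foo(x float8, flag bool, note text) (the table and values are invented, not taken from this file), the INSERT-mode dumper at the top of this excerpt (dumpClasses_dumpData, the dumpFn chosen when dumpData is set) emits statements shaped roughly like this, with the column list present because attrNames is true:

    INSERT INTO foo (x, flag, note) VALUES (1.5, true, 'plain text');
    INSERT INTO foo (x, flag, note) VALUES ('NaN', false, NULL);

The quoting rules follow the switch on PQftype(): numeric-family values stay unquoted as long as they contain only characters from "0123456789 +-eE." (so NaN gets quotes), booleans are printed as true/false, bit-string types get a B'...' prefix, NULL columns are written as the bare keyword NULL, and every other type is escaped by appendStringLiteral(). A zero-column table falls into the nfields == 0 corner case and is dumped as INSERT INTO foo DEFAULT VALUES; instead.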
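In the same illustrative spirit, the restore commands that dumpDatabase() and dumpEncoding() store in their archive entries look roughly like the following; the database name, location, and encoding are placeholders, and the LOCATION and ENCODING clauses are only appended when datpath and encoding come back non-empty:

    CREATE DATABASE mydb WITH TEMPLATE = template0 LOCATION = '/some/dbpath' ENCODING = 'SQL_ASCII';
    DROP DATABASE mydb;
    SET client_encoding = 'SQL_ASCII';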