/*
 * pg_dump.c
 *	  pg_dump is a utility for dumping out a postgres database
 *	  into a script file.
 *
 * (Fragment recovered from a web code-viewer; banner text replaced
 * with a conventional file-header comment.)
 */
/* (resuming inside the output-format switch in main(); start of main is not in this fragment) */
			break;

		case 'p':
		case 'P':
			plainText = 1;
			g_fout = CreateArchive(filename, archNull, 0);
			break;

		case 't':
		case 'T':
			g_fout = CreateArchive(filename, archTar, compressLevel);
			break;

		default:
			write_msg(NULL, "invalid output format \"%s\" specified\n", format);
			exit(1);
	}

	if (g_fout == NULL)
	{
		write_msg(NULL, "could not open output file \"%s\" for writing\n",
				  filename);
		exit(1);
	}

	/* Let the archiver know how noisy to be */
	g_fout->verbose = g_verbose;

	g_fout->minRemoteVersion = 70000;	/* we can handle back to 7.0 */
	g_fout->maxRemoteVersion = parse_version(PG_VERSION);
	if (g_fout->maxRemoteVersion < 0)
	{
		write_msg(NULL, "could not parse version string \"%s\"\n", PG_VERSION);
		exit(1);
	}

	/*
	 * Open the database using the Archiver, so it knows about it. Errors mean
	 * death.
	 */
	g_conn = ConnectDatabase(g_fout, dbname, pghost, pgport,
							 username, force_password, ignore_version);

	/*
	 * Start serializable transaction to dump consistent data.
	 */
	do_sql_command(g_conn, "BEGIN");
	do_sql_command(g_conn, "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE");

	/* Set the datestyle to ISO to ensure the dump's portability */
	do_sql_command(g_conn, "SET DATESTYLE = ISO");

	/* Set the client encoding */
	if (dumpencoding)
	{
		/* NOTE(review): malloc result is not checked here — TODO confirm policy */
		char	   *cmd = malloc(strlen(dumpencoding) + 32);

		sprintf(cmd, "SET client_encoding='%s'", dumpencoding);
		do_sql_command(g_conn, cmd);
		free(cmd);
	}

	/* Select the appropriate subquery to convert user IDs to names */
	if (g_fout->remoteVersion >= 80100)
		username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
	else if (g_fout->remoteVersion >= 70300)
		username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
	else
		username_subquery = "SELECT usename FROM pg_user WHERE usesysid =";

	/*
	 * If supported, set extra_float_digits so that we can dump float data
	 * exactly (given correctly implemented float I/O code, anyway)
	 */
	if (g_fout->remoteVersion >= 70400)
		do_sql_command(g_conn, "SET extra_float_digits TO 2");

	/* Find the last built-in OID, if needed */
	if (g_fout->remoteVersion < 70300)
	{
		if (g_fout->remoteVersion >= 70100)
			g_last_builtin_oid = findLastBuiltinOid_V71(PQdb(g_conn));
		else
			g_last_builtin_oid = findLastBuiltinOid_V70();
		if (g_verbose)
			write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
	}

	/*
	 * Now scan the database and create DumpableObject structs for all the
	 * objects we intend to dump.
	 */
	tblinfo = getSchemaData(&numTables, schemaOnly, dataOnly);

	if (!schemaOnly)
		getTableData(tblinfo, numTables, oids);

	if (outputBlobs && hasBlobs(g_fout))
	{
		/* Add placeholders to allow correct sorting of blobs */
		DumpableObject *blobobj;

		blobobj = (DumpableObject *) malloc(sizeof(DumpableObject));
		blobobj->objType = DO_BLOBS;
		blobobj->catId = nilCatalogId;
		AssignDumpId(blobobj);
		blobobj->name = strdup("BLOBS");

		blobobj = (DumpableObject *) malloc(sizeof(DumpableObject));
		blobobj->objType = DO_BLOB_COMMENTS;
		blobobj->catId = nilCatalogId;
		AssignDumpId(blobobj);
		blobobj->name = strdup("BLOB COMMENTS");
	}

	/*
	 * Collect dependency data to assist in ordering the objects.
	 */
	getDependencies();

	/*
	 * Sort the objects into a safe dump order (no forward references).
	 *
	 * In 7.3 or later, we can rely on dependency information to help us
	 * determine a safe order, so the initial sort is mostly for cosmetic
	 * purposes: we sort by name to ensure that logically identical schemas
	 * will dump identically. Before 7.3 we don't have dependencies and we
	 * use OID ordering as an (unreliable) guide to creation order.
	 */
	getDumpableObjects(&dobjs, &numObjs);

	if (g_fout->remoteVersion >= 70300)
		sortDumpableObjectsByTypeName(dobjs, numObjs);
	else
		sortDumpableObjectsByTypeOid(dobjs, numObjs);

	sortDumpableObjects(dobjs, numObjs);

	/*
	 * Create archive TOC entries for all the objects to be dumped, in a safe
	 * order.
	 */

	/* First the special encoding entry. */
	dumpEncoding(g_fout);

	/* The database item is always second, unless we don't want it at all */
	if (!dataOnly && selectTableName == NULL && selectSchemaName == NULL)
		dumpDatabase(g_fout);

	/* Now the rearrangeable objects. */
	for (i = 0; i < numObjs; i++)
		dumpDumpableObject(g_fout, dobjs[i]);

	/*
	 * And finally we can do the actual output.
	 */
	if (plainText)
	{
		ropt = NewRestoreOptions();
		ropt->filename = (char *) filename;
		ropt->dropSchema = outputClean;
		ropt->aclsSkip = aclsSkip;
		ropt->superuser = outputSuperuser;
		ropt->create = outputCreate;
		ropt->noOwner = outputNoOwner;
		ropt->disable_triggers = disable_triggers;
		ropt->use_setsessauth = use_setsessauth;
		ropt->dataOnly = dataOnly;

		if (compressLevel == -1)
			ropt->compression = 0;
		else
			ropt->compression = compressLevel;

		ropt->suppressDumpWarnings = true;	/* We've already shown them */

		RestoreArchive(g_fout, ropt);
	}

	CloseArchive(g_fout);

	PQfinish(g_conn);

	exit(0);
}


/*
 * help: print command-line usage summary for pg_dump and return.
 */
static void
help(const char *progname)
{
	printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
	printf(_("Usage:\n"));
	printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
	printf(_("\nGeneral options:\n"));
	printf(_(" -f, --file=FILENAME output file name\n"));
	printf(_(" -F, --format=c|t|p output file format (custom, tar, plain text)\n"));
	printf(_(" -i, --ignore-version proceed even when server version mismatches\n"
			 " pg_dump version\n"));
	printf(_(" -v, --verbose verbose mode\n"));
	printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
	printf(_(" --help show this help, then exit\n"));
	printf(_(" --version output version information, then exit\n"));
	printf(_("\nOptions controlling the output content:\n"));
	printf(_(" -a, --data-only dump only the data, not the schema\n"));
	printf(_(" -c, --clean clean (drop) schema prior to create\n"));
	printf(_(" -C, --create include commands to create database in dump\n"));
	printf(_(" -d, --inserts dump data as INSERT, rather than COPY, commands\n"));
	printf(_(" -D, --column-inserts dump data as INSERT commands with column names\n"));
	printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
	printf(_(" -n, --schema=SCHEMA dump the named schema only\n"));
	printf(_(" -o, --oids include OIDs in dump\n"));
	printf(_(" -O, --no-owner skip restoration of object ownership\n"
			 " in plain text format\n"));
	printf(_(" -s, --schema-only dump only the schema, no data\n"));
	printf(_(" -S, --superuser=NAME specify the superuser user name to use in\n"
			 " plain text format\n"));
	printf(_(" -t, --table=TABLE dump the named table only\n"));
	printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
	printf(_(" -X disable-dollar-quoting, --disable-dollar-quoting\n"
			 " disable dollar quoting, use SQL standard quoting\n"));
	printf(_(" -X disable-triggers, --disable-triggers\n"
			 " disable triggers during data-only restore\n"));
	printf(_(" -X use-set-session-authorization, --use-set-session-authorization\n"
			 " use SESSION AUTHORIZATION commands instead of\n"
			 " OWNER TO commands\n"));
	printf(_("\nConnection options:\n"));
	printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
	printf(_(" -p, --port=PORT database server port number\n"));
	printf(_(" -U, --username=NAME connect as specified database user\n"));
	printf(_(" -W, --password force password prompt (should happen automatically)\n"));
	printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
			 "variable value is used.\n\n"));
	printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
}


/*
 * exit_nicely: close the database connection, report the abort if verbose,
 * and terminate with a failure status.
 */
void
exit_nicely(void)
{
	PQfinish(g_conn);
	if (g_verbose)
		write_msg(NULL, "*** aborted because of error\n");
	exit(1);
}


/*
 * selectDumpableNamespace: policy-setting subroutine
 *		Mark a namespace as to be dumped or not
 */
static void
selectDumpableNamespace(NamespaceInfo *nsinfo)
{
	/*
	 * If a specific table is being dumped, do not dump any complete
	 * namespaces. If a specific namespace is being dumped, dump just that
	 * namespace. Otherwise, dump all non-system namespaces.
	 */
	if (selectTableName != NULL)
		nsinfo->dump = false;
	else if (selectSchemaName != NULL)
	{
		if (strcmp(nsinfo->dobj.name, selectSchemaName) == 0)
			nsinfo->dump = true;
		else
			nsinfo->dump = false;
	}
	else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
			 strcmp(nsinfo->dobj.name, "information_schema") == 0)
		nsinfo->dump = false;
	else
		nsinfo->dump = true;
}


/*
 * selectDumpableTable: policy-setting subroutine
 *		Mark a table as to be dumped or not
 */
static void
selectDumpableTable(TableInfo *tbinfo)
{
	/*
	 * Always dump if dumping parent namespace; else, if a particular
	 * tablename has been specified, dump matching table name; else, do not
	 * dump.
	 */
	tbinfo->dump = false;
	if (tbinfo->dobj.namespace->dump)
		tbinfo->dump = true;
	else if (selectTableName != NULL &&
			 strcmp(tbinfo->dobj.name, selectTableName) == 0)
	{
		/* If both -s and -t specified, must match both to dump */
		if (selectSchemaName == NULL)
			tbinfo->dump = true;
		else if (strcmp(tbinfo->dobj.namespace->dobj.name,
						selectSchemaName) == 0)
			tbinfo->dump = true;
	}
}


/*
 * Dump a table's contents for loading using the COPY command
 * - this routine is called by the Archiver when it wants the table
 *	 to be dumped.
 */

#define COPYBUFSIZ 8192

static int
dumpTableData_copy(Archive *fout, void *dcontext)
{
	TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
	TableInfo  *tbinfo = tdinfo->tdtable;
	const char *classname = tbinfo->dobj.name;
	const bool	hasoids = tbinfo->hasoids;
	const bool	oids = tdinfo->oids;
	PQExpBuffer q = createPQExpBuffer();
	PGresult   *res;
	int			ret;
	bool		copydone;
	char		copybuf[COPYBUFSIZ];
	const char *column_list;

	if (g_verbose)
		write_msg(NULL, "dumping contents of table %s\n", classname);

	/*
	 * Make sure we are in proper schema. We will qualify the table name
	 * below anyway (in case its name conflicts with a pg_catalog table); but
	 * this ensures reproducible results in case the table contains regproc,
	 * regclass, etc columns.
	 */
	selectSourceSchema(tbinfo->dobj.namespace->dobj.name);

	/*
	 * If possible, specify the column list explicitly so that we have no
	 * possibility of retrieving data in the wrong column order. (The default
	 * column ordering of COPY will not be what we want in certain corner
	 * cases involving ADD COLUMN and inheritance.)
	 */
	if (g_fout->remoteVersion >= 70300)
		column_list = fmtCopyColumnList(tbinfo);
	else
		column_list = "";		/* can't select columns in COPY */

	if (oids && hasoids)
	{
		appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
						  fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
										 classname),
						  column_list);
	}
	else
	{
		appendPQExpBuffer(q, "COPY %s %s TO stdout;",
						  fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
										 classname),
						  column_list);
	}
	res = PQexec(g_conn, q->data);
	check_sql_result(res, g_conn, q->data, PGRES_COPY_OUT);

	copydone = false;

	while (!copydone)
	{
		/*
		 * PQgetline returns 0 for a complete line, 1 for a partial line
		 * (buffer filled), EOF when the copy stream ends.
		 */
		ret = PQgetline(g_conn, copybuf, COPYBUFSIZ);

		if (copybuf[0] == '\\' &&
			copybuf[1] == '.' &&
			copybuf[2] == '\0')
		{
			copydone = true;	/* don't print this... */
		}
		else
		{
			archputs(copybuf, fout);
			switch (ret)
			{
				case EOF:
					copydone = true;
					/* FALLTHROUGH */
				case 0:
					archputs("\n", fout);
					break;
				case 1:
					break;
			}
		}

		/*
		 * THROTTLE:
		 *
		 * There was considerable discussion in late July, 2000 regarding
		 * slowing down pg_dump when backing up large tables. Users with both
		 * slow & fast (muti-processor) machines experienced performance
		 * degradation when doing a backup.
		 *
		 * Initial attempts based on sleeping for a number of ms for each ms
		 * of work were deemed too complex, then a simple 'sleep in each loop'
		 * implementation was suggested. The latter failed because the loop
		 * was too tight. Finally, the following was implemented:
		 *
		 * If throttle is non-zero, then See how long since the last sleep.
		 * Work out how long to sleep (based on ratio). If sleep is more than
		 * 100ms, then sleep reset timer EndIf EndIf
		 *
		 * where the throttle value was the number of ms to sleep per ms of
		 * work. The calculation was done in each loop.
		 *
		 * Most of the hard work is done in the backend, and this solution
		 * still did not work particularly well: on slow machines, the ratio
		 * was 50:1, and on medium paced machines, 1:1, and on fast
		 * multi-processor machines, it had little or no effect, for reasons
		 * that were unclear.
		 *
		 * Further discussion ensued, and the proposal was dropped.
		 *
		 * For those people who want this feature, it can be implemented using
		 * gettimeofday in each loop, calculating the time since last sleep,
		 * multiplying that by the sleep ratio, then if the result is more
		 * than a preset 'minimum sleep time' (say 100ms), call the 'select'
		 * function to sleep for a subsecond period ie.
		 *
		 * select(0, NULL, NULL, NULL, &tvi);
		 *
		 * This will return after the interval specified in the structure tvi.
		 * Finally, call gettimeofday again to save the 'last sleep time'.
		 */
	}
	archprintf(fout, "\\.\n\n\n");

	ret = PQendcopy(g_conn);
	if (ret != 0)
	{
		write_msg(NULL, "SQL command to dump the contents of table \"%s\" failed: PQendcopy() failed.\n", classname);
		write_msg(NULL, "Error message from server: %s", PQerrorMessage(g_conn));
		write_msg(NULL, "The command was: %s\n", q->data);
		exit_nicely();
	}

	PQclear(res);
	destroyPQExpBuffer(q);
	return 1;
}


/*
 * Dump a table's contents row-by-row (presumably as INSERT commands, per the
 * -d/-D options — body continues beyond this fragment; confirm against the
 * full file).
 */
static int
dumpTableData_insert(Archive *fout, void *dcontext)
{
	TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
	TableInfo  *tbinfo = tdinfo->tdtable;
	const char *classname = tbinfo->dobj.name;
	PQExpBuffer q = createPQExpBuffer();
	PGresult   *res;
	int			tuple;
	int			nfields;
	int			field;

	/*
	 * Make sure we are in proper schema. We will qualify the table name
	 * below anyway (in case its name conflicts with a pg_catalog table); but
	 * this ensures reproducible results in case the table contains regproc,
	 * regclass, etc columns.
	 */
	selectSourceSchema(tbinfo->dobj.namespace->dobj.name);

	if (fout->remoteVersion >= 70100)
	{
/* (non-source viewer hotkey text removed from extracted fragment) */