
pg_dump.c

Source code from PostgreSQL 8.1.4, an open-source database system for Linux.
Language: C
Page 1 of 5
		appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "						  "SELECT * FROM ONLY %s",						  fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,										 classname));	}	else	{		appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "						  "SELECT * FROM %s",						  fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,										 classname));	}	res = PQexec(g_conn, q->data);	check_sql_result(res, g_conn, q->data, PGRES_COMMAND_OK);	do	{		PQclear(res);		res = PQexec(g_conn, "FETCH 100 FROM _pg_dump_cursor");		check_sql_result(res, g_conn, "FETCH 100 FROM _pg_dump_cursor",						 PGRES_TUPLES_OK);		nfields = PQnfields(res);		for (tuple = 0; tuple < PQntuples(res); tuple++)		{			archprintf(fout, "INSERT INTO %s ", fmtId(classname));			if (nfields == 0)			{				/* corner case for zero-column table */				archprintf(fout, "DEFAULT VALUES;\n");				continue;			}			if (attrNames == true)			{				resetPQExpBuffer(q);				appendPQExpBuffer(q, "(");				for (field = 0; field < nfields; field++)				{					if (field > 0)						appendPQExpBuffer(q, ", ");					appendPQExpBufferStr(q, fmtId(PQfname(res, field)));				}				appendPQExpBuffer(q, ") ");				archputs(q->data, fout);			}			archprintf(fout, "VALUES (");			for (field = 0; field < nfields; field++)			{				if (field > 0)					archprintf(fout, ", ");				if (PQgetisnull(res, tuple, field))				{					archprintf(fout, "NULL");					continue;				}				/* XXX This code is partially duplicated in ruleutils.c */				switch (PQftype(res, field))				{					case INT2OID:					case INT4OID:					case INT8OID:					case OIDOID:					case FLOAT4OID:					case FLOAT8OID:					case NUMERICOID:						{							/*							 * These types are printed without quotes unless							 * they contain values that aren't accepted by the							 * scanner unquoted (e.g., 'NaN').	Note that							 * strtod() and friends might accept NaN, so we							 * can't use that to test.							 *							 * In reality we only need to defend against							 * infinity and NaN, so we need not get too crazy							 * about pattern matching here.							 */							const char *s = PQgetvalue(res, tuple, field);							if (strspn(s, "0123456789 +-eE.") == strlen(s))								archprintf(fout, "%s", s);							else								archprintf(fout, "'%s'", s);						}						break;					case BITOID:					case VARBITOID:						archprintf(fout, "B'%s'",								   PQgetvalue(res, tuple, field));						break;					case BOOLOID:						if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)							archprintf(fout, "true");						else							archprintf(fout, "false");						break;					default:						/* All other types are printed as string literals. */						resetPQExpBuffer(q);						appendStringLiteral(q, PQgetvalue(res, tuple, field), false);						archputs(q->data, fout);						break;				}			}			archprintf(fout, ");\n");		}	} while (PQntuples(res) > 0);	PQclear(res);	archprintf(fout, "\n\n");	do_sql_command(g_conn, "CLOSE _pg_dump_cursor");	destroyPQExpBuffer(q);	return 1;}/* * dumpTableData - *	  dump the contents of a single table * * Actually, this just makes an ArchiveEntry for the table contents. 
 */
static void
dumpTableData(Archive *fout, TableDataInfo *tdinfo)
{
	TableInfo  *tbinfo = tdinfo->tdtable;
	PQExpBuffer copyBuf = createPQExpBuffer();
	DataDumperPtr dumpFn;
	char	   *copyStmt;

	if (!dumpInserts)
	{
		/* Dump/restore using COPY */
		dumpFn = dumpTableData_copy;
		/* must use 2 steps here 'cause fmtId is nonreentrant */
		appendPQExpBuffer(copyBuf, "COPY %s ",
						  fmtId(tbinfo->dobj.name));
		appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
						  fmtCopyColumnList(tbinfo),
						  (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
		copyStmt = copyBuf->data;
	}
	else
	{
		/* Restore using INSERT */
		dumpFn = dumpTableData_insert;
		copyStmt = NULL;
	}

	ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
				 tbinfo->dobj.name,
				 tbinfo->dobj.namespace->dobj.name,
				 NULL,
				 tbinfo->rolname, false,
				 "TABLE DATA", "", "", copyStmt,
				 tdinfo->dobj.dependencies, tdinfo->dobj.nDeps,
				 dumpFn, tdinfo);

	destroyPQExpBuffer(copyBuf);
}

/*
 * getTableData -
 *	  set up dumpable objects representing the contents of tables
 */
static void
getTableData(TableInfo *tblinfo, int numTables, bool oids)
{
	int			i;

	for (i = 0; i < numTables; i++)
	{
		/* Skip VIEWs (no data to dump) */
		if (tblinfo[i].relkind == RELKIND_VIEW)
			continue;
		/* Skip SEQUENCEs (handled elsewhere) */
		if (tblinfo[i].relkind == RELKIND_SEQUENCE)
			continue;

		if (tblinfo[i].dump)
		{
			TableDataInfo *tdinfo;

			tdinfo = (TableDataInfo *) malloc(sizeof(TableDataInfo));

			tdinfo->dobj.objType = DO_TABLE_DATA;

			/*
			 * Note: use tableoid 0 so that this object won't be mistaken for
			 * something that pg_depend entries apply to.
			 */
			tdinfo->dobj.catId.tableoid = 0;
			tdinfo->dobj.catId.oid = tblinfo[i].dobj.catId.oid;
			AssignDumpId(&tdinfo->dobj);
			tdinfo->dobj.name = tblinfo[i].dobj.name;
			tdinfo->dobj.namespace = tblinfo[i].dobj.namespace;
			tdinfo->tdtable = &(tblinfo[i]);
			tdinfo->oids = oids;
			addObjectDependency(&tdinfo->dobj, tblinfo[i].dobj.dumpId);
		}
	}
}

/*
 * dumpDatabase:
 *	dump the database definition
 */
static void
dumpDatabase(Archive *AH)
{
	PQExpBuffer dbQry = createPQExpBuffer();
	PQExpBuffer delQry = createPQExpBuffer();
	PQExpBuffer creaQry = createPQExpBuffer();
	PGresult   *res;
	int			ntups;
	int			i_tableoid,
				i_oid,
				i_dba,
				i_encoding,
				i_tablespace;
	CatalogId	dbCatId;
	DumpId		dbDumpId;
	const char *datname,
			   *dba,
			   *encoding,
			   *tablespace;

	datname = PQdb(g_conn);

	if (g_verbose)
		write_msg(NULL, "saving database definition\n");

	/* Make sure we are in proper schema */
	selectSourceSchema("pg_catalog");

	/* Get the database owner and parameters from pg_database */
	if (g_fout->remoteVersion >= 80000)
	{
		appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
						  "(%s datdba) as dba, "
						  "pg_encoding_to_char(encoding) as encoding, "
						  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) as tablespace "
						  "FROM pg_database "
						  "WHERE datname = ",
						  username_subquery);
		appendStringLiteral(dbQry, datname, true);
	}
	else if (g_fout->remoteVersion >= 70100)
	{
		appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
						  "(%s datdba) as dba, "
						  "pg_encoding_to_char(encoding) as encoding, "
						  "NULL as tablespace "
						  "FROM pg_database "
						  "WHERE datname = ",
						  username_subquery);
		appendStringLiteral(dbQry, datname, true);
	}
	else
	{
		appendPQExpBuffer(dbQry, "SELECT "
						  "(SELECT oid FROM pg_class WHERE relname = 'pg_database') AS tableoid, "
						  "oid, "
						  "(%s datdba) as dba, "
						  "pg_encoding_to_char(encoding) as encoding, "
				  "NULL as tablespace "						  "FROM pg_database "						  "WHERE datname = ",						  username_subquery);		appendStringLiteral(dbQry, datname, true);	}	res = PQexec(g_conn, dbQry->data);	check_sql_result(res, g_conn, dbQry->data, PGRES_TUPLES_OK);	ntups = PQntuples(res);	if (ntups <= 0)	{		write_msg(NULL, "missing pg_database entry for database \"%s\"\n",				  datname);		exit_nicely();	}	if (ntups != 1)	{		write_msg(NULL, "query returned more than one (%d) pg_database entry for database \"%s\"\n",				  ntups, datname);		exit_nicely();	}	i_tableoid = PQfnumber(res, "tableoid");	i_oid = PQfnumber(res, "oid");	i_dba = PQfnumber(res, "dba");	i_encoding = PQfnumber(res, "encoding");	i_tablespace = PQfnumber(res, "tablespace");	dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));	dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));	dba = PQgetvalue(res, 0, i_dba);	encoding = PQgetvalue(res, 0, i_encoding);	tablespace = PQgetvalue(res, 0, i_tablespace);	appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",					  fmtId(datname));	if (strlen(encoding) > 0)	{		appendPQExpBuffer(creaQry, " ENCODING = ");		appendStringLiteral(creaQry, encoding, true);	}	if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0)		appendPQExpBuffer(creaQry, " TABLESPACE = %s",						  fmtId(tablespace));	appendPQExpBuffer(creaQry, ";\n");	appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",					  fmtId(datname));	dbDumpId = createDumpId();	ArchiveEntry(AH,				 dbCatId,		/* catalog ID */				 dbDumpId,		/* dump ID */				 datname,		/* Name */				 NULL,			/* Namespace */				 NULL,			/* Tablespace */				 dba,			/* Owner */				 false,			/* with oids */				 "DATABASE",	/* Desc */				 creaQry->data, /* Create */				 delQry->data,	/* Del */				 NULL,			/* Copy */				 NULL,			/* Deps */				 0,				/* # Deps */				 NULL,			/* Dumper */				 NULL);			/* Dumper Arg */	/* Dump DB comment if any */	resetPQExpBuffer(dbQry);	appendPQExpBuffer(dbQry, "DATABASE %s", fmtId(datname));	dumpComment(AH, dbQry->data, NULL, "",				dbCatId, 0, dbDumpId);	PQclear(res);	destroyPQExpBuffer(dbQry);	destroyPQExpBuffer(delQry);	destroyPQExpBuffer(creaQry);}/* * dumpEncoding: put the correct encoding into the archive */static voiddumpEncoding(Archive *AH){	PQExpBuffer qry;	PGresult   *res;	/* Can't read the encoding from pre-7.3 servers (SHOW isn't a query) */	if (AH->remoteVersion < 70300)		return;	if (g_verbose)		write_msg(NULL, "saving encoding\n");	qry = createPQExpBuffer();	appendPQExpBuffer(qry, "SHOW client_encoding");	res = PQexec(g_conn, qry->data);	check_sql_result(res, g_conn, qry->data, PGRES_TUPLES_OK);	resetPQExpBuffer(qry);	appendPQExpBuffer(qry, "SET client_encoding = ");	appendStringLiteral(qry, PQgetvalue(res, 0, 0), true);	appendPQExpBuffer(qry, ";\n");	ArchiveEntry(AH, nilCatalogId, createDumpId(),				 "ENCODING", NULL, NULL, "",				 false, "ENCODING", qry->data, "", NULL,				 NULL, 0,				 NULL, NULL);	PQclear(res);	destroyPQExpBuffer(qry);}/* * hasBlobs: *	Test whether database contains any large objects */static boolhasBlobs(Archive *AH){	bool		result;	const char *blobQry;	PGresult   *res;	/* Make sure we are in proper schema */	selectSourceSchema("pg_catalog");	/* Check for BLOB OIDs */	if (AH->remoteVersion >= 70100)		blobQry = "SELECT loid FROM pg_largeobject LIMIT 1";	else		blobQry = "SELECT oid FROM pg_class WHERE relkind = 'l' LIMIT 1";	res = PQexec(g_conn, blobQry);	check_sql_result(res, g_conn, blobQry, PGRES_TUPLES_OK);	result = PQntuples(res) > 0;	PQclear(res);	return result;}/* * dumpBlobs: *	
 *	dump all blobs
 */
static int
dumpBlobs(Archive *AH, void *arg)
{
	const char *blobQry;
	const char *blobFetchQry;
	PGresult   *res;
	char		buf[LOBBUFSIZE];
	int			i;
	int			cnt;

	if (g_verbose)
		write_msg(NULL, "saving large objects\n");

	/* Make sure we are in proper schema */
	selectSourceSchema("pg_catalog");

	/* Cursor to get all BLOB OIDs */
	if (AH->remoteVersion >= 70100)
		blobQry = "DECLARE bloboid CURSOR FOR SELECT DISTINCT loid FROM pg_largeobject";
	else
		blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_class WHERE relkind = 'l'";

	res = PQexec(g_conn, blobQry);
	check_sql_result(res, g_conn, blobQry, PGRES_COMMAND_OK);

	/* Command to fetch from cursor */
	blobFetchQry = "FETCH 1000 IN bloboid";

	do
	{
		PQclear(res);

		/* Do a fetch */
		res = PQexec(g_conn, blobFetchQry);
		check_sql_result(res, g_conn, blobFetchQry, PGRES_TUPLES_OK);
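Note on the INSERT-mode data path in dumpTableData_insert above: it is used only when dumpInserts is set (pg_dump's --inserts / --column-inserts options); otherwise dumpTableData_copy emits COPY statements. As an illustrative sketch only, with a made-up table and values (not taken from the source), the loop above produces output shaped like this: a column list is added when attrNames is true, numeric types are written unquoted, booleans as true/false, NULLs as NULL, other types as quoted string literals, and a zero-column table gets DEFAULT VALUES.

	INSERT INTO customers (id, name, active, note) VALUES (1, 'Alice', true, NULL);
	INSERT INTO customers (id, name, active, note) VALUES (2, 'Bob', false, 'on hold');
	INSERT INTO empty_table DEFAULT VALUES;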
