postgresql/contrib/pg_upgrade/version.c
Alvaro Herrera 8396447cdb Create libpgcommon, and move pg_malloc et al to it
libpgcommon is a new static library to allow sharing code among the
various frontend programs and backend; this lets us eliminate duplicate
implementations of common routines.  We avoid libpgport, because that's
intended as a place for porting issues; per discussion, it seems better
to keep them separate.

The first use case, and the only one implemented by this patch, is pg_malloc
and friends, which many frontend programs were already using.
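
The pattern these wrappers implement is small: allocate, and on failure report
the error and exit, so callers never have to check for NULL. Below is a minimal
sketch of that pattern; the name xmalloc_or_die is hypothetical and the code is
illustrative, not the actual libpgcommon implementation.

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative pg_malloc-style frontend allocator: return the requested
 * memory, or print an error and terminate the program.
 */
void *
xmalloc_or_die(size_t size)
{
    void *ptr;

    /* malloc(0) is allowed to return NULL; request at least one byte */
    ptr = malloc(size == 0 ? 1 : size);
    if (ptr == NULL)
    {
        fprintf(stderr, "out of memory\n");
        exit(EXIT_FAILURE);
    }
    return ptr;
}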

At the same time, we can use this to provide palloc emulation functions
for the frontend; this way, some palloc-using files in the backend can
also be used by the frontend cleanly.  To do this, we change palloc() in
the backend to be a function instead of a macro on top of
MemoryContextAlloc().  This was previously believed to cause loss of
performance, but this implementation has been tweaked by Tom and Andres
so that on modern compilers it provides a slight improvement over the
previous one.
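
Roughly, the change is from a macro like the first definition below to an
out-of-line function like the second. This is a simplified sketch assuming the
usual backend headers (postgres.h, utils/palloc.h); the function actually
committed does more (e.g. request-size validation), so this only shows the
shape of the change.

/* Before (roughly): palloc as a macro over the active memory context */
#define palloc(sz)  MemoryContextAlloc(CurrentMemoryContext, (sz))

/* Only one of the two definitions exists at a time; the #undef just keeps
 * this before/after sketch compilable as a single file. */
#undef palloc

/*
 * After (roughly): a real function with the same behavior.  Because it is a
 * function, frontend code can link a different palloc() that falls back to
 * pg_malloc-style allocation instead of memory contexts.
 */
void *
palloc(Size size)
{
    return MemoryContextAlloc(CurrentMemoryContext, size);
}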

This lets us clean up some places that already relied on localized hacks.

Most of the pg_malloc/palloc changes in this patch were authored by
Andres Freund. Zoltán Böszörményi also independently provided a version of
this.  The libpgcommon infrastructure was authored by Álvaro.
2013-02-12 11:21:05 -03:00

/*
 * version.c
 *
 * Postgres-version-specific routines
 *
 * Copyright (c) 2010-2013, PostgreSQL Global Development Group
 * contrib/pg_upgrade/version.c
 */

#include "postgres_fe.h"

#include "pg_upgrade.h"


/*
 * new_9_0_populate_pg_largeobject_metadata()
 *    new >= 9.0, old <= 8.4
 *    9.0 has a new pg_largeobject permission table
 */
void
new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
{
    int         dbnum;
    FILE       *script = NULL;
    bool        found = false;
    char        output_path[MAXPGPATH];

    prep_status("Checking for large objects");

    snprintf(output_path, sizeof(output_path), "pg_largeobject.sql");

    for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
    {
        PGresult   *res;
        int         i_count;
        DbInfo     *active_db = &cluster->dbarr.dbs[dbnum];
        PGconn     *conn = connectToServer(cluster, active_db->db_name);

        /* find if there are any large objects */
        res = executeQueryOrDie(conn,
                                "SELECT count(*) "
                                "FROM pg_catalog.pg_largeobject ");

        i_count = PQfnumber(res, "count");
        if (atoi(PQgetvalue(res, 0, i_count)) != 0)
        {
            found = true;
            if (!check_mode)
            {
                if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
                    pg_log(PG_FATAL, "could not open file \"%s\": %s\n",
                           output_path, getErrorText(errno));
                fprintf(script, "\\connect %s\n",
                        quote_identifier(active_db->db_name));
                fprintf(script,
                        "SELECT pg_catalog.lo_create(t.loid)\n"
                        "FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
            }
        }

        PQclear(res);
        PQfinish(conn);
    }

    if (script)
        fclose(script);

    if (found)
    {
        report_status(PG_WARNING, "warning");
        if (check_mode)
            pg_log(PG_WARNING, "\n"
                   "Your installation contains large objects. The new database has an\n"
                   "additional large object permission table. After upgrading, you will be\n"
                   "given a command to populate the pg_largeobject permission table with\n"
                   "default permissions.\n\n");
        else
            pg_log(PG_WARNING, "\n"
                   "Your installation contains large objects. The new database has an\n"
                   "additional large object permission table, so default permissions must be\n"
                   "defined for all large objects. The file\n"
                   " %s\n"
                   "when executed by psql by the database superuser will set the default\n"
                   "permissions.\n\n",
                   output_path);
    }
    else
        check_ok();
}
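
For reference, the script this function writes (pg_largeobject.sql) contains,
for each database that has large objects, a \connect line followed by the
lo_create() query built above. With a placeholder database name "mydb", the
generated output looks like:

\connect mydb
SELECT pg_catalog.lo_create(t.loid)
FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;

As the warning text says, the file is meant to be executed by a database
superuser after the upgrade, for example with
"psql -U postgres -f pg_largeobject.sql" (the user name here is only an
example).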