pgindent run before PG 9.1 beta 1.

Bruce Momjian 2011-04-10 11:42:00 -04:00
parent 9a8b73147c
commit bf50caf105
446 changed files with 5737 additions and 5258 deletions
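Nearly every hunk below is mechanical reformatting from pgindent, PostgreSQL's source-code beautifier (src/tools/pgindent), and changes no behavior. Two patterns account for most of the churn: comma-separated declarators are split one per line, and block comments are re-filled to the standard line width. A hedged before/after sketch of both patterns; example() and its variables are invented for illustration and do not appear in the commit:

/* Before pgindent: several declarators on one line, comment wrapped short */
static void
example(void)
{
	int			tmpfile, ops, writes;
	/*
	 * This comment was wrapped
	 * well short of the target width.
	 */
}

/* After pgindent: one declarator per line, comment re-filled to ~78 cols */
static void
example(void)
{
	int			tmpfile,
				ops,
				writes;

	/*
	 * This comment was wrapped well short of the target width.
	 */
}

pgindent also inserts a blank line between a declaration section and whatever follows it, which is all that many of the one-line "+" hunks below do.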

View File

@@ -80,6 +80,7 @@ convert_and_check_filename(text *arg, bool logAllowed)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("reference to parent directory (\"..\") not allowed"))));
+
/*
* Allow absolute paths if within DataDir or Log_directory, even
* though Log_directory might be outside DataDir.

View File

@@ -169,7 +169,6 @@ ts_dist(PG_FUNCTION_ARGS)
PG_RETURN_INTERVAL_P(p);
}
else
r = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));

View File

@@ -149,16 +149,16 @@ file_fdw_validator(PG_FUNCTION_ARGS)
/*
* Only superusers are allowed to set options of a file_fdw foreign table.
- * This is because the filename is one of those options, and we don't
- * want non-superusers to be able to determine which file gets read.
+ * This is because the filename is one of those options, and we don't want
+ * non-superusers to be able to determine which file gets read.
*
* Putting this sort of permissions check in a validator is a bit of a
* crock, but there doesn't seem to be any other place that can enforce
* the check more cleanly.
*
- * Note that the valid_options[] array disallows setting filename at
- * any options level other than foreign table --- otherwise there'd
- * still be a security hole.
+ * Note that the valid_options[] array disallows setting filename at any
+ * options level other than foreign table --- otherwise there'd still be a
+ * security hole.
*/
if (catalog == ForeignTableRelationId && !superuser())
ereport(ERROR,
@@ -368,8 +368,8 @@ fileBeginForeignScan(ForeignScanState *node, int eflags)
&filename, &options);
/*
- * Create CopyState from FDW options. We always acquire all columns,
- * so as to match the expected ScanTupleSlot signature.
+ * Create CopyState from FDW options. We always acquire all columns, so
+ * as to match the expected ScanTupleSlot signature.
*/
cstate = BeginCopyFrom(node->ss.ss_currentRelation,
filename,
@@ -410,8 +410,8 @@ fileIterateForeignScan(ForeignScanState *node)
/*
* The protocol for loading a virtual tuple into a slot is first
* ExecClearTuple, then fill the values/isnull arrays, then
- * ExecStoreVirtualTuple. If we don't find another row in the file,
- * we just skip the last step, leaving the slot empty as required.
+ * ExecStoreVirtualTuple. If we don't find another row in the file, we
+ * just skip the last step, leaving the slot empty as required.
*
* We can pass ExprContext = NULL because we read all columns from the
* file, so no need to evaluate default expressions.
@@ -480,8 +480,8 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
Cost cpu_per_tuple;
/*
- * Get size of the file. It might not be there at plan time, though,
- * in which case we have to use a default estimate.
+ * Get size of the file. It might not be there at plan time, though, in
+ * which case we have to use a default estimate.
*/
if (stat(filename, &stat_buf) < 0)
stat_buf.st_size = 10 * BLCKSZ;
@@ -505,10 +505,9 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
ntuples = clamp_row_est((double) stat_buf.st_size / (double) tuple_width);
/*
- * Now estimate the number of rows returned by the scan after applying
- * the baserestrictinfo quals. This is pretty bogus too, since the
- * planner will have no stats about the relation, but it's better than
- * nothing.
+ * Now estimate the number of rows returned by the scan after applying the
+ * baserestrictinfo quals. This is pretty bogus too, since the planner
+ * will have no stats about the relation, but it's better than nothing.
*/
nrows = ntuples *
clauselist_selectivity(root,
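The row estimate above is plain arithmetic: file size divided by assumed tuple width, with a fallback size when the file cannot be stat'ed at plan time. A standalone sketch of that calculation; estimate_file_rows is a hypothetical helper, and the crude final clamp stands in for the planner's clamp_row_est():

#include <sys/stat.h>

#define BLCKSZ 8192				/* assumed default PostgreSQL block size */

static double
estimate_file_rows(const char *filename, int tuple_width)
{
	struct stat stat_buf;
	double		ntuples;

	/* The file might not exist at plan time; use a default estimate. */
	if (stat(filename, &stat_buf) < 0)
		stat_buf.st_size = 10 * BLCKSZ;

	ntuples = (double) stat_buf.st_size / (double) tuple_width;
	return (ntuples < 1.0) ? 1.0 : ntuples;
}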

View File

@@ -87,11 +87,13 @@ levenshtein_internal(text *s, text *t,
/*
* For levenshtein_less_equal_internal, we have real variables called
- * start_column and stop_column; otherwise it's just short-hand for 0
- * and m.
+ * start_column and stop_column; otherwise it's just short-hand for 0 and
+ * m.
*/
#ifdef LEVENSHTEIN_LESS_EQUAL
- int start_column, stop_column;
+ int start_column,
+ stop_column;
+
#undef START_COLUMN
#undef STOP_COLUMN
#define START_COLUMN start_column
@@ -162,20 +164,20 @@ levenshtein_internal(text *s, text *t,
else if (ins_c + del_c > 0)
{
/*
- * Figure out how much of the first row of the notional matrix
- * we need to fill in. If the string is growing, the theoretical
+ * Figure out how much of the first row of the notional matrix we
+ * need to fill in. If the string is growing, the theoretical
* minimum distance already incorporates the cost of deleting the
- * number of characters necessary to make the two strings equal
- * in length. Each additional deletion forces another insertion,
- * so the best-case total cost increases by ins_c + del_c.
- * If the string is shrinking, the minimum theoretical cost
- * assumes no excess deletions; that is, we're starting no futher
- * right than column n - m. If we do start further right, the
- * best-case total cost increases by ins_c + del_c for each move
- * right.
+ * number of characters necessary to make the two strings equal in
+ * length. Each additional deletion forces another insertion, so
+ * the best-case total cost increases by ins_c + del_c. If the
+ * string is shrinking, the minimum theoretical cost assumes no
+ * excess deletions; that is, we're starting no futher right than
+ * column n - m. If we do start further right, the best-case
+ * total cost increases by ins_c + del_c for each move right.
*/
int slack_d = max_d - min_theo_d;
int best_column = net_inserts < 0 ? -net_inserts : 0;
+
stop_column = best_column + (slack_d / (ins_c + del_c)) + 1;
if (stop_column > m)
stop_column = m + 1;
@@ -185,11 +187,11 @@ levenshtein_internal(text *s, text *t,
/*
* In order to avoid calling pg_mblen() repeatedly on each character in s,
- * we cache all the lengths before starting the main loop -- but if all the
- * characters in both strings are single byte, then we skip this and use
- * a fast-path in the main loop. If only one string contains multi-byte
- * characters, we still build the array, so that the fast-path needn't
- * deal with the case where the array hasn't been initialized.
+ * we cache all the lengths before starting the main loop -- but if all
+ * the characters in both strings are single byte, then we skip this and
+ * use a fast-path in the main loop. If only one string contains
+ * multi-byte characters, we still build the array, so that the fast-path
+ * needn't deal with the case where the array hasn't been initialized.
*/
if (m != s_bytes || n != t_bytes)
{
@@ -214,8 +216,8 @@ levenshtein_internal(text *s, text *t,
curr = prev + m;
/*
- * To transform the first i characters of s into the first 0 characters
- * of t, we must perform i deletions.
+ * To transform the first i characters of s into the first 0 characters of
+ * t, we must perform i deletions.
*/
for (i = START_COLUMN; i < STOP_COLUMN; i++)
prev[i] = i * del_c;
@@ -228,6 +230,7 @@ levenshtein_internal(text *s, text *t,
int y_char_len = n != t_bytes + 1 ? pg_mblen(y) : 1;
#ifdef LEVENSHTEIN_LESS_EQUAL
+
/*
* In the best case, values percolate down the diagonal unchanged, so
* we must increment stop_column unless it's already on the right end
@@ -241,10 +244,10 @@ levenshtein_internal(text *s, text *t,
}
/*
- * The main loop fills in curr, but curr[0] needs a special case:
- * to transform the first 0 characters of s into the first j
- * characters of t, we must perform j insertions. However, if
- * start_column > 0, this special case does not apply.
+ * The main loop fills in curr, but curr[0] needs a special case: to
+ * transform the first 0 characters of s into the first j characters
+ * of t, we must perform j insertions. However, if start_column > 0,
+ * this special case does not apply.
*/
if (start_column == 0)
{
@@ -331,6 +334,7 @@ levenshtein_internal(text *s, text *t,
y += y_char_len;
#ifdef LEVENSHTEIN_LESS_EQUAL
+
/*
* This chunk of code represents a significant performance hit if used
* in the case where there is no max_d bound. This is probably not
@@ -355,6 +359,7 @@ levenshtein_internal(text *s, text *t,
{
int ii = stop_column - 1;
int net_inserts = ii - zp;
+
if (prev[ii] + (net_inserts > 0 ? net_inserts * ins_c :
-net_inserts * del_c) <= max_d)
break;
@@ -365,13 +370,15 @@ levenshtein_internal(text *s, text *t,
while (start_column < stop_column)
{
int net_inserts = start_column - zp;
+
if (prev[start_column] +
(net_inserts > 0 ? net_inserts * ins_c :
-net_inserts * del_c) <= max_d)
break;
+
/*
- * We'll never again update these values, so we must make
- * sure there's nothing here that could confuse any future
+ * We'll never again update these values, so we must make sure
+ * there's nothing here that could confuse any future
* iteration of the outer loop.
*/
prev[start_column] = max_d + 1;
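The comments in this file describe the standard two-row Levenshtein dynamic program: row 0 costs i deletions, column 0 costs j insertions, and each inner cell takes the cheapest of delete, insert, or substitute. A minimal sketch of that textbook recurrence with unit costs and single-byte strings, independent of the PostgreSQL implementation (the fixed 128-element rows are a simplifying assumption):

#include <string.h>

static int
levenshtein(const char *s, const char *t)
{
	int			m = (int) strlen(s);
	int			n = (int) strlen(t);
	int			prev[128],
				curr[128];		/* assumes m < 128, for brevity */

	/* Transforming the first i characters of s into "" takes i deletions. */
	for (int i = 0; i <= m; i++)
		prev[i] = i;

	for (int j = 1; j <= n; j++)
	{
		/* Transforming "" into the first j characters of t takes j inserts. */
		curr[0] = j;
		for (int i = 1; i <= m; i++)
		{
			int			del = prev[i] + 1;
			int			ins = curr[i - 1] + 1;
			int			sub = prev[i - 1] + (s[i - 1] == t[j - 1] ? 0 : 1);

			curr[i] = del < ins ? del : ins;
			if (sub < curr[i])
				curr[i] = sub;
		}
		memcpy(prev, curr, sizeof(int) * (m + 1));
	}
	return prev[m];
}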

View File

@@ -355,8 +355,8 @@ gin_bool_consistent(QUERYTYPE *query, bool *check)
return FALSE;
/*
- * Set up data for checkcondition_gin. This must agree with the
- * query extraction code in ginint4_queryextract.
+ * Set up data for checkcondition_gin. This must agree with the query
+ * extraction code in ginint4_queryextract.
*/
gcv.first = items;
gcv.mapped_check = (bool *) palloc(sizeof(bool) * query->size);

View File

@@ -34,8 +34,8 @@ ginint4_queryextract(PG_FUNCTION_ARGS)
/*
* If the query doesn't have any required primitive values (for
- * instance, it's something like '! 42'), we have to do a full
- * index scan.
+ * instance, it's something like '! 42'), we have to do a full index
+ * scan.
*/
if (query_has_required_values(query))
*searchMode = GIN_SEARCH_MODE_DEFAULT;
@@ -116,6 +116,7 @@ ginint4_consistent(PG_FUNCTION_ARGS)
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
bool res = FALSE;

View File

@@ -988,4 +988,3 @@ const char *ISBN_range_new[][2] = {
{"10-976000", "10-999999"},
{NULL, NULL},
};
-

View File

@@ -29,8 +29,11 @@
static const char *progname;
static int ops_per_test = 2000;
- static char full_buf[XLOG_SEG_SIZE], *buf, *filename = FSYNC_FILENAME;
- static struct timeval start_t, stop_t;
+ static char full_buf[XLOG_SEG_SIZE],
+ *buf,
+ *filename = FSYNC_FILENAME;
+ static struct timeval start_t,
+ stop_t;
static void handle_args(int argc, char *argv[]);
@@ -41,6 +44,7 @@ static void test_sync(int writes_per_op);
static void test_open_syncs(void);
static void test_open_sync(const char *msg, int writes_size);
static void test_file_descriptor_sync(void);
+
#ifdef HAVE_FSYNC_WRITETHROUGH
static int pg_fsync_writethrough(int fd);
#endif
@@ -176,7 +180,9 @@ test_open(void)
static void
test_sync(int writes_per_op)
{
- int tmpfile, ops, writes;
+ int tmpfile,
+ ops,
+ writes;
bool fs_warning = false;
if (writes_per_op == 1)
@@ -353,7 +359,9 @@ test_open_syncs(void)
static void
test_open_sync(const char *msg, int writes_size)
{
- int tmpfile, ops, writes;
+ int tmpfile,
+ ops,
+ writes;
printf(LABEL_FORMAT, msg);
fflush(stdout);
@@ -377,7 +385,6 @@ test_open_sync(const char *msg, int writes_size)
close(tmpfile);
print_elapse(start_t, stop_t);
}
-
#else
printf(NA_FORMAT, "n/a\n");
#endif
@@ -386,22 +393,22 @@ test_open_sync(const char *msg, int writes_size)
static void
test_file_descriptor_sync(void)
{
- int tmpfile, ops;
+ int tmpfile,
+ ops;
/*
- * Test whether fsync can sync data written on a different
- * descriptor for the same file. This checks the efficiency
- * of multi-process fsyncs against the same file.
- * Possibly this should be done with writethrough on platforms
- * which support it.
+ * Test whether fsync can sync data written on a different descriptor for
+ * the same file. This checks the efficiency of multi-process fsyncs
+ * against the same file. Possibly this should be done with writethrough
+ * on platforms which support it.
*/
printf("\nTest if fsync on non-write file descriptor is honored:\n");
printf("(If the times are similar, fsync() can sync data written\n");
printf("on a different descriptor.)\n");
/*
- * first write, fsync and close, which is the
- * normal behavior without multiple descriptors
+ * first write, fsync and close, which is the normal behavior without
+ * multiple descriptors
*/
printf(LABEL_FORMAT, "write, fsync, close");
fflush(stdout);
@@ -416,9 +423,10 @@ test_file_descriptor_sync(void)
if (fsync(tmpfile) != 0)
die("fsync failed");
close(tmpfile);
+
/*
- * open and close the file again to be consistent
- * with the following test
+ * open and close the file again to be consistent with the following
+ * test
*/
if ((tmpfile = open(filename, O_RDWR, 0)) == -1)
die("could not open output file");
@@ -428,9 +436,8 @@ test_file_descriptor_sync(void)
print_elapse(start_t, stop_t);
/*
- * Now open, write, close, open again and fsync
- * This simulates processes fsyncing each other's
- * writes.
+ * Now open, write, close, open again and fsync This simulates processes
+ * fsyncing each other's writes.
*/
printf(LABEL_FORMAT, "write, close, fsync");
fflush(stdout);
@@ -458,7 +465,8 @@ test_file_descriptor_sync(void)
static void
test_non_sync(void)
{
- int tmpfile, ops;
+ int tmpfile,
+ ops;
/*
* Test a simple write without fsync
@@ -494,7 +502,6 @@ pg_fsync_writethrough(int fd)
return -1;
#endif
}
-
#endif
/*
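The cross-descriptor behavior this file times is easy to reproduce standalone: write through one descriptor, close it, then fsync through a second descriptor opened on the same file. A POSIX sketch with error handling trimmed; "testfile" is a placeholder path:

#include <fcntl.h>
#include <unistd.h>

static void
cross_descriptor_fsync(void)
{
	char		buf[8192] = {0};
	int			tmpfile;

	/* write through the first descriptor, then close it */
	tmpfile = open("testfile", O_RDWR | O_CREAT, 0600);
	write(tmpfile, buf, sizeof(buf));
	close(tmpfile);

	/* reopen the same file and fsync through a second descriptor */
	tmpfile = open("testfile", O_RDWR, 0);
	fsync(tmpfile);
	close(tmpfile);
}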

View File

@@ -52,7 +52,8 @@ uint32 trgm2int(trgm *ptr);
#define ISPRINTABLETRGM(t) ( ISPRINTABLECHAR( ((char*)(t)) ) && ISPRINTABLECHAR( ((char*)(t))+1 ) && ISPRINTABLECHAR( ((char*)(t))+2 ) )
#define ISESCAPECHAR(x) (*(x) == '\\') /* Wildcard escape character */
- #define ISWILDCARDCHAR(x) (*(x) == '_' || *(x) == '%') /* Wildcard meta-character */
+ #define ISWILDCARDCHAR(x) (*(x) == '_' || *(x) == '%') /* Wildcard
+ * meta-character */
typedef struct
{

View File

@@ -83,6 +83,7 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS)
text *val = (text *) PG_GETARG_TEXT_P(0);
int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
StrategyNumber strategy = PG_GETARG_UINT16(2);
+
/* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
/* bool **nullFlags = (bool **) PG_GETARG_POINTER(5); */
@@ -104,6 +105,7 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS)
#endif
/* FALL THRU */
case LikeStrategyNumber:
+
/*
* For wildcard search we extract all the trigrams that every
* potentially-matching string must include.
@@ -146,8 +148,10 @@ gin_trgm_consistent(PG_FUNCTION_ARGS)
{
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* text *query = PG_GETARG_TEXT_P(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
bool res;
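For context on the wildcard-search comment above: pg_trgm builds trigrams from words padded with two leading blanks and one trailing blank, so "word" yields "  w", " wo", "wor", "ord", "rd ". A simplified sketch of that windowing for a single ASCII word; the real extraction also handles multibyte encodings and duplicate removal:

#include <stdio.h>
#include <string.h>

static void
print_trigrams(const char *word)
{
	char		padded[256];
	size_t		len;

	snprintf(padded, sizeof(padded), "  %s ", word);	/* "  word " */
	len = strlen(padded);
	for (size_t i = 0; i + 3 <= len; i++)
		printf("\"%.3s\"\n", padded + i);
}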

View File

@@ -190,6 +190,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
text *query = PG_GETARG_TEXT_P(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
/* Oid subtype = PG_GETARG_OID(3); */
bool *recheck = (bool *) PG_GETARG_POINTER(4);
TRGM *key = (TRGM *) DatumGetPointer(entry->key);
@@ -328,6 +329,7 @@ gtrgm_distance(PG_FUNCTION_ARGS)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
text *query = PG_GETARG_TEXT_P(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
/* Oid subtype = PG_GETARG_OID(3); */
TRGM *key = (TRGM *) DatumGetPointer(entry->key);
TRGM *qtrg;

View File

@@ -638,6 +638,7 @@ similarity_dist(PG_FUNCTION_ARGS)
float4 res = DatumGetFloat4(DirectFunctionCall2(similarity,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
+
PG_RETURN_FLOAT4(1.0 - res);
}

View File

@@ -212,7 +212,10 @@ check_cluster_versions(void)
old_cluster.major_version = get_major_server_version(&old_cluster);
new_cluster.major_version = get_major_server_version(&new_cluster);
- /* We allow upgrades from/to the same major version for alpha/beta upgrades */
+ /*
+ * We allow upgrades from/to the same major version for alpha/beta
+ * upgrades
+ */
if (GET_MAJOR_VERSION(old_cluster.major_version) < 803)
pg_log(PG_FATAL, "This utility can only upgrade from PostgreSQL version 8.3 and later.\n");

View File

@@ -505,8 +505,7 @@ check_control_data(ControlData *oldctrl,
"\nOld and new pg_controldata date/time storage types do not match.\n");
/*
- * This is a common 8.3 -> 8.4 upgrade problem, so we are more
- * verbose
+ * This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
*/
pg_log(PG_FATAL,
"You will need to rebuild the new server with configure\n"

View File

@@ -377,4 +377,5 @@ win32_pghardlink(const char *src, const char *dst)
else
return 0;
}
+
#endif

View File

@@ -56,8 +56,8 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
/*
* In pre-8.4, TOAST table names change during CLUSTER; in >= 8.4
- * TOAST relation names always use heap table oids, hence we
- * cannot check relation names when upgrading from pre-8.4.
+ * TOAST relation names always use heap table oids, hence we cannot
+ * check relation names when upgrading from pre-8.4.
*/
if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
((GET_MAJOR_VERSION(old_cluster.major_version) >= 804 ||
@@ -185,7 +185,9 @@ get_db_infos(ClusterInfo *cluster)
int ntups;
int tupnum;
DbInfo *dbinfos;
- int i_datname, i_oid, i_spclocation;
+ int i_datname,
+ i_oid,
+ i_spclocation;
res = executeQueryOrDie(conn,
"SELECT d.oid, d.datname, t.spclocation "
@@ -241,15 +243,19 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
int num_rels = 0;
char *nspname = NULL;
char *relname = NULL;
- int i_spclocation, i_nspname, i_relname, i_oid, i_relfilenode;
+ int i_spclocation,
+ i_nspname,
+ i_relname,
+ i_oid,
+ i_relfilenode;
char query[QUERY_ALLOC];
/*
* pg_largeobject contains user data that does not appear in pg_dumpall
* --schema-only output, so we have to copy that system table heap and
- * index. We could grab the pg_largeobject oids from template1, but
- * it is easy to treat it as a normal table.
- * Order by oid so we can join old/new structures efficiently.
+ * index. We could grab the pg_largeobject oids from template1, but it is
+ * easy to treat it as a normal table. Order by oid so we can join old/new
+ * structures efficiently.
*/
snprintf(query, sizeof(query),

View File

@@ -53,7 +53,8 @@ static void cleanup(void);
/* This is the database used by pg_dumpall to restore global tables */
#define GLOBAL_DUMP_DB "postgres"
- ClusterInfo old_cluster, new_cluster;
+ ClusterInfo old_cluster,
+ new_cluster;
OSInfo os_info;
int
@@ -229,16 +230,16 @@ prepare_new_databases(void)
prep_status("Creating databases in the new cluster");
/*
- * Install support functions in the global-restore database
- * to preserve pg_authid.oid.
+ * Install support functions in the global-restore database to preserve
+ * pg_authid.oid.
*/
install_support_functions_in_new_db(GLOBAL_DUMP_DB);
/*
* We have to create the databases first so we can install support
- * functions in all the other databases. Ideally we could create
- * the support functions in template1 but pg_dumpall creates database
- * using the template0 template.
+ * functions in all the other databases. Ideally we could create the
+ * support functions in template1 but pg_dumpall creates database using
+ * the template0 template.
*/
exec_prog(true,
SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on "

View File

@@ -85,6 +85,7 @@ typedef struct
{
char old_dir[MAXPGPATH];
char new_dir[MAXPGPATH];
+
/*
* old/new relfilenodes might differ for pg_largeobject(_metadata) indexes
* due to VACUUM FULL or REINDEX. Other relfilenodes are preserved.
@@ -234,7 +235,8 @@ typedef struct
*/
extern LogOpts log_opts;
extern UserOpts user_opts;
- extern ClusterInfo old_cluster, new_cluster;
+ extern ClusterInfo old_cluster,
+ new_cluster;
extern OSInfo os_info;
extern char scandir_file_pattern[];

View File

@@ -194,11 +194,11 @@ start_postmaster(ClusterInfo *cluster, bool quiet)
* because it is being used by another process." so we have to send all
* other output to 'nul'.
*
- * Using autovacuum=off disables cleanup vacuum and analyze, but
- * freeze vacuums can still happen, so we set
- * autovacuum_freeze_max_age to its maximum. We assume all datfrozenxid
- * and relfrozen values are less than a gap of 2000000000 from the current
- * xid counter, so autovacuum will not touch them.
+ * Using autovacuum=off disables cleanup vacuum and analyze, but freeze
+ * vacuums can still happen, so we set autovacuum_freeze_max_age to its
+ * maximum. We assume all datfrozenxid and relfrozen values are less than
+ * a gap of 2000000000 from the current xid counter, so autovacuum will
+ * not touch them.
*/
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" "

View File

@@ -850,8 +850,8 @@ top:
if (commands[st->state]->type == SQL_COMMAND)
{
/*
- * Read and discard the query result; note this is not included
- * in the statement latency numbers.
+ * Read and discard the query result; note this is not included in
+ * the statement latency numbers.
*/
res = PQgetResult(st->con);
switch (PQresultStatus(res))
@@ -2014,9 +2014,9 @@ main(int argc, char **argv)
* is_latencies only works with multiple threads in thread-based
* implementations, not fork-based ones, because it supposes that the
* parent can see changes made to the per-thread execution stats by child
- * threads. It seems useful enough to accept despite this limitation,
- * but perhaps we should FIXME someday (by passing the stats data back
- * up through the parent-to-child pipes).
+ * threads. It seems useful enough to accept despite this limitation, but
+ * perhaps we should FIXME someday (by passing the stats data back up
+ * through the parent-to-child pipes).
*/
#ifndef ENABLE_THREAD_SAFETY
if (is_latencies && nthreads > 1)

View File

@@ -108,6 +108,7 @@ fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns)
while ((index = bms_first_member(tmpset)) > 0)
{
attno = index + FirstLowInvalidHeapAttributeNumber;
+
/*
* whole-row-reference shall be fixed-up later
*/
@@ -158,10 +159,9 @@ check_relation_privileges(Oid relOid,
bool result = true;
/*
- * Hardwired Policies:
- * SE-PostgreSQL enforces
- * - clients cannot modify system catalogs using DMLs
- * - clients cannot reference/modify toast relations using DMLs
+ * Hardwired Policies: SE-PostgreSQL enforces - clients cannot modify
+ * system catalogs using DMLs - clients cannot reference/modify toast
+ * relations using DMLs
*/
if (sepgsql_getenforce() > 0)
{
@@ -328,10 +328,9 @@ sepgsql_dml_privileges(List *rangeTabls, bool abort)
/*
* If this RangeTblEntry is also supposed to reference inherited
- * tables, we need to check security label of the child tables.
- * So, we expand rte->relid into list of OIDs of inheritance
- * hierarchy, then checker routine will be invoked for each
- * relations.
+ * tables, we need to check security label of the child tables. So, we
+ * expand rte->relid into list of OIDs of inheritance hierarchy, then
+ * checker routine will be invoked for each relations.
*/
if (!rte->inh)
tableIds = list_make1_oid(rte->relid);
@@ -345,8 +344,8 @@ sepgsql_dml_privileges(List *rangeTabls, bool abort)
Bitmapset *modifiedCols;
/*
- * child table has different attribute numbers, so we need
- * to fix up them.
+ * child table has different attribute numbers, so we need to fix
+ * up them.
*/
selectedCols = fixup_inherited_columns(rte->relid, tableOid,
rte->selectedCols);

View File

@@ -79,8 +79,8 @@ sepgsql_client_auth(Port *port, int status)
(*next_client_auth_hook) (port, status);
/*
- * In the case when authentication failed, the supplied socket
- * shall be closed soon, so we don't need to do anything here.
+ * In the case when authentication failed, the supplied socket shall be
+ * closed soon, so we don't need to do anything here.
*/
if (status != STATUS_OK)
return;
@@ -96,8 +96,8 @@ sepgsql_client_auth(Port *port, int status)
sepgsql_set_client_label(context);
/*
- * Switch the current performing mode from INTERNAL to either
- * DEFAULT or PERMISSIVE.
+ * Switch the current performing mode from INTERNAL to either DEFAULT or
+ * PERMISSIVE.
*/
if (sepgsql_permissive)
sepgsql_set_mode(SEPGSQL_MODE_PERMISSIVE);
@@ -161,8 +161,8 @@ static bool
sepgsql_exec_check_perms(List *rangeTabls, bool abort)
{
/*
- * If security provider is stacking and one of them replied 'false'
- * at least, we don't need to check any more.
+ * If security provider is stacking and one of them replied 'false' at
+ * least, we don't need to check any more.
*/
if (next_exec_check_perms_hook &&
!(*next_exec_check_perms_hook) (rangeTabls, abort))
@@ -193,11 +193,10 @@ sepgsql_needs_fmgr_hook(Oid functionId)
return true;
/*
- * SELinux needs the function to be called via security_definer
- * wrapper, if this invocation will take a domain-transition.
- * We call these functions as trusted-procedure, if the security
- * policy has a rule that switches security label of the client
- * on execution.
+ * SELinux needs the function to be called via security_definer wrapper,
+ * if this invocation will take a domain-transition. We call these
+ * functions as trusted-procedure, if the security policy has a rule that
+ * switches security label of the client on execution.
*/
old_label = sepgsql_get_client_label();
new_label = sepgsql_proc_get_domtrans(functionId);
@@ -210,9 +209,9 @@ sepgsql_needs_fmgr_hook(Oid functionId)
/*
* Even if not a trusted-procedure, this function should not be inlined
- * unless the client has db_procedure:{execute} permission.
- * Please note that it shall be actually failed later because of same
- * reason with ACL_EXECUTE.
+ * unless the client has db_procedure:{execute} permission. Please note
+ * that it shall be actually failed later because of same reason with
+ * ACL_EXECUTE.
*/
function_label = sepgsql_get_label(ProcedureRelationId, functionId, 0);
if (sepgsql_check_perms(sepgsql_get_client_label(),
@@ -238,7 +237,8 @@ static void
sepgsql_fmgr_hook(FmgrHookEventType event,
FmgrInfo *flinfo, Datum *private)
{
- struct {
+ struct
+ {
char *old_label;
char *new_label;
Datum next_private;
@@ -265,8 +265,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
{
/*
* process:transition permission between old and new
- * label, when user tries to switch security label of
- * the client on execution of trusted procedure.
+ * label, when user tries to switch security label of the
+ * client on execution of trusted procedure.
*/
sepgsql_check_perms(cur_label, stack->new_label,
SEPG_CLASS_PROCESS,
@@ -324,6 +324,7 @@ sepgsql_utility_command(Node *parsetree,
switch (nodeTag(parsetree))
{
case T_LoadStmt:
+
/*
* We reject LOAD command across the board on enforcing mode,
* because a binary module can arbitrarily override hooks.
@@ -336,11 +337,12 @@ sepgsql_utility_command(Node *parsetree,
}
break;
default:
+
/*
- * Right now we don't check any other utility commands,
- * because it needs more detailed information to make
- * access control decision here, but we don't want to
- * have two parse and analyze routines individually.
+ * Right now we don't check any other utility commands, because it
+ * needs more detailed information to make access control decision
+ * here, but we don't want to have two parse and analyze routines
+ * individually.
*/
break;
}
@@ -370,9 +372,9 @@ _PG_init(void)
errmsg("sepgsql must be loaded via shared_preload_libraries")));
/*
- * Check availability of SELinux on the platform.
- * If disabled, we cannot activate any SE-PostgreSQL features,
- * and we have to skip rest of initialization.
+ * Check availability of SELinux on the platform. If disabled, we cannot
+ * activate any SE-PostgreSQL features, and we have to skip rest of
+ * initialization.
*/
if (is_selinux_enabled() < 1)
{
@@ -383,8 +385,8 @@ _PG_init(void)
/*
* sepgsql.permissive = (on|off)
*
- * This variable controls performing mode of SE-PostgreSQL
- * on user's session.
+ * This variable controls performing mode of SE-PostgreSQL on user's
+ * session.
*/
DefineCustomBoolVariable("sepgsql.permissive",
"Turn on/off permissive mode in SE-PostgreSQL",
@@ -400,10 +402,9 @@ _PG_init(void)
/*
* sepgsql.debug_audit = (on|off)
*
- * This variable allows users to turn on/off audit logs on access
- * control decisions, independent from auditallow/auditdeny setting
- * in the security policy.
- * We intend to use this option for debugging purpose.
+ * This variable allows users to turn on/off audit logs on access control
+ * decisions, independent from auditallow/auditdeny setting in the
+ * security policy. We intend to use this option for debugging purpose.
*/
DefineCustomBoolVariable("sepgsql.debug_audit",
"Turn on/off debug audit messages",
@@ -419,13 +420,12 @@ _PG_init(void)
/*
* Set up dummy client label.
*
- * XXX - note that PostgreSQL launches background worker process
- * like autovacuum without authentication steps. So, we initialize
- * sepgsql_mode with SEPGSQL_MODE_INTERNAL, and client_label with
- * the security context of server process.
- * Later, it also launches background of user session. In this case,
- * the process is always hooked on post-authentication, and we can
- * initialize the sepgsql_mode and client_label correctly.
+ * XXX - note that PostgreSQL launches background worker process like
+ * autovacuum without authentication steps. So, we initialize sepgsql_mode
+ * with SEPGSQL_MODE_INTERNAL, and client_label with the security context
+ * of server process. Later, it also launches background of user session.
+ * In this case, the process is always hooked on post-authentication, and
+ * we can initialize the sepgsql_mode and client_label correctly.
*/
if (getcon_raw(&context) < 0)
ereport(ERROR,

View File

@@ -107,14 +107,15 @@ void
sepgsql_object_relabel(const ObjectAddress *object, const char *seclabel)
{
/*
- * validate format of the supplied security label,
- * if it is security context of selinux.
+ * validate format of the supplied security label, if it is security
+ * context of selinux.
*/
if (seclabel &&
security_check_context_raw((security_context_t) seclabel) < 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("SELinux: invalid security label: \"%s\"", seclabel)));
+
/*
* Do actual permission checks for each object classes
*/
@@ -305,8 +306,8 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
char *relation_name;
/*
- * Open the target catalog. We don't want to allow writable
- * accesses by other session during initial labeling.
+ * Open the target catalog. We don't want to allow writable accesses by
+ * other session during initial labeling.
*/
rel = heap_open(catalogId, AccessShareLock);
@@ -324,8 +325,8 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
security_context_t context;
/*
- * The way to determine object name depends on object classes.
- * So, any branches set up `objtype', `objname' and `object' here.
+ * The way to determine object name depends on object classes. So, any
+ * branches set up `objtype', `objname' and `object' here.
*/
switch (catalogId)
{
@@ -474,9 +475,10 @@ sepgsql_restorecon(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("sepgsql is not currently enabled")));
+
/*
- * Check DAC permission. Only superuser can set up initial
- * security labels, like root-user in filesystems
+ * Check DAC permission. Only superuser can set up initial security
+ * labels, like root-user in filesystems
*/
if (!superuser())
ereport(ERROR,
@@ -484,9 +486,8 @@ sepgsql_restorecon(PG_FUNCTION_ARGS)
errmsg("SELinux: must be superuser to restore initial contexts")));
/*
- * Open selabel_lookup(3) stuff. It provides a set of mapping
- * between an initial security label and object class/name due
- * to the system setting.
+ * Open selabel_lookup(3) stuff. It provides a set of mapping between an
+ * initial security label and object class/name due to the system setting.
*/
if (PG_ARGISNULL(0))
{
@@ -506,8 +507,8 @@ sepgsql_restorecon(PG_FUNCTION_ARGS)
PG_TRY();
{
/*
- * Right now, we have no support labeling on the shared
- * database objects, such as database, role, or tablespace.
+ * Right now, we have no support labeling on the shared database
+ * objects, such as database, role, or tablespace.
*/
exec_object_restorecon(sehnd, NamespaceRelationId);
exec_object_restorecon(sehnd, RelationRelationId);

View File

@@ -67,8 +67,8 @@ sepgsql_proc_post_create(Oid functionId)
heap_close(rel, AccessShareLock);
/*
- * Compute a default security label when we create a new procedure
- * object under the specified namespace.
+ * Compute a default security label when we create a new procedure object
+ * under the specified namespace.
*/
scontext = sepgsql_get_client_label();
tcontext = sepgsql_get_label(NamespaceRelationId, namespaceId, 0);

View File

@@ -42,20 +42,21 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
ObjectAddress object;
/*
- * Only attributes within regular relation have individual
- * security labels.
+ * Only attributes within regular relation have individual security
+ * labels.
*/
if (get_rel_relkind(relOid) != RELKIND_RELATION)
return;
/*
- * Compute a default security label when we create a new procedure
- * object under the specified namespace.
+ * Compute a default security label when we create a new procedure object
+ * under the specified namespace.
*/
scontext = sepgsql_get_client_label();
tcontext = sepgsql_get_label(RelationRelationId, relOid, 0);
ncontext = sepgsql_compute_create(scontext, tcontext,
SEPG_CLASS_DB_COLUMN);
+
/*
* Assign the default security label on a new procedure
*/
@@ -140,8 +141,8 @@ sepgsql_relation_post_create(Oid relOid)
char *ccontext; /* column */
/*
- * Fetch catalog record of the new relation. Because pg_class entry is
- * not visible right now, we need to scan the catalog using SnapshotSelf.
+ * Fetch catalog record of the new relation. Because pg_class entry is not
+ * visible right now, we need to scan the catalog using SnapshotSelf.
*/
rel = heap_open(RelationRelationId, AccessShareLock);
@@ -169,8 +170,8 @@ sepgsql_relation_post_create(Oid relOid)
goto out; /* No need to assign individual labels */
/*
- * Compute a default security label when we create a new relation
- * object under the specified namespace.
+ * Compute a default security label when we create a new relation object
+ * under the specified namespace.
*/
scontext = sepgsql_get_client_label();
tcontext = sepgsql_get_label(NamespaceRelationId,
@@ -186,8 +187,8 @@ sepgsql_relation_post_create(Oid relOid)
SetSecurityLabel(&object, SEPGSQL_LABEL_TAG, rcontext);
/*
- * We also assigns a default security label on columns of the new
- * regular tables.
+ * We also assigns a default security label on columns of the new regular
+ * tables.
*/
if (classForm->relkind == RELKIND_RELATION)
{

View File

@@ -32,15 +32,15 @@ sepgsql_schema_post_create(Oid namespaceId)
ObjectAddress object;
/*
- * FIXME: Right now, we assume pg_database object has a fixed
- * security label, because pg_seclabel does not support to store
- * label of shared database objects.
+ * FIXME: Right now, we assume pg_database object has a fixed security
+ * label, because pg_seclabel does not support to store label of shared
+ * database objects.
*/
tcontext = "system_u:object_r:sepgsql_db_t:s0";
/*
- * Compute a default security label when we create a new schema
- * object under the working database.
+ * Compute a default security label when we create a new schema object
+ * under the working database.
*/
ncontext = sepgsql_compute_create(scontext, tcontext,
SEPG_CLASS_DB_SCHEMA);

View File

@@ -36,248 +36,556 @@ static struct
const char *av_name;
uint32 av_code;
} av[32];
- } selinux_catalog[] = {
+ } selinux_catalog[] =
+ {
{
"process", SEPG_CLASS_PROCESS,
{
- { "transition", SEPG_PROCESS__TRANSITION },
- { NULL, 0UL }
+ {
+ "transition", SEPG_PROCESS__TRANSITION
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
"file", SEPG_CLASS_FILE,
{
- { "read", SEPG_FILE__READ },
- { "write", SEPG_FILE__WRITE },
- { "create", SEPG_FILE__CREATE },
- { "getattr", SEPG_FILE__GETATTR },
- { "unlink", SEPG_FILE__UNLINK },
- { "rename", SEPG_FILE__RENAME },
- { "append", SEPG_FILE__APPEND },
- { NULL, 0UL }
+ {
+ "read", SEPG_FILE__READ
+ },
+ {
+ "write", SEPG_FILE__WRITE
+ },
+ {
+ "create", SEPG_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_FILE__RENAME
+ },
+ {
+ "append", SEPG_FILE__APPEND
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
"dir", SEPG_CLASS_DIR,
{
- { "read", SEPG_DIR__READ },
- { "write", SEPG_DIR__WRITE },
- { "create", SEPG_DIR__CREATE },
- { "getattr", SEPG_DIR__GETATTR },
- { "unlink", SEPG_DIR__UNLINK },
- { "rename", SEPG_DIR__RENAME },
- { "search", SEPG_DIR__SEARCH },
- { "add_name", SEPG_DIR__ADD_NAME },
- { "remove_name", SEPG_DIR__REMOVE_NAME },
- { "rmdir", SEPG_DIR__RMDIR },
- { "reparent", SEPG_DIR__REPARENT },
- { NULL, 0UL }
+ {
+ "read", SEPG_DIR__READ
+ },
+ {
+ "write", SEPG_DIR__WRITE
+ },
+ {
+ "create", SEPG_DIR__CREATE
+ },
+ {
+ "getattr", SEPG_DIR__GETATTR
+ },
+ {
+ "unlink", SEPG_DIR__UNLINK
+ },
+ {
+ "rename", SEPG_DIR__RENAME
+ },
+ {
+ "search", SEPG_DIR__SEARCH
+ },
+ {
+ "add_name", SEPG_DIR__ADD_NAME
+ },
+ {
+ "remove_name", SEPG_DIR__REMOVE_NAME
+ },
+ {
+ "rmdir", SEPG_DIR__RMDIR
+ },
+ {
+ "reparent", SEPG_DIR__REPARENT
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
"lnk_file", SEPG_CLASS_LNK_FILE,
{
- { "read", SEPG_LNK_FILE__READ },
- { "write", SEPG_LNK_FILE__WRITE },
- { "create", SEPG_LNK_FILE__CREATE },
- { "getattr", SEPG_LNK_FILE__GETATTR },
- { "unlink", SEPG_LNK_FILE__UNLINK },
- { "rename", SEPG_LNK_FILE__RENAME },
- { NULL, 0UL }
+ {
+ "read", SEPG_LNK_FILE__READ
+ },
+ {
+ "write", SEPG_LNK_FILE__WRITE
+ },
+ {
+ "create", SEPG_LNK_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_LNK_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_LNK_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_LNK_FILE__RENAME
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
"chr_file", SEPG_CLASS_CHR_FILE,
{
- { "read", SEPG_CHR_FILE__READ },
- { "write", SEPG_CHR_FILE__WRITE },
- { "create", SEPG_CHR_FILE__CREATE },
- { "getattr", SEPG_CHR_FILE__GETATTR },
- { "unlink", SEPG_CHR_FILE__UNLINK },
- { "rename", SEPG_CHR_FILE__RENAME },
- { NULL, 0UL }
+ {
+ "read", SEPG_CHR_FILE__READ
+ },
+ {
+ "write", SEPG_CHR_FILE__WRITE
+ },
+ {
+ "create", SEPG_CHR_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_CHR_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_CHR_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_CHR_FILE__RENAME
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
"blk_file", SEPG_CLASS_BLK_FILE,
{
- { "read", SEPG_BLK_FILE__READ },
- { "write", SEPG_BLK_FILE__WRITE },
- { "create", SEPG_BLK_FILE__CREATE },
- { "getattr", SEPG_BLK_FILE__GETATTR },
- { "unlink", SEPG_BLK_FILE__UNLINK },
- { "rename", SEPG_BLK_FILE__RENAME },
- { NULL, 0UL }
+ {
+ "read", SEPG_BLK_FILE__READ
+ },
+ {
+ "write", SEPG_BLK_FILE__WRITE
+ },
+ {
+ "create", SEPG_BLK_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_BLK_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_BLK_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_BLK_FILE__RENAME
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
"sock_file", SEPG_CLASS_SOCK_FILE,
{
- { "read", SEPG_SOCK_FILE__READ },
- { "write", SEPG_SOCK_FILE__WRITE },
- { "create", SEPG_SOCK_FILE__CREATE },
- { "getattr", SEPG_SOCK_FILE__GETATTR },
- { "unlink", SEPG_SOCK_FILE__UNLINK },
- { "rename", SEPG_SOCK_FILE__RENAME },
- { NULL, 0UL }
+ {
+ "read", SEPG_SOCK_FILE__READ
+ },
+ {
+ "write", SEPG_SOCK_FILE__WRITE
+ },
+ {
+ "create", SEPG_SOCK_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_SOCK_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_SOCK_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_SOCK_FILE__RENAME
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
"fifo_file", SEPG_CLASS_FIFO_FILE,
{
- { "read", SEPG_FIFO_FILE__READ },
- { "write", SEPG_FIFO_FILE__WRITE },
- { "create", SEPG_FIFO_FILE__CREATE },
- { "getattr", SEPG_FIFO_FILE__GETATTR },
- { "unlink", SEPG_FIFO_FILE__UNLINK },
- { "rename", SEPG_FIFO_FILE__RENAME },
- { NULL, 0UL }
+ {
+ "read", SEPG_FIFO_FILE__READ
+ },
+ {
+ "write", SEPG_FIFO_FILE__WRITE
+ },
+ {
+ "create", SEPG_FIFO_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_FIFO_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_FIFO_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_FIFO_FILE__RENAME
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
"db_database", SEPG_CLASS_DB_DATABASE,
{
- { "create", SEPG_DB_DATABASE__CREATE },
- { "drop", SEPG_DB_DATABASE__DROP },
- { "getattr", SEPG_DB_DATABASE__GETATTR },
- { "setattr", SEPG_DB_DATABASE__SETATTR },
- { "relabelfrom", SEPG_DB_DATABASE__RELABELFROM },
- { "relabelto", SEPG_DB_DATABASE__RELABELTO },
- { "access", SEPG_DB_DATABASE__ACCESS },
- { "load_module", SEPG_DB_DATABASE__LOAD_MODULE },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_DATABASE__CREATE
+ },
+ {
+ "drop", SEPG_DB_DATABASE__DROP
+ },
+ {
+ "getattr", SEPG_DB_DATABASE__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_DATABASE__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_DATABASE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_DATABASE__RELABELTO
+ },
+ {
+ "access", SEPG_DB_DATABASE__ACCESS
+ },
+ {
+ "load_module", SEPG_DB_DATABASE__LOAD_MODULE
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
"db_schema", SEPG_CLASS_DB_SCHEMA,
{
- { "create", SEPG_DB_SCHEMA__CREATE },
- { "drop", SEPG_DB_SCHEMA__DROP },
- { "getattr", SEPG_DB_SCHEMA__GETATTR },
- { "setattr", SEPG_DB_SCHEMA__SETATTR },
- { "relabelfrom", SEPG_DB_SCHEMA__RELABELFROM },
- { "relabelto", SEPG_DB_SCHEMA__RELABELTO },
- { "search", SEPG_DB_SCHEMA__SEARCH },
- { "add_name", SEPG_DB_SCHEMA__ADD_NAME },
- { "remove_name", SEPG_DB_SCHEMA__REMOVE_NAME },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_SCHEMA__CREATE
+ },
+ {
+ "drop", SEPG_DB_SCHEMA__DROP
+ },
+ {
+ "getattr", SEPG_DB_SCHEMA__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_SCHEMA__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_SCHEMA__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_SCHEMA__RELABELTO
+ },
+ {
+ "search", SEPG_DB_SCHEMA__SEARCH
+ },
+ {
+ "add_name", SEPG_DB_SCHEMA__ADD_NAME
+ },
+ {
+ "remove_name", SEPG_DB_SCHEMA__REMOVE_NAME
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
"db_table", SEPG_CLASS_DB_TABLE,
{
- { "create", SEPG_DB_TABLE__CREATE },
- { "drop", SEPG_DB_TABLE__DROP },
- { "getattr", SEPG_DB_TABLE__GETATTR },
- { "setattr", SEPG_DB_TABLE__SETATTR },
- { "relabelfrom", SEPG_DB_TABLE__RELABELFROM },
- { "relabelto", SEPG_DB_TABLE__RELABELTO },
- { "select", SEPG_DB_TABLE__SELECT },
- { "update", SEPG_DB_TABLE__UPDATE },
- { "insert", SEPG_DB_TABLE__INSERT },
- { "delete", SEPG_DB_TABLE__DELETE },
- { "lock", SEPG_DB_TABLE__LOCK },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_TABLE__CREATE
+ },
+ {
+ "drop", SEPG_DB_TABLE__DROP
+ },
+ {
+ "getattr", SEPG_DB_TABLE__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_TABLE__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_TABLE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_TABLE__RELABELTO
+ },
+ {
+ "select", SEPG_DB_TABLE__SELECT
+ },
+ {
+ "update", SEPG_DB_TABLE__UPDATE
+ },
+ {
+ "insert", SEPG_DB_TABLE__INSERT
+ },
+ {
+ "delete", SEPG_DB_TABLE__DELETE
+ },
+ {
+ "lock", SEPG_DB_TABLE__LOCK
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
"db_sequence", SEPG_CLASS_DB_SEQUENCE,
{
- { "create", SEPG_DB_SEQUENCE__CREATE },
- { "drop", SEPG_DB_SEQUENCE__DROP },
- { "getattr", SEPG_DB_SEQUENCE__GETATTR },
- { "setattr", SEPG_DB_SEQUENCE__SETATTR },
- { "relabelfrom", SEPG_DB_SEQUENCE__RELABELFROM },
- { "relabelto", SEPG_DB_SEQUENCE__RELABELTO },
- { "get_value", SEPG_DB_SEQUENCE__GET_VALUE },
- { "next_value", SEPG_DB_SEQUENCE__NEXT_VALUE },
- { "set_value", SEPG_DB_SEQUENCE__SET_VALUE },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_SEQUENCE__CREATE
+ },
+ {
+ "drop", SEPG_DB_SEQUENCE__DROP
+ },
+ {
+ "getattr", SEPG_DB_SEQUENCE__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_SEQUENCE__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_SEQUENCE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_SEQUENCE__RELABELTO
+ },
+ {
+ "get_value", SEPG_DB_SEQUENCE__GET_VALUE
+ },
+ {
+ "next_value", SEPG_DB_SEQUENCE__NEXT_VALUE
+ },
+ {
+ "set_value", SEPG_DB_SEQUENCE__SET_VALUE
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
"db_procedure", SEPG_CLASS_DB_PROCEDURE,
{
- { "create", SEPG_DB_PROCEDURE__CREATE },
- { "drop", SEPG_DB_PROCEDURE__DROP },
- { "getattr", SEPG_DB_PROCEDURE__GETATTR },
- { "setattr", SEPG_DB_PROCEDURE__SETATTR },
- { "relabelfrom", SEPG_DB_PROCEDURE__RELABELFROM },
- { "relabelto", SEPG_DB_PROCEDURE__RELABELTO },
- { "execute", SEPG_DB_PROCEDURE__EXECUTE },
- { "entrypoint", SEPG_DB_PROCEDURE__ENTRYPOINT },
- { "install", SEPG_DB_PROCEDURE__INSTALL },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_PROCEDURE__CREATE
+ },
+ {
+ "drop", SEPG_DB_PROCEDURE__DROP
+ },
+ {
+ "getattr", SEPG_DB_PROCEDURE__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_PROCEDURE__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_PROCEDURE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_PROCEDURE__RELABELTO
+ },
+ {
+ "execute", SEPG_DB_PROCEDURE__EXECUTE
+ },
+ {
+ "entrypoint", SEPG_DB_PROCEDURE__ENTRYPOINT
+ },
+ {
+ "install", SEPG_DB_PROCEDURE__INSTALL
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
"db_column", SEPG_CLASS_DB_COLUMN,
{
- { "create", SEPG_DB_COLUMN__CREATE },
- { "drop", SEPG_DB_COLUMN__DROP },
- { "getattr", SEPG_DB_COLUMN__GETATTR },
- { "setattr", SEPG_DB_COLUMN__SETATTR },
- { "relabelfrom", SEPG_DB_COLUMN__RELABELFROM },
- { "relabelto", SEPG_DB_COLUMN__RELABELTO },
- { "select", SEPG_DB_COLUMN__SELECT },
- { "update", SEPG_DB_COLUMN__UPDATE },
- { "insert", SEPG_DB_COLUMN__INSERT },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_COLUMN__CREATE
+ },
+ {
+ "drop", SEPG_DB_COLUMN__DROP
+ },
+ {
+ "getattr", SEPG_DB_COLUMN__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_COLUMN__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_COLUMN__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_COLUMN__RELABELTO
+ },
+ {
+ "select", SEPG_DB_COLUMN__SELECT
+ },
+ {
+ "update", SEPG_DB_COLUMN__UPDATE
+ },
+ {
+ "insert", SEPG_DB_COLUMN__INSERT
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
"db_tuple", SEPG_CLASS_DB_TUPLE,
{
- { "relabelfrom", SEPG_DB_TUPLE__RELABELFROM },
- { "relabelto", SEPG_DB_TUPLE__RELABELTO },
- { "select", SEPG_DB_TUPLE__SELECT },
- { "update", SEPG_DB_TUPLE__UPDATE },
- { "insert", SEPG_DB_TUPLE__INSERT },
- { "delete", SEPG_DB_TUPLE__DELETE },
- { NULL, 0UL },
+ {
+ "relabelfrom", SEPG_DB_TUPLE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_TUPLE__RELABELTO
+ },
+ {
+ "select", SEPG_DB_TUPLE__SELECT
+ },
+ {
+ "update", SEPG_DB_TUPLE__UPDATE
+ },
+ {
+ "insert", SEPG_DB_TUPLE__INSERT
+ },
+ {
+ "delete", SEPG_DB_TUPLE__DELETE
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
"db_blob", SEPG_CLASS_DB_BLOB,
{
- { "create", SEPG_DB_BLOB__CREATE },
- { "drop", SEPG_DB_BLOB__DROP },
- { "getattr", SEPG_DB_BLOB__GETATTR },
- { "setattr", SEPG_DB_BLOB__SETATTR },
- { "relabelfrom", SEPG_DB_BLOB__RELABELFROM },
- { "relabelto", SEPG_DB_BLOB__RELABELTO },
- { "read", SEPG_DB_BLOB__READ },
- { "write", SEPG_DB_BLOB__WRITE },
- { "import", SEPG_DB_BLOB__IMPORT },
- { "export", SEPG_DB_BLOB__EXPORT },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_BLOB__CREATE
+ },
+ {
+ "drop", SEPG_DB_BLOB__DROP
+ },
+ {
+ "getattr", SEPG_DB_BLOB__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_BLOB__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_BLOB__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_BLOB__RELABELTO
+ },
+ {
+ "read", SEPG_DB_BLOB__READ
+ },
+ {
+ "write", SEPG_DB_BLOB__WRITE
+ },
+ {
+ "import", SEPG_DB_BLOB__IMPORT
+ },
+ {
+ "export", SEPG_DB_BLOB__EXPORT
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
"db_language", SEPG_CLASS_DB_LANGUAGE,
{
- { "create", SEPG_DB_LANGUAGE__CREATE },
- { "drop", SEPG_DB_LANGUAGE__DROP },
- { "getattr", SEPG_DB_LANGUAGE__GETATTR },
- { "setattr", SEPG_DB_LANGUAGE__SETATTR },
- { "relabelfrom", SEPG_DB_LANGUAGE__RELABELFROM },
- { "relabelto", SEPG_DB_LANGUAGE__RELABELTO },
- { "implement", SEPG_DB_LANGUAGE__IMPLEMENT },
- { "execute", SEPG_DB_LANGUAGE__EXECUTE },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_LANGUAGE__CREATE
+ },
+ {
+ "drop", SEPG_DB_LANGUAGE__DROP
+ },
+ {
+ "getattr", SEPG_DB_LANGUAGE__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_LANGUAGE__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_LANGUAGE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_LANGUAGE__RELABELTO
+ },
+ {
+ "implement", SEPG_DB_LANGUAGE__IMPLEMENT
+ },
+ {
+ "execute", SEPG_DB_LANGUAGE__EXECUTE
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
"db_view", SEPG_CLASS_DB_VIEW,
{
- { "create", SEPG_DB_VIEW__CREATE },
- { "drop", SEPG_DB_VIEW__DROP },
- { "getattr", SEPG_DB_VIEW__GETATTR },
- { "setattr", SEPG_DB_VIEW__SETATTR },
- { "relabelfrom", SEPG_DB_VIEW__RELABELFROM },
- { "relabelto", SEPG_DB_VIEW__RELABELTO },
- { "expand", SEPG_DB_VIEW__EXPAND },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_VIEW__CREATE
+ },
+ {
+ "drop", SEPG_DB_VIEW__DROP
+ },
+ {
+ "getattr", SEPG_DB_VIEW__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_VIEW__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_VIEW__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_VIEW__RELABELTO
+ },
+ {
+ "expand", SEPG_DB_VIEW__EXPAND
+ },
+ {
+ NULL, 0UL
+ },
}
},
};
@@ -423,7 +731,8 @@ sepgsql_compute_avd(const char *scontext,
const char *tclass_name;
security_class_t tclass_ex;
struct av_decision avd_ex;
- int i, deny_unknown = security_deny_unknown();
+ int i,
+ deny_unknown = security_deny_unknown();
/* Get external code of the object class */
Assert(tclass < SEPG_CLASS_MAX);
@@ -436,8 +745,7 @@ sepgsql_compute_avd(const char *scontext,
{
/*
* If the current security policy does not support permissions
- * corresponding to database objects, we fill up them with dummy
- * data.
+ * corresponding to database objects, we fill up them with dummy data.
* If security_deny_unknown() returns positive value, undefined
* permissions should be denied. Otherwise, allowed
*/
@@ -464,9 +772,9 @@ sepgsql_compute_avd(const char *scontext,
/*
* SELinux returns its access control decision as a set of permissions
- * represented in external code which depends on run-time environment.
- * So, we need to translate it to the internal representation before
- * returning results for the caller.
+ * represented in external code which depends on run-time environment. So,
+ * we need to translate it to the internal representation before returning
+ * results for the caller.
*/
memset(avd, 0, sizeof(struct av_decision));
@@ -536,8 +844,8 @@ sepgsql_compute_create(const char *scontext,
tclass_ex = string_to_security_class(tclass_name);
/*
- * Ask SELinux what is the default context for the given object class
- * on a pair of security contexts
+ * Ask SELinux what is the default context for the given object class on a
+ * pair of security contexts
*/
if (security_compute_create_raw((security_context_t) scontext,
(security_context_t) tcontext,
@@ -549,8 +857,8 @@ sepgsql_compute_create(const char *scontext,
scontext, tcontext, tclass_name)));
/*
- * libselinux returns malloc()'ed string, so we need to copy it
- * on the palloc()'ed region.
+ * libselinux returns malloc()'ed string, so we need to copy it on the
+ * palloc()'ed region.
*/
PG_TRY();
{
@@ -610,8 +918,8 @@ sepgsql_check_perms(const char *scontext,
result = false;
/*
- * It records a security audit for the request, if needed.
- * But, when SE-PgSQL performs 'internal' mode, it needs to keep silent.
+ * It records a security audit for the request, if needed. But, when
+ * SE-PgSQL performs 'internal' mode, it needs to keep silent.
*/
if (audited && sepgsql_mode != SEPGSQL_MODE_INTERNAL)
{

View File

@@ -245,6 +245,7 @@ extern bool sepgsql_check_perms(const char *scontext,
uint32 required,
const char *audit_name,
bool abort);
+
/*
* label.c
*/

View File

@@ -42,7 +42,6 @@ extern void pgxml_parser_init(void);
/* local defs */
static const char **parse_params(text *paramstr);
#endif /* USE_LIBXSLT */

View File

@@ -82,6 +82,7 @@ ginqueryarrayextract(PG_FUNCTION_ARGS)
ArrayType *array = PG_GETARG_ARRAYTYPE_P_COPY(0);
int32 *nkeys = (int32 *) PG_GETARG_POINTER(1);
StrategyNumber strategy = PG_GETARG_UINT16(2);
+
/* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool **nullFlags = (bool **) PG_GETARG_POINTER(5);
@@ -142,10 +143,13 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
{
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* ArrayType *query = PG_GETARG_ARRAYTYPE_P(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
+
/* Datum *queryKeys = (Datum *) PG_GETARG_POINTER(6); */
bool *nullFlags = (bool *) PG_GETARG_POINTER(7);
bool res;
@@ -190,10 +194,11 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
case GinEqualStrategy:
/* we will need recheck */
*recheck = true;
+
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare
- * and array_eq handle nulls differently ...
+ * against nulls here. This is because array_contain_compare and
+ * array_eq handle nulls differently ...
*/
res = true;
for (i = 0; i < nkeys; i++)

View File

@@ -80,8 +80,8 @@ ginAllocEntryAccumulator(void *arg)
GinEntryAccumulator *ea;
/*
- * Allocate memory by rather big chunks to decrease overhead. We have
- * no need to reclaim RBNodes individually, so this costs nothing.
+ * Allocate memory by rather big chunks to decrease overhead. We have no
+ * need to reclaim RBNodes individually, so this costs nothing.
*/
if (accum->entryallocator == NULL || accum->eas_used >= DEF_NENTRY)
{
@@ -145,8 +145,8 @@ ginInsertBAEntry(BuildAccumulator *accum,
bool isNew;
/*
- * For the moment, fill only the fields of eatmp that will be looked at
- * by cmpEntryAccumulator or ginCombineData.
+ * For the moment, fill only the fields of eatmp that will be looked at by
+ * cmpEntryAccumulator or ginCombineData.
*/
eatmp.attnum = attnum;
eatmp.key = key;
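The allocator commented on above is plain arena-style chunking: grab a big block of accumulators at a time and hand them out one by one, never freeing individually. A generic sketch of the idea; the names are illustrative, CHUNK_SIZE stands in for DEF_NENTRY, and malloc error handling is omitted:

#include <stdlib.h>

#define CHUNK_SIZE 2048

typedef struct Entry
{
	int			payload;
} Entry;

static Entry *chunk = NULL;
static int	used = CHUNK_SIZE;

/* Hand out entries from large chunks; nothing is reclaimed individually. */
static Entry *
alloc_entry(void)
{
	if (used >= CHUNK_SIZE)
	{
		chunk = malloc(sizeof(Entry) * CHUNK_SIZE);
		used = 0;
	}
	return &chunk[used++];
}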

View File

@@ -383,6 +383,7 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
Page page = BufferGetPage(buf);
int sizeofitem = GinSizeOfDataPageItem(page);
int cnt = 0;
+
/* these must be static so they can be returned to caller */
static XLogRecData rdata[3];
static ginxlogInsert data;
@@ -474,6 +475,7 @@ dataSplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogRe
Size pageSize = PageGetPageSize(lpage);
Size freeSpace;
uint32 nCopied = 1;
+
/* these must be static so they can be returned to caller */
static ginxlogSplit data;
static XLogRecData rdata[4];
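The "must be static" comments being spaced out here flag a classic C lifetime rule: a pointer into an ordinary automatic array dangles the moment the function returns, while static storage persists. A minimal illustration, unrelated to the GIN code itself:

/* WRONG: rdata lives on the stack and dies when the function returns. */
static int *
broken(void)
{
	int			rdata[3] = {1, 2, 3};

	return rdata;				/* dangling pointer: undefined behavior */
}

/* OK: static storage persists, so the caller can safely use the result. */
static int *
works(void)
{
	static int	rdata[3] = {1, 2, 3};

	return rdata;
}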

View File

@@ -488,6 +488,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
Page page = BufferGetPage(buf);
OffsetNumber placed;
int cnt = 0;
+
/* these must be static so they can be returned to caller */
static XLogRecData rdata[3];
static ginxlogInsert data;
@@ -561,6 +562,7 @@ entrySplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogR
Page lpage = PageGetTempPageCopy(BufferGetPage(lbuf));
Page rpage = BufferGetPage(rbuf);
Size pageSize = PageGetPageSize(lpage);
+
/* these must be static so they can be returned to caller */
static XLogRecData rdata[2];
static ginxlogSplit data;

View File

@@ -88,9 +88,9 @@ writeListPage(Relation index, Buffer buffer,
GinPageGetOpaque(page)->rightlink = rightlink;
/*
- * tail page may contain only whole row(s) or final part of row placed
- * on previous pages (a "row" here meaning all the index tuples generated
- * for one heap tuple)
+ * tail page may contain only whole row(s) or final part of row placed on
+ * previous pages (a "row" here meaning all the index tuples generated for
+ * one heap tuple)
*/
if (rightlink == InvalidBlockNumber)
{
@@ -475,8 +475,8 @@ ginHeapTupleFastCollect(GinState *ginstate,
}
/*
- * Build an index tuple for each key value, and add to array. In
- * pending tuples we just stick the heap TID into t_tid.
+ * Build an index tuple for each key value, and add to array. In pending
+ * tuples we just stick the heap TID into t_tid.
*/
for (i = 0; i < nentries; i++)
{

View File

@@ -40,8 +40,8 @@ static bool
callConsistentFn(GinState *ginstate, GinScanKey key)
{
/*
- * If we're dealing with a dummy EVERYTHING key, we don't want to call
- * the consistentFn; just claim it matches.
+ * If we're dealing with a dummy EVERYTHING key, we don't want to call the
+ * consistentFn; just claim it matches.
*/
if (key->searchMode == GIN_SEARCH_MODE_EVERYTHING)
{
@@ -287,8 +287,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
* We should unlock current page (but not unpin) during tree scan
* to prevent deadlock with vacuum processes.
*
- * We save current entry value (idatum) to be able to re-find
- * our tuple after re-locking
+ * We save current entry value (idatum) to be able to re-find our
+ * tuple after re-locking
*/
if (icategory == GIN_CAT_NORM_KEY)
idatum = datumCopy(idatum, attr->attbyval, attr->attlen);
@@ -442,11 +442,11 @@ restartScanEntry:
Page page;
/*
- * We should unlock entry page before touching posting tree
- * to prevent deadlocks with vacuum processes. Because entry is
- * never deleted from page and posting tree is never reduced to
- * the posting list, we can unlock page after getting BlockNumber
- * of root of posting tree.
+ * We should unlock entry page before touching posting tree to
+ * prevent deadlocks with vacuum processes. Because entry is never
+ * deleted from page and posting tree is never reduced to the
+ * posting list, we can unlock page after getting BlockNumber of
+ * root of posting tree.
*/
LockBuffer(stackEntry->buffer, GIN_UNLOCK);
needUnlock = FALSE;
@@ -656,10 +656,10 @@ entryGetItem(GinState *ginstate, GinScanEntry entry)
}
/*
- * Reset counter to the beginning of entry->matchResult.
- * Note: entry->offset is still greater than
- * matchResult->ntuples if matchResult is lossy. So, on next
- * call we will get next result from TIDBitmap.
+ * Reset counter to the beginning of entry->matchResult. Note:
+ * entry->offset is still greater than matchResult->ntuples if
+ * matchResult is lossy. So, on next call we will get next
+ * result from TIDBitmap.
*/
entry->offset = 0;
}
@@ -745,10 +745,10 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key)
/*
* Find the minimum of the active entry curItems.
*
- * Note: a lossy-page entry is encoded by a ItemPointer with max value
- * for offset (0xffff), so that it will sort after any exact entries
- * for the same page. So we'll prefer to return exact pointers not
- * lossy pointers, which is good.
+ * Note: a lossy-page entry is encoded by a ItemPointer with max value for
+ * offset (0xffff), so that it will sort after any exact entries for the
+ * same page. So we'll prefer to return exact pointers not lossy
+ * pointers, which is good.
*/
ItemPointerSetMax(&minItem);
@ -782,28 +782,27 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key)
/*
* Lossy-page entries pose a problem, since we don't know the correct
* entryRes state to pass to the consistentFn, and we also don't know
* what its combining logic will be (could be AND, OR, or even NOT).
* If the logic is OR then the consistentFn might succeed for all
* items in the lossy page even when none of the other entries match.
* entryRes state to pass to the consistentFn, and we also don't know what
* its combining logic will be (could be AND, OR, or even NOT). If the
* logic is OR then the consistentFn might succeed for all items in the
* lossy page even when none of the other entries match.
*
* If we have a single lossy-page entry then we check to see if the
* consistentFn will succeed with only that entry TRUE. If so,
* we return a lossy-page pointer to indicate that the whole heap
* page must be checked. (On subsequent calls, we'll do nothing until
* minItem is past the page altogether, thus ensuring that we never return
* both regular and lossy pointers for the same page.)
* consistentFn will succeed with only that entry TRUE. If so, we return
* a lossy-page pointer to indicate that the whole heap page must be
* checked. (On subsequent calls, we'll do nothing until minItem is past
* the page altogether, thus ensuring that we never return both regular
* and lossy pointers for the same page.)
*
* This idea could be generalized to more than one lossy-page entry,
* but ideally lossy-page entries should be infrequent so it would
* seldom be the case that we have more than one at once. So it
* doesn't seem worth the extra complexity to optimize that case.
* If we do find more than one, we just punt and return a lossy-page
* pointer always.
* This idea could be generalized to more than one lossy-page entry, but
* ideally lossy-page entries should be infrequent so it would seldom be
* the case that we have more than one at once. So it doesn't seem worth
* the extra complexity to optimize that case. If we do find more than
* one, we just punt and return a lossy-page pointer always.
*
* Note that only lossy-page entries pointing to the current item's
* page should trigger this processing; we might have future lossy
* pages in the entry array, but they aren't relevant yet.
* Note that only lossy-page entries pointing to the current item's page
* should trigger this processing; we might have future lossy pages in the
* entry array, but they aren't relevant yet.
*/
ItemPointerSetLossyPage(&curPageLossy,
GinItemPointerGetBlockNumber(&key->curItem));
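Aside: the offset trick in the comment above can be modeled outside the
server. A minimal standalone C sketch (local types only, not the real
ItemPointerData) showing that an offset of 0xffff makes a lossy pointer
compare after every exact pointer on the same page:

    /*
     * Sketch of the lossy-page encoding: a (block, offset) pair, where
     * offset 0xffff means "the whole page".
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t block; uint16_t offset; } TidSketch;

    #define LOSSY_OFFSET 0xffff

    static int
    tid_cmp(TidSketch a, TidSketch b)
    {
        if (a.block != b.block)
            return a.block < b.block ? -1 : 1;
        if (a.offset != b.offset)
            return a.offset < b.offset ? -1 : 1;
        return 0;
    }

    int
    main(void)
    {
        TidSketch exact = {42, 7};            /* heap tuple (42,7) */
        TidSketch lossy = {42, LOSSY_OFFSET}; /* "whole page 42" */

        /* exact sorts first, so scans prefer exact pointers */
        printf("exact before lossy: %d\n", tid_cmp(exact, lossy) < 0);
        return 0;
    }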
@ -853,15 +852,14 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key)
}
/*
* At this point we know that we don't need to return a lossy
* whole-page pointer, but we might have matches for individual exact
* item pointers, possibly in combination with a lossy pointer. Our
* strategy if there's a lossy pointer is to try the consistentFn both
* ways and return a hit if it accepts either one (forcing the hit to
* be marked lossy so it will be rechecked). An exception is that
* we don't need to try it both ways if the lossy pointer is in a
* "hidden" entry, because the consistentFn's result can't depend on
* that.
* At this point we know that we don't need to return a lossy whole-page
* pointer, but we might have matches for individual exact item pointers,
* possibly in combination with a lossy pointer. Our strategy if there's
* a lossy pointer is to try the consistentFn both ways and return a hit
* if it accepts either one (forcing the hit to be marked lossy so it will
* be rechecked). An exception is that we don't need to try it both ways
* if the lossy pointer is in a "hidden" entry, because the consistentFn's
* result can't depend on that.
*
* Prepare entryRes array to be passed to consistentFn.
*/
@ -1011,8 +1009,8 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
break;
/*
* No hit. Update myAdvancePast to this TID, so that on the next
* pass we'll move to the next possible entry.
* No hit. Update myAdvancePast to this TID, so that on the next pass
* we'll move to the next possible entry.
*/
myAdvancePast = *item;
}
@ -1118,8 +1116,8 @@ scanGetCandidate(IndexScanDesc scan, pendingPosition *pos)
/*
* Now pos->firstOffset points to the first tuple of current heap
* row, pos->lastOffset points to the first tuple of next heap
* row (or to the end of page)
* row, pos->lastOffset points to the first tuple of next heap row
* (or to the end of page)
*/
break;
}
@ -1227,8 +1225,8 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
memset(pos->hasMatchKey, FALSE, so->nkeys);
/*
* Outer loop iterates over multiple pending-list pages when a single
* heap row has entries spanning those pages.
* Outer loop iterates over multiple pending-list pages when a single heap
* row has entries spanning those pages.
*/
for (;;)
{
@ -1322,11 +1320,11 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
if (res == 0)
{
/*
* Found exact match (there can be only one, except
* in EMPTY_QUERY mode).
* Found exact match (there can be only one, except in
* EMPTY_QUERY mode).
*
* If doing partial match, scan forward from
* here to end of page to check for matches.
* If doing partial match, scan forward from here to
* end of page to check for matches.
*
* See comment above about tuple's ordering.
*/
@ -1355,13 +1353,12 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
if (StopLow >= StopHigh && entry->isPartialMatch)
{
/*
* No exact match on this page. If doing partial
* match, scan from the first tuple greater than
* target value to end of page. Note that since we
* don't remember whether the comparePartialFn told us
* to stop early on a previous page, we will uselessly
* apply comparePartialFn to the first tuple on each
* subsequent page.
* No exact match on this page. If doing partial match,
* scan from the first tuple greater than target value to
* end of page. Note that since we don't remember whether
* the comparePartialFn told us to stop early on a
* previous page, we will uselessly apply comparePartialFn
* to the first tuple on each subsequent page.
*/
key->entryRes[j] =
matchPartialInPendingList(&so->ginstate,
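The pattern in this hunk, a binary search followed by a forward
partial-match scan from the first tuple greater than the target, in a
self-contained sketch; ints stand in for tuples and compare_partial() is
a made-up comparator in the spirit of comparePartialFn:

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in comparator: match anything in [probe, probe+10) */
    static int
    compare_partial(int probe, int candidate)
    {
        if (candidate < probe + 10)
            return 0;           /* match, keep scanning */
        return 1;               /* past the range: stop early */
    }

    static bool
    page_search(const int *keys, int n, int probe, bool partial)
    {
        int lo = 0, hi = n;     /* binary search, hi is exclusive */

        while (lo < hi)
        {
            int mid = lo + (hi - lo) / 2;

            if (keys[mid] == probe)
                return true;    /* exact match */
            if (keys[mid] < probe)
                lo = mid + 1;
            else
                hi = mid;
        }

        /* no exact match: optionally scan forward for partial matches */
        if (partial)
            for (; lo < n; lo++)
            {
                int r = compare_partial(probe, keys[lo]);

                if (r == 0)
                    return true;
                if (r > 0)
                    break;      /* comparator told us to stop */
            }
        return false;
    }

    int
    main(void)
    {
        int keys[] = {10, 20, 30, 40};

        printf("%d %d\n", page_search(keys, 4, 20, false),
               page_search(keys, 4, 25, true));
        return 0;
    }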

View File

@ -195,14 +195,14 @@ buildFreshLeafTuple(GinState *ginstate,
BlockNumber postingRoot;
/*
* Build posting-tree-only result tuple. We do this first so as
* to fail quickly if the key is too big.
* Build posting-tree-only result tuple. We do this first so as to
* fail quickly if the key is too big.
*/
res = GinFormTuple(ginstate, attnum, key, category, NULL, 0, true);
/*
* Initialize posting tree with as many TIDs as will fit on the
* first page.
* Initialize posting tree with as many TIDs as will fit on the first
* page.
*/
postingRoot = createPostingTree(ginstate->index,
items,

View File

@ -294,8 +294,8 @@ ginNewScanKey(IndexScanDesc scan)
int32 searchMode = GIN_SEARCH_MODE_DEFAULT;
/*
* We assume that GIN-indexable operators are strict, so a null
* query argument means an unsatisfiable query.
* We assume that GIN-indexable operators are strict, so a null query
* argument means an unsatisfiable query.
*/
if (skey->sk_flags & SK_ISNULL)
{
@ -315,8 +315,8 @@ ginNewScanKey(IndexScanDesc scan)
PointerGetDatum(&searchMode)));
/*
* If bogus searchMode is returned, treat as GIN_SEARCH_MODE_ALL;
* note in particular we don't allow extractQueryFn to select
* If bogus searchMode is returned, treat as GIN_SEARCH_MODE_ALL; note
* in particular we don't allow extractQueryFn to select
* GIN_SEARCH_MODE_EVERYTHING.
*/
if (searchMode < GIN_SEARCH_MODE_DEFAULT ||
@ -344,8 +344,8 @@ ginNewScanKey(IndexScanDesc scan)
* If the extractQueryFn didn't create a nullFlags array, create one,
* assuming that everything's non-null. Otherwise, run through the
* array and make sure each value is exactly 0 or 1; this ensures
* binary compatibility with the GinNullCategory representation.
* While at it, detect whether any null keys are present.
* binary compatibility with the GinNullCategory representation. While
* at it, detect whether any null keys are present.
*/
if (nullFlags == NULL)
nullFlags = (bool *) palloc0(nQueryValues * sizeof(bool));
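(palloc0 already zero-fills, so the fresh array is trivially valid; the
normalization only matters for a caller-supplied array.) A sketch of that
normalization, with a one-byte bool stand-in matching PostgreSQL's
char-sized bool; the names are illustrative, not the server's:

    #include <stdio.h>
    #include <string.h>

    typedef unsigned char pgbool;   /* PG's bool is one byte wide */

    static pgbool
    normalize_null_flags(pgbool *flags, int n)
    {
        pgbool have_null = 0;
        int    i;

        for (i = 0; i < n; i++)
            if (flags[i] != 0)
            {
                flags[i] = 1;   /* force the byte to exactly 0 or 1 */
                have_null = 1;
            }
        return have_null;
    }

    int
    main(void)
    {
        pgbool flags[3];

        memset(flags, 0, sizeof(flags));
        flags[1] = 5;           /* sloppy value from an extension */
        printf("have null: %d, flags[1] = %d\n",
               (int) normalize_null_flags(flags, 3), (int) flags[1]);
        return 0;
    }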
@ -410,6 +410,7 @@ ginrescan(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1);
/* remaining arguments are ignored */
GinScanOpaque so = (GinScanOpaque) scan->opaque;

View File

@ -359,9 +359,9 @@ cmpEntries(const void *a, const void *b, void *arg)
aa->datum, bb->datum));
/*
* Detect if we have any duplicates. If there are equal keys, qsort
* must compare them at some point, else it wouldn't know whether one
* should go before or after the other.
* Detect if we have any duplicates. If there are equal keys, qsort must
* compare them at some point, else it wouldn't know whether one should go
* before or after the other.
*/
if (res == 0)
data->haveDups = true;
@ -422,9 +422,9 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
/*
* If the extractValueFn didn't create a nullFlags array, create one,
* assuming that everything's non-null. Otherwise, run through the
* array and make sure each value is exactly 0 or 1; this ensures
* binary compatibility with the GinNullCategory representation.
* assuming that everything's non-null. Otherwise, run through the array
* and make sure each value is exactly 0 or 1; this ensures binary
* compatibility with the GinNullCategory representation.
*/
if (nullFlags == NULL)
nullFlags = (bool *) palloc0(*nentries * sizeof(bool));
@ -440,8 +440,8 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
* If there's more than one key, sort and unique-ify.
*
* XXX Using qsort here is notationally painful, and the overhead is
* pretty bad too. For small numbers of keys it'd likely be better to
* use a simple insertion sort.
* pretty bad too. For small numbers of keys it'd likely be better to use
* a simple insertion sort.
*/
if (*nentries > 1)
{
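A sketch of the insertion-sort alternative the XXX comment floats, over
plain ints rather than GIN entries; duplicate detection rides on the
comparisons, like the haveDups flag set in cmpEntries():

    #include <stdbool.h>
    #include <stdio.h>

    static int
    sort_unique(int *keys, int n, bool *have_dups)
    {
        int i, j;

        *have_dups = false;
        for (i = 1; i < n; i++) /* insertion sort: fine for small n */
        {
            int cur = keys[i];

            for (j = i; j > 0 && keys[j - 1] >= cur; j--)
            {
                if (keys[j - 1] == cur)
                    *have_dups = true;
                keys[j] = keys[j - 1];
            }
            keys[j] = cur;
        }

        if (!*have_dups)
            return n;

        /* squeeze out duplicates in one pass over the sorted array */
        for (i = 1, j = 0; i < n; i++)
            if (keys[i] != keys[j])
                keys[++j] = keys[i];
        return j + 1;
    }

    int
    main(void)
    {
        int  keys[] = {3, 1, 3, 2};
        bool dups;
        int  n = sort_unique(keys, 4, &dups);

        printf("n=%d dups=%d first=%d\n", n, dups, keys[0]);
        return 0;
    }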

View File

@ -306,13 +306,13 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
bool is_split;
/*
* Refuse to modify a page that's incompletely split. This should
* not happen because we finish any incomplete splits while we walk
* down the tree. However, it's remotely possible that another
* concurrent inserter splits a parent page, and errors out before
* completing the split. We will just throw an error in that case,
* and leave any split we had in progress unfinished too. The next
* insert that comes along will clean up the mess.
* Refuse to modify a page that's incompletely split. This should not
* happen because we finish any incomplete splits while we walk down the
* tree. However, it's remotely possible that another concurrent inserter
* splits a parent page, and errors out before completing the split. We
* will just throw an error in that case, and leave any split we had in
* progress unfinished too. The next insert that comes along will clean up
* the mess.
*/
if (GistFollowRight(page))
elog(ERROR, "concurrent GiST page split was incomplete");
@ -364,8 +364,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
/*
* Set up pages to work with. Allocate new buffers for all but the
* leftmost page. The original page becomes the new leftmost page,
* and is just replaced with the new contents.
* leftmost page. The original page becomes the new leftmost page, and
* is just replaced with the new contents.
*
* For a root-split, allocate new buffers for all child pages, the
* original page is overwritten with new root page containing
@ -443,6 +443,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
for (ptr = dist; ptr; ptr = ptr->next)
{
GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
si->buf = ptr->buffer;
si->downlink = ptr->itup;
*splitinfo = lappend(*splitinfo, si);
@ -456,6 +457,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
for (ptr = dist; ptr; ptr = ptr->next)
{
char *data = (char *) (ptr->list);
for (i = 0; i < ptr->block.num; i++)
{
if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, false, false) == InvalidOffsetNumber)
@ -495,8 +497,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
MarkBufferDirty(leftchildbuf);
/*
* The first page in the chain was a temporary working copy meant
* to replace the old page. Copy it over the old page.
* The first page in the chain was a temporary working copy meant to
* replace the old page. Copy it over the old page.
*/
PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
dist->page = BufferGetPage(dist->buffer);
@ -518,8 +520,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
* Return the new child buffers to the caller.
*
* If this was a root split, we've already inserted the downlink
* pointers, in the form of a new root page. Therefore we can
* release all the new buffers, and keep just the root page locked.
* pointers, in the form of a new root page. Therefore we can release
* all the new buffers, and keep just the root page locked.
*/
if (is_rootsplit)
{
@ -572,16 +574,16 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
/*
* If we inserted the downlink for a child page, set NSN and clear
* F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know
* to follow the rightlink if and only if they looked at the parent page
* F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know to
* follow the rightlink if and only if they looked at the parent page
* before we inserted the downlink.
*
* Note that we do this *after* writing the WAL record. That means that
* the possible full page image in the WAL record does not include
* these changes, and they must be replayed even if the page is restored
* from the full page image. There's a chicken-and-egg problem: if we
* updated the child pages first, we wouldn't know the recptr of the WAL
* record we're about to write.
* the possible full page image in the WAL record does not include these
* changes, and they must be replayed even if the page is restored from
* the full page image. There's a chicken-and-egg problem: if we updated
* the child pages first, we wouldn't know the recptr of the WAL record
* we're about to write.
*/
if (BufferIsValid(leftchildbuf))
{
@ -636,8 +638,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
stack->buffer = ReadBuffer(state.r, stack->blkno);
/*
* Be optimistic and grab shared lock first. Swap it for an
* exclusive lock later if we need to update the page.
* Be optimistic and grab shared lock first. Swap it for an exclusive
* lock later if we need to update the page.
*/
if (!xlocked)
{
@ -650,9 +652,9 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
Assert(!RelationNeedsWAL(state.r) || !XLogRecPtrIsInvalid(stack->lsn));
/*
* If this page was split but the downlink was never inserted to
* the parent because the inserting backend crashed before doing
* that, fix that now.
* If this page was split but the downlink was never inserted to the
* parent because the inserting backend crashed before doing that, fix
* that now.
*/
if (GistFollowRight(stack->page))
{
@ -680,8 +682,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
/*
* Concurrent split detected. There's no guarantee that the
* downlink for this page is consistent with the tuple we're
* inserting anymore, so go back to parent and rechoose the
* best child.
* inserting anymore, so go back to parent and rechoose the best
* child.
*/
UnlockReleaseBuffer(stack->buffer);
xlocked = false;
@ -722,8 +724,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
if (newtup)
{
/*
* Swap shared lock for an exclusive one. Beware, the page
* may change while we unlock/lock the page...
* Swap shared lock for an exclusive one. Beware, the page may
* change while we unlock/lock the page...
*/
if (!xlocked)
{
@ -738,6 +740,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
continue;
}
}
/*
* Update the tuple.
*
@ -752,8 +755,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
stack->childoffnum, InvalidBuffer))
{
/*
* If this was a root split, the root page continues to
* be the parent and the updated tuple went to one of the
* If this was a root split, the root page continues to be
* the parent and the updated tuple went to one of the
* child pages, so we just need to retry from the root
* page.
*/
@ -779,13 +782,13 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
{
/*
* Leaf page. Insert the new key. We've already updated all the
* parents on the way down, but we might have to split the page
* if it doesn't fit. gistinserthere() will take care of that.
* parents on the way down, but we might have to split the page if
* it doesn't fit. gistinserthere() will take care of that.
*/
/*
* Swap shared lock for an exclusive one. Be careful, the page
* may change while we unlock/lock the page...
* Swap shared lock for an exclusive one. Be careful, the page may
* change while we unlock/lock the page...
*/
if (!xlocked)
{
@ -798,8 +801,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
if (stack->blkno == GIST_ROOT_BLKNO)
{
/*
* the only page that can become inner instead of leaf
* is the root page, so for root we should recheck it
* the only page that can become inner instead of leaf is
* the root page, so for root we should recheck it
*/
if (!GistPageIsLeaf(stack->page))
{
@ -1069,11 +1072,13 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
{
IndexTuple ituple = (IndexTuple)
PageGetItem(page, PageGetItemId(page, offset));
if (downlink == NULL)
downlink = CopyIndexTuple(ituple);
else
{
IndexTuple newdownlink;
newdownlink = gistgetadjusted(rel, downlink, ituple,
giststate);
if (newdownlink)
@ -1082,15 +1087,14 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
}
/*
* If the page is completely empty, we can't form a meaningful
* downlink for it. But we have to insert a downlink for the page.
* Any key will do, as long as its consistent with the downlink of
* parent page, so that we can legally insert it to the parent.
* A minimal one that matches as few scans as possible would be best,
* to keep scans from doing useless work, but we don't know how to
* construct that. So we just use the downlink of the original page
* that was split - that's as far from optimal as it can get but will
* do..
* If the page is completely empty, we can't form a meaningful downlink
* for it. But we have to insert a downlink for the page. Any key will do,
* as long as it's consistent with the downlink of the parent page, so that
* we can legally insert it into the parent. A minimal one that matches as few
* scans as possible would be best, to keep scans from doing useless work,
* but we don't know how to construct that. So we just use the downlink of
* the original page that was split - that's as far from optimal as it can
* get, but it will do.
*/
if (!downlink)
{
@ -1131,8 +1135,8 @@ gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
buf = stack->buffer;
/*
* Read the chain of split pages, following the rightlinks. Construct
* a downlink tuple for each page.
* Read the chain of split pages, following the rightlinks. Construct a
* downlink tuple for each page.
*/
for (;;)
{
@ -1214,11 +1218,11 @@ gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
Assert(list_length(splitinfo) >= 2);
/*
* We need to insert downlinks for each new page, and update the
* downlink for the original (leftmost) page in the split. Begin at
* the rightmost page, inserting one downlink at a time until there's
* only two pages left. Finally insert the downlink for the last new
* page and update the downlink for the original page as one operation.
* We need to insert downlinks for each new page, and update the downlink
* for the original (leftmost) page in the split. Begin at the rightmost
* page, inserting one downlink at a time until there's only two pages
* left. Finally insert the downlink for the last new page and update the
* downlink for the original page as one operation.
*/
/* for convenience, create a copy of the list in reverse order */
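A toy model of that insertion order (no GiST structures, just page
names): walk the reversed list from the rightmost new page, and fold the
last new page's downlink insert together with the original page's
downlink update:

    #include <stdio.h>

    int
    main(void)
    {
        const char *pages[] = {"orig", "new1", "new2", "new3"};
        int         n = 4;
        int         i;

        for (i = n - 1; i >= 2; i--)
            printf("insert downlink for %s\n", pages[i]);

        printf("insert downlink for %s and update downlink for %s "
               "as one operation\n", pages[1], pages[0]);
        return 0;
    }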

View File

@ -62,9 +62,9 @@ gistindex_keytest(IndexScanDesc scan,
*recheck_p = false;
/*
* If it's a leftover invalid tuple from pre-9.1, treat it as a match
* with minimum possible distances. This means we'll always follow it
* to the referenced page.
* If it's a leftover invalid tuple from pre-9.1, treat it as a match with
* minimum possible distances. This means we'll always follow it to the
* referenced page.
*/
if (GistTupleIsInvalid(tuple))
{
@ -191,8 +191,8 @@ gistindex_keytest(IndexScanDesc scan,
* always be zero, but might as well pass it for possible future
* use.)
*
* Note that Distance functions don't get a recheck argument.
* We can't tolerate lossy distance calculations on leaf tuples;
* Note that Distance functions don't get a recheck argument. We
* can't tolerate lossy distance calculations on leaf tuples;
* there is no opportunity to re-sort the tuples afterwards.
*/
dist = FunctionCall4(&key->sk_func,
@ -525,8 +525,8 @@ gistgettuple(PG_FUNCTION_ARGS)
/*
* While scanning a leaf page, ItemPointers of matching heap
* tuples are stored in so->pageData. If there are any on
* this page, we fall out of the inner "do" and loop around
* to return them.
* this page, we fall out of the inner "do" and loop around to
* return them.
*/
gistScanPage(scan, item, so->curTreeItem->distances, NULL, NULL);
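The loop shape that comment describes, reduced to integers: an inner step
refills a pageData-like buffer from the current page, and the outer loop
drains it one item at a time, fetching another page when it runs dry.
All names here are local to the sketch:

    #include <stdio.h>

    #define MAX_PAGE_ITEMS 4

    static int cur_page = 0;

    /* stand-in for gistScanPage: each fake page yields two items */
    static int
    scan_page(int *buf)
    {
        if (cur_page >= 3)
            return 0;           /* no more pages */
        buf[0] = cur_page * 10;
        buf[1] = cur_page * 10 + 1;
        cur_page++;
        return 2;
    }

    int
    main(void)
    {
        int pageData[MAX_PAGE_ITEMS];
        int nPageData = 0, curPageData = 0;

        for (;;)
        {
            if (curPageData < nPageData)
            {
                printf("return item %d\n", pageData[curPageData++]);
                continue;
            }
            nPageData = scan_page(pageData);   /* refill from next page */
            curPageData = 0;
            if (nPageData == 0)
                break;                         /* scan exhausted */
        }
        return 0;
    }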

View File

@ -57,9 +57,9 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg)
/*
* If new item is heap tuple, it goes to front of chain; otherwise insert
* it before the first index-page item, so that index pages are visited
* in LIFO order, ensuring depth-first search of index pages. See
* comments in gist_private.h.
* it before the first index-page item, so that index pages are visited in
* LIFO order, ensuring depth-first search of index pages. See comments
* in gist_private.h.
*/
if (GISTSearchItemIsHeap(*newitem))
{
@ -136,6 +136,7 @@ gistrescan(PG_FUNCTION_ARGS)
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanKey key = (ScanKey) PG_GETARG_POINTER(1);
ScanKey orderbys = (ScanKey) PG_GETARG_POINTER(3);
/* nkeys and norderbys arguments are ignored */
GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
int i;
@ -164,8 +165,8 @@ gistrescan(PG_FUNCTION_ARGS)
scan->numberOfKeys * sizeof(ScanKeyData));
/*
* Modify the scan key so that the Consistent method is called for
* all comparisons. The original operator is passed to the Consistent
* Modify the scan key so that the Consistent method is called for all
* comparisons. The original operator is passed to the Consistent
* function in the form of its strategy number, which is available
* from the sk_strategy field, and its subtype from the sk_subtype
* field. Also, preserve sk_func.fn_collation which is the input

View File

@ -503,6 +503,7 @@ gistFormTuple(GISTSTATE *giststate, Relation r,
}
res = index_form_tuple(giststate->tupdesc, compatt, isnull);
/*
* The offset number on tuples on internal pages is unused. For historical
* reasons, it is set to 0xffff.

View File

@ -103,6 +103,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
{
int i;
OffsetNumber *todelete = (OffsetNumber *) data;
data += sizeof(OffsetNumber) * xldata->ntodelete;
for (i = 0; i < xldata->ntodelete; i++)
@ -116,11 +117,13 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
{
OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
OffsetNumberNext(PageGetMaxOffsetNumber(page));
while (data - begin < record->xl_len)
{
IndexTuple itup = (IndexTuple) data;
Size sz = IndexTupleSize(itup);
OffsetNumber l;
data += sz;
l = PageAddItem(page, (Item) itup, sz, off, false, false);
@ -540,8 +543,8 @@ gistXLogUpdate(RelFileNode node, Buffer buffer,
}
/*
* Include a full page image of the child buf. (only necessary if
* a checkpoint happened since the child page was split)
* Include a full page image of the child buf. (only necessary if a
* checkpoint happened since the child page was split)
*/
if (BufferIsValid(leftchildbuf))
{

View File

@ -413,6 +413,7 @@ hashrescan(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1);
/* remaining arguments are ignored */
HashScanOpaque so = (HashScanOpaque) scan->opaque;
Relation rel = scan->indexRelation;

View File

@ -1922,8 +1922,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* We're about to do the actual insert -- check for conflict at the
* relation or buffer level first, to avoid possibly having to roll
* back work we've just done.
* relation or buffer level first, to avoid possibly having to roll back
* work we've just done.
*/
CheckForSerializableConflictIn(relation, NULL, buffer);
@ -2228,8 +2228,8 @@ l1:
}
/*
* We're about to do the actual delete -- check for conflict first,
* to avoid possibly having to roll back work we've just done.
* We're about to do the actual delete -- check for conflict first, to
* avoid possibly having to roll back work we've just done.
*/
CheckForSerializableConflictIn(relation, &tp, buffer);
@ -2587,8 +2587,8 @@ l2:
}
/*
* We're about to do the actual update -- check for conflict first,
* to avoid possibly having to roll back work we've just done.
* We're about to do the actual update -- check for conflict first, to
* avoid possibly having to roll back work we've just done.
*/
CheckForSerializableConflictIn(relation, &oldtup, buffer);
@ -2737,8 +2737,8 @@ l2:
}
/*
* We're about to create the new tuple -- check for conflict first,
* to avoid possibly having to roll back work we've just done.
* We're about to create the new tuple -- check for conflict first, to
* avoid possibly having to roll back work we've just done.
*
* NOTE: For a tuple insert, we only need to check for table locks, since
* predicate locking at the index level will cover ranges for anything
@ -3860,12 +3860,12 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
}
/*
* Ignore tuples inserted by an aborted transaction or
* if the tuple was updated/deleted by the inserting transaction.
* Ignore tuples inserted by an aborted transaction or if the tuple was
* updated/deleted by the inserting transaction.
*
* Look for a committed hint bit, or if no xmin bit is set, check clog.
* This needs to work on both master and standby, where it is used
* to assess btree delete records.
* This needs to work on both master and standby, where it is used to
* assess btree delete records.
*/
if ((tuple->t_infomask & HEAP_XMIN_COMMITTED) ||
(!(tuple->t_infomask & HEAP_XMIN_COMMITTED) &&
@ -4158,8 +4158,8 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);
/*
* The page may be uninitialized. If so, we can't set the LSN
* and TLI because that would corrupt the page.
* The page may be uninitialized. If so, we can't set the LSN and TLI
* because that would corrupt the page.
*/
if (!PageIsNew(page))
{
@ -4352,8 +4352,8 @@ heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);
/*
* The page may be uninitialized. If so, we can't set the LSN
* and TLI because that would corrupt the page.
* The page may be uninitialized. If so, we can't set the LSN and TLI
* because that would corrupt the page.
*/
if (!PageIsNew(page))
{
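Why the guard matters, with a two-field stand-in for the page header: an
all-zeros page is recognized by pd_upper == 0 (which is what the real
PageIsNew() tests, to my reading), so stamping an LSN into it would make
the page look initialized even though the rest of it is not:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct
    {
        uint64_t pd_lsn;    /* stand-in for the real XLogRecPtr pair */
        uint16_t pd_upper;  /* 0 on an all-zeros (uninitialized) page */
    } PageHeaderSketch;

    static bool
    page_is_new(const PageHeaderSketch *p)
    {
        return p->pd_upper == 0;
    }

    int
    main(void)
    {
        PageHeaderSketch page;

        memset(&page, 0, sizeof(page));
        if (!page_is_new(&page))  /* guard: don't stamp a new page */
            page.pd_lsn = 0x12345;
        printf("still new: %d\n", page_is_new(&page));
        return 0;
    }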

View File

@ -179,8 +179,8 @@ top:
* The only conflict predicate locking cares about for indexes is when
* an index tuple insert conflicts with an existing lock. Since the
* actual location of the insert is hard to predict because of the
* random search used to prevent O(N^2) performance when there are many
* duplicate entries, we can just use the "first valid" page.
* random search used to prevent O(N^2) performance when there are
* many duplicate entries, we can just use the "first valid" page.
*/
CheckForSerializableConflictIn(rel, NULL, buf);
/* do the insertion */
@ -915,13 +915,13 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
/*
* origpage is the original page to be split. leftpage is a temporary
* buffer that receives the left-sibling data, which will be copied back
* into origpage on success. rightpage is the new page that receives
* the right-sibling data. If we fail before reaching the critical
* section, origpage hasn't been modified and leftpage is only workspace.
* In principle we shouldn't need to worry about rightpage either,
* because it hasn't been linked into the btree page structure; but to
* avoid leaving possibly-confusing junk behind, we are careful to rewrite
* rightpage as zeroes before throwing any error.
* into origpage on success. rightpage is the new page that receives the
* right-sibling data. If we fail before reaching the critical section,
* origpage hasn't been modified and leftpage is only workspace. In
* principle we shouldn't need to worry about rightpage either, because it
* hasn't been linked into the btree page structure; but to avoid leaving
* possibly-confusing junk behind, we are careful to rewrite rightpage as
* zeroes before throwing any error.
*/
origpage = BufferGetPage(buf);
leftpage = PageGetTempPage(origpage);
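The failure-safety pattern described above, shrunk to a byte buffer: all
fallible work happens on a scratch copy, and the original is overwritten
only once nothing can fail anymore:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SZ 16

    static bool
    split_page(char *origpage)
    {
        char leftpage[PAGE_SZ]; /* workspace, like PageGetTempPage */

        memcpy(leftpage, origpage, PAGE_SZ);

        /* ... fallible rearranging happens on leftpage only ... */
        leftpage[0] = 'L';
        if (leftpage[1] == 'X')
            return false;       /* error path: origpage untouched */

        /* "critical section": commit by copying the workspace back */
        memcpy(origpage, leftpage, PAGE_SZ);
        return true;
    }

    int
    main(void)
    {
        char page[PAGE_SZ] = "AB";

        printf("%d %c\n", split_page(page), page[0]);
        return 0;
    }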

View File

@ -1268,9 +1268,9 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
/*
* Check that the parent-page index items we're about to delete/overwrite
* contain what we expect. This can fail if the index has become
* corrupt for some reason. We want to throw any error before entering
* the critical section --- otherwise it'd be a PANIC.
* contain what we expect. This can fail if the index has become corrupt
* for some reason. We want to throw any error before entering the
* critical section --- otherwise it'd be a PANIC.
*
* The test on the target item is just an Assert because _bt_getstackbuf
* should have guaranteed it has the expected contents. The test on the

View File

@ -403,6 +403,7 @@ btrescan(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1);
/* remaining arguments are ignored */
BTScanOpaque so = (BTScanOpaque) scan->opaque;

View File

@ -70,8 +70,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
/*
* We can use the cached (default) support procs since no cross-type
* comparison can be needed. The cached support proc entries have
* the right collation for the index, too.
* comparison can be needed. The cached support proc entries have the
* right collation for the index, too.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
arg = index_getattr(itup, i + 1, itupdesc, &null);
@ -120,8 +120,8 @@ _bt_mkscankey_nodata(Relation rel)
/*
* We can use the cached (default) support procs since no cross-type
* comparison can be needed. The cached support proc entries have
* the right collation for the index, too.
* comparison can be needed. The cached support proc entries have the
* right collation for the index, too.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
flags = SK_ISNULL | (indoption[i] << SK_BT_INDOPTION_SHIFT);

View File

@ -1029,8 +1029,8 @@ EndPrepare(GlobalTransaction gxact)
/* If we crash now, we have prepared: WAL replay will fix things */
/*
* Wake up all walsenders to send WAL up to the PREPARE record
* immediately if replication is enabled
* Wake up all walsenders to send WAL up to the PREPARE record immediately
* if replication is enabled
*/
if (max_wal_senders > 0)
WalSndWakeup();
@ -2043,8 +2043,8 @@ RecordTransactionCommitPrepared(TransactionId xid,
/*
* Wait for synchronous replication, if required.
*
* Note that at this stage we have marked clog, but still show as
* running in the procarray and continue to hold locks.
* Note that at this stage we have marked clog, but still show as running
* in the procarray and continue to hold locks.
*/
SyncRepWaitForLSN(recptr);
}
@ -2130,8 +2130,8 @@ RecordTransactionAbortPrepared(TransactionId xid,
/*
* Wait for synchronous replication, if required.
*
* Note that at this stage we have marked clog, but still show as
* running in the procarray and continue to hold locks.
* Note that at this stage we have marked clog, but still show as running
* in the procarray and continue to hold locks.
*/
SyncRepWaitForLSN(recptr);
}

View File

@ -355,9 +355,9 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
char *oldest_datname;
/*
* We can be called when not inside a transaction, for example
* during StartupXLOG(). In such a case we cannot do database
* access, so we must just report the oldest DB's OID.
* We can be called when not inside a transaction, for example during
* StartupXLOG(). In such a case we cannot do database access, so we
* must just report the oldest DB's OID.
*
* Note: it's also possible that get_database_name fails and returns
* NULL, for example because the database just got dropped. We'll

View File

@ -432,8 +432,8 @@ AssignTransactionId(TransactionState s)
}
/*
* This is technically a recursive call, but the recursion will
* never be more than one layer deep.
* This is technically a recursive call, but the recursion will never
* be more than one layer deep.
*/
while (parentOffset != 0)
AssignTransactionId(parents[--parentOffset]);
@ -1037,16 +1037,17 @@ RecordTransactionCommit(void)
/*
* Check if we want to commit asynchronously. We can allow the XLOG flush
* to happen asynchronously if synchronous_commit=off, or if the current
* transaction has not performed any WAL-logged operation. The latter case
* can arise if the current transaction wrote only to temporary and/or
* unlogged tables. In case of a crash, the loss of such a transaction
* will be irrelevant since temp tables will be lost anyway, and unlogged
* tables will be truncated. (Given the foregoing, you might think that it
* would be unnecessary to emit the XLOG record at all in this case, but we
* don't currently try to do that. It would certainly cause problems at
* least in Hot Standby mode, where the KnownAssignedXids machinery
* requires tracking every XID assignment. It might be OK to skip it only
* when wal_level < hot_standby, but for now we don't.)
* transaction has not performed any WAL-logged operation. The latter
* case can arise if the current transaction wrote only to temporary
* and/or unlogged tables. In case of a crash, the loss of such a
* transaction will be irrelevant since temp tables will be lost anyway,
* and unlogged tables will be truncated. (Given the foregoing, you might
* think that it would be unnecessary to emit the XLOG record at all in
* this case, but we don't currently try to do that. It would certainly
* cause problems at least in Hot Standby mode, where the
* KnownAssignedXids machinery requires tracking every XID assignment. It
* might be OK to skip it only when wal_level < hot_standby, but for now
* we don't.)
*
* However, if we're doing cleanup of any non-temp rels or committing any
* command that wanted to force sync commit, then we must flush XLOG
@ -1130,8 +1131,8 @@ RecordTransactionCommit(void)
/*
* Wait for synchronous replication, if required.
*
* Note that at this stage we have marked clog, but still show as
* running in the procarray and continue to hold locks.
* Note that at this stage we have marked clog, but still show as running
* in the procarray and continue to hold locks.
*/
SyncRepWaitForLSN(XactLastRecEnd);
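The first paragraph of the RecordTransactionCommit comment above reduces
to a small decision table. A sketch with illustrative flags (stand-ins,
not the real GUC variables):

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    commit_must_flush_xlog(bool synchronous_commit,
                           bool wrote_wal,       /* any WAL-logged op? */
                           bool cleanup_nontemp, /* non-temp rel cleanup */
                           bool forced_sync)     /* command demanded sync */
    {
        /* these cases must always hit disk before reporting commit */
        if (cleanup_nontemp || forced_sync)
            return true;

        /* temp/unlogged-only transactions lose nothing in a crash */
        if (!wrote_wal)
            return false;

        return synchronous_commit;
    }

    int
    main(void)
    {
        printf("%d %d %d\n",
               commit_must_flush_xlog(false, true, false, false), /* async */
               commit_must_flush_xlog(false, true, true, false),  /* sync  */
               commit_must_flush_xlog(true, false, false, false)); /* async */
        return 0;
    }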
@ -1785,10 +1786,10 @@ CommitTransaction(void)
}
/*
* The remaining actions cannot call any user-defined code, so it's
* safe to start shutting down within-transaction services. But note
* that most of this stuff could still throw an error, which would
* switch us into the transaction-abort path.
* The remaining actions cannot call any user-defined code, so it's safe
* to start shutting down within-transaction services. But note that most
* of this stuff could still throw an error, which would switch us into
* the transaction-abort path.
*/
/* Shut down the deferred-trigger manager */
@ -1805,8 +1806,8 @@ CommitTransaction(void)
/*
* Mark serializable transaction as complete for predicate locking
* purposes. This should be done as late as we can put it and still
* allow errors to be raised for failure patterns found at commit.
* purposes. This should be done as late as we can put it and still allow
* errors to be raised for failure patterns found at commit.
*/
PreCommit_CheckForSerializationFailure();
@ -1988,10 +1989,10 @@ PrepareTransaction(void)
}
/*
* The remaining actions cannot call any user-defined code, so it's
* safe to start shutting down within-transaction services. But note
* that most of this stuff could still throw an error, which would
* switch us into the transaction-abort path.
* The remaining actions cannot call any user-defined code, so it's safe
* to start shutting down within-transaction services. But note that most
* of this stuff could still throw an error, which would switch us into
* the transaction-abort path.
*/
/* Shut down the deferred-trigger manager */
@ -2008,8 +2009,8 @@ PrepareTransaction(void)
/*
* Mark serializable transaction as complete for predicate locking
* purposes. This should be done as late as we can put it and still
* allow errors to be raised for failure patterns found at commit.
* purposes. This should be done as late as we can put it and still allow
* errors to be raised for failure patterns found at commit.
*/
PreCommit_CheckForSerializationFailure();

View File

@ -160,6 +160,7 @@ static XLogRecPtr LastRec;
* known, need to check the shared state".
*/
static bool LocalRecoveryInProgress = true;
/*
* Local copy of SharedHotStandbyActive variable. False actually means "not
* known, need to check the shared state".
@ -355,10 +356,9 @@ typedef struct XLogCtlInsert
/*
* exclusiveBackup is true if a backup started with pg_start_backup() is
* in progress, and nonExclusiveBackups is a counter indicating the number
* of streaming base backups currently in progress. forcePageWrites is
* set to true when either of these is non-zero. lastBackupStart is the
* latest checkpoint redo location used as a starting point for an online
* backup.
* of streaming base backups currently in progress. forcePageWrites is set
* to true when either of these is non-zero. lastBackupStart is the latest
* checkpoint redo location used as a starting point for an online backup.
*/
bool exclusiveBackup;
int nonExclusiveBackups;
@ -425,9 +425,9 @@ typedef struct XLogCtlData
bool SharedHotStandbyActive;
/*
* recoveryWakeupLatch is used to wake up the startup process to
* continue WAL replay, if it is waiting for WAL to arrive or failover
* trigger file to appear.
* recoveryWakeupLatch is used to wake up the startup process to continue
* WAL replay, if it is waiting for WAL to arrive or failover trigger file
* to appear.
*/
Latch recoveryWakeupLatch;
@ -4273,6 +4273,7 @@ static bool
rescanLatestTimeLine(void)
{
TimeLineID newtarget;
newtarget = findNewestTimeLine(recoveryTargetTLI);
if (newtarget != recoveryTargetTLI)
{
@ -4280,19 +4281,20 @@ rescanLatestTimeLine(void)
* Determine the list of expected TLIs for the new TLI
*/
List *newExpectedTLIs;
newExpectedTLIs = readTimeLineHistory(newtarget);
/*
* If the current timeline is not part of the history of the
* new timeline, we cannot proceed to it.
* If the current timeline is not part of the history of the new
* timeline, we cannot proceed to it.
*
* XXX This isn't foolproof: The new timeline might have forked from
* the current one, but before the current recovery location. In that
* case we will still switch to the new timeline and proceed replaying
* from it even though the history doesn't match what we already
* replayed. That's not good. We will likely notice at the next online
* checkpoint, as the TLI won't match what we expected, but it's
* not guaranteed. The admin needs to make sure that doesn't happen.
* checkpoint, as the TLI won't match what we expected, but it's not
* guaranteed. The admin needs to make sure that doesn't happen.
*/
if (!list_member_int(newExpectedTLIs,
(int) recoveryTargetTLI))
@ -4954,8 +4956,8 @@ XLOGShmemSize(void)
/*
* If the value of wal_buffers is -1, use the preferred auto-tune value.
* This isn't an amazingly clean place to do this, but we must wait till
* NBuffers has received its final value, and must do it before using
* the value of XLOGbuffers to do anything important.
* NBuffers has received its final value, and must do it before using the
* value of XLOGbuffers to do anything important.
*/
if (XLOGbuffers == -1)
{
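The comment only pins down the ordering constraint (the computation must
wait for NBuffers to be final). A sketch of an auto-tune rule of that
kind follows; the divisor and both clamps are assumptions for
illustration, not the server's exact formula:

    #include <stdio.h>

    static int
    choose_xlog_buffers(int NBuffers, int seg_pages)
    {
        int xbuffers = NBuffers / 32;   /* assumed scaling rule */

        if (xbuffers > seg_pages)       /* assumed cap: one WAL segment */
            xbuffers = seg_pages;
        if (xbuffers < 8)               /* assumed floor */
            xbuffers = 8;
        return xbuffers;
    }

    int
    main(void)
    {
        int XLOGbuffers = -1;

        if (XLOGbuffers == -1)          /* only then auto-tune */
            XLOGbuffers = choose_xlog_buffers(16384, 2048);
        printf("wal_buffers = %d pages\n", XLOGbuffers);
        return 0;
    }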
@ -5086,9 +5088,9 @@ BootStrapXLOG(void)
/*
* Set up information for the initial checkpoint record
*
* The initial checkpoint record is written to the beginning of the
* WAL segment with logid=0 logseg=1. The very first WAL segment, 0/0, is
* not used, so that we can use 0/0 to mean "before any valid WAL segment".
* The initial checkpoint record is written to the beginning of the WAL
* segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not
* used, so that we can use 0/0 to mean "before any valid WAL segment".
*/
checkPoint.redo.xlogid = 0;
checkPoint.redo.xrecoff = XLogSegSize + SizeOfXLogLongPHD;
@ -5610,8 +5612,8 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
if (recoveryTarget == RECOVERY_TARGET_UNSET)
{
/*
* Save timestamp of latest transaction commit/abort if this is
* a transaction record
* Save timestamp of latest transaction commit/abort if this is a
* transaction record
*/
if (record->xl_rmid == RM_XACT_ID)
SetLatestXTime(recordXtime);
@ -5636,8 +5638,8 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
else if (recoveryTarget == RECOVERY_TARGET_NAME)
{
/*
* There can be many restore points that share the same name, so we stop
* at the first one
* There can be many restore points that share the same name, so we
* stop at the first one
*/
stopsHere = (strcmp(recordRPName, recoveryTargetName) == 0);
@ -5705,8 +5707,8 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
}
/*
* Note that if we use a RECOVERY_TARGET_TIME then we can stop
* at a restore point since they are timestamped, though the latest
* Note that if we use a RECOVERY_TARGET_TIME then we can stop at a
* restore point since they are timestamped, though the latest
* transaction time is not updated.
*/
if (record->xl_rmid == RM_XACT_ID && recoveryStopAfter)
@ -6132,10 +6134,10 @@ StartupXLOG(void)
InRecovery = true; /* force recovery even if SHUTDOWNED */
/*
* Make sure that REDO location exists. This may not be
* the case if there was a crash during an online backup,
* which left a backup_label around that references a WAL
* segment that's already been archived.
* Make sure that REDO location exists. This may not be the case
* if there was a crash during an online backup, which left a
* backup_label around that references a WAL segment that's
* already been archived.
*/
if (XLByteLT(checkPoint.redo, checkPointLoc))
{
@ -6330,9 +6332,9 @@ StartupXLOG(void)
/*
* We're in recovery, so unlogged relations may be trashed
* and must be reset. This should be done BEFORE allowing Hot
* Standby connections, so that read-only backends don't try to
* read whatever garbage is left over from before.
* and must be reset. This should be done BEFORE allowing Hot Standby
* connections, so that read-only backends don't try to read whatever
* garbage is left over from before.
*/
ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
@ -6517,7 +6519,8 @@ StartupXLOG(void)
if (recoveryStopsHere(record, &recoveryApply))
{
/*
* Pause only if users can connect to send a resume message
* Pause only if users can connect to send a resume
* message
*/
if (recoveryPauseAtTarget && standbyState == STANDBY_SNAPSHOT_READY)
{
@ -7003,8 +7006,8 @@ HotStandbyActive(void)
{
/*
* We check shared state each time only until Hot Standby is active. We
* can't de-activate Hot Standby, so there's no need to keep checking after
* the shared variable has once been seen true.
* can't de-activate Hot Standby, so there's no need to keep checking
* after the shared variable has once been seen true.
*/
if (LocalHotStandbyActive)
return true;
@ -8898,24 +8901,26 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
PG_ENSURE_ERROR_CLEANUP(pg_start_backup_callback, (Datum) BoolGetDatum(exclusive));
{
bool gotUniqueStartpoint = false;
do
{
/*
* Force a CHECKPOINT. Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs will
* have different checkpoint positions and hence different history
* file names, even if nothing happened in between.
* page problems, this guarantees that two successive backup runs
* will have different checkpoint positions and hence different
* history file names, even if nothing happened in between.
*
* We use CHECKPOINT_IMMEDIATE only if requested by user (via passing
* fast = true). Otherwise this can take awhile.
* We use CHECKPOINT_IMMEDIATE only if requested by user (via
* passing fast = true). Otherwise this can take awhile.
*/
RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_WAIT |
(fast ? CHECKPOINT_IMMEDIATE : 0));
/*
* Now we need to fetch the checkpoint record location, and also its
* REDO pointer. The oldest point in WAL that would be needed to
* restore starting from the checkpoint is precisely the REDO pointer.
* Now we need to fetch the checkpoint record location, and also
* its REDO pointer. The oldest point in WAL that would be needed
* to restore starting from the checkpoint is precisely the REDO
* pointer.
*/
LWLockAcquire(ControlFileLock, LW_SHARED);
checkpointloc = ControlFile->checkPoint;
@ -8923,16 +8928,15 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
LWLockRelease(ControlFileLock);
/*
* If two base backups are started at the same time (in WAL
* sender processes), we need to make sure that they use
* different checkpoints as starting locations, because we use
* the starting WAL location as a unique identifier for the base
* backup in the end-of-backup WAL record and when we write the
* backup history file. Perhaps it would be better generate a
* separate unique ID for each backup instead of forcing another
* checkpoint, but taking a checkpoint right after another is
* not that expensive either because only few buffers have been
* dirtied yet.
* If two base backups are started at the same time (in WAL sender
* processes), we need to make sure that they use different
* checkpoints as starting locations, because we use the starting
* WAL location as a unique identifier for the base backup in the
* end-of-backup WAL record and when we write the backup history
* file. Perhaps it would be better to generate a separate unique ID
* for each backup instead of forcing another checkpoint, but
* taking a checkpoint right after another is not that expensive
* either, because only a few buffers have been dirtied yet.
*/
LWLockAcquire(WALInsertLock, LW_SHARED);
if (XLByteLT(XLogCtl->Insert.lastBackupStart, startpoint))
@ -8970,8 +8974,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
{
/*
* Check for existing backup label --- implies a backup is already
* running. (XXX given that we checked exclusiveBackup above, maybe
* it would be OK to just unlink any such label file?)
* running. (XXX given that we checked exclusiveBackup above,
* maybe it would be OK to just unlink any such label file?)
*/
if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0)
{
@ -10177,8 +10181,8 @@ retry:
}
/*
* If it hasn't been long since last attempt, sleep
* to avoid busy-waiting.
* If it hasn't been long since last attempt, sleep to
* avoid busy-waiting.
*/
now = (pg_time_t) time(NULL);
if ((now - last_fail_time) < 5)
@ -10446,8 +10450,8 @@ CheckPromoteSignal(void)
if (stat(PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
{
/*
* Since we are in a signal handler, it's not safe
* to elog. We silently ignore any error from unlink.
* Since we are in a signal handler, it's not safe to elog. We
* silently ignore any error from unlink.
*/
unlink(PROMOTE_SIGNAL_FILE);
return true;
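The same constraint in a runnable POSIX miniature: the handler restricts
itself to async-signal-safe calls (stat, unlink), sets a flag for the
main loop, and deliberately ignores the unlink result; the trigger-file
name is invented for the example:

    #include <signal.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    #define TRIGGER_FILE "promote.trigger"  /* illustrative name */

    static volatile sig_atomic_t promote_pending = 0;

    static void
    sigusr1_handler(int signo)
    {
        struct stat st;

        (void) signo;
        if (stat(TRIGGER_FILE, &st) == 0)
        {
            unlink(TRIGGER_FILE);  /* can't elog here; ignore errors */
            promote_pending = 1;   /* main loop does the real work */
        }
    }

    int
    main(void)
    {
        signal(SIGUSR1, sigusr1_handler);
        raise(SIGUSR1);
        printf("promote pending: %d\n", (int) promote_pending);
        return 0;
    }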

View File

@ -1011,8 +1011,8 @@ SetDefaultACLsInSchemas(InternalDefaultACL *iacls, List *nspnames)
/*
* Note that we must do the permissions check against the target
* role not the calling user. We require CREATE privileges,
* since without CREATE you won't be able to do anything using the
* role not the calling user. We require CREATE privileges, since
* without CREATE you won't be able to do anything using the
* default privs anyway.
*/
iacls->nspid = get_namespace_oid(nspname, false);

View File

@ -97,6 +97,7 @@ forkname_chars(const char *str, ForkNumber *fork)
for (forkNum = 1; forkNum <= MAX_FORKNUM; forkNum++)
{
int len = strlen(forkNames[forkNum]);
if (strncmp(forkNames[forkNum], str, len) == 0)
{
if (fork)

View File

@ -1021,8 +1021,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel)
/*
* Delete any comments or security labels associated with this object.
* (This is a convenient place to do these things, rather than having every
* object type know to do it.)
* (This is a convenient place to do these things, rather than having
* every object type know to do it.)
*/
DeleteComments(object->objectId, object->classId, object->objectSubId);
DeleteSecurityLabel(object);
@ -1431,8 +1431,8 @@ find_expr_references_walker(Node *node,
/*
* We must also depend on the constant's collation: it could be
* different from the datatype's, if a CollateExpr was const-folded
* to a simple constant. However we can save work in the most common
* different from the datatype's, if a CollateExpr was const-folded to
* a simple constant. However we can save work in the most common
* case where the collation is "default", since we know that's pinned.
*/
if (OidIsValid(con->constcollid) &&

View File

@ -541,8 +541,8 @@ CheckAttributeType(const char *attname,
}
/*
* This might not be strictly invalid per SQL standard, but it is
* pretty useless, and it cannot be dumped, so we must disallow it.
* This might not be strictly invalid per SQL standard, but it is pretty
* useless, and it cannot be dumped, so we must disallow it.
*/
if (!OidIsValid(attcollation) && type_is_collatable(atttypid))
ereport(ERROR,
@ -992,9 +992,9 @@ heap_create_with_catalog(const char *relname,
CheckAttributeNamesTypes(tupdesc, relkind, allow_system_table_mods);
/*
* If the relation already exists, it's an error, unless the user specifies
* "IF NOT EXISTS". In that case, we just print a notice and do nothing
* further.
* If the relation already exists, it's an error, unless the user
* specifies "IF NOT EXISTS". In that case, we just print a notice and do
* nothing further.
*/
existing_relid = get_relname_relid(relname, relnamespace);
if (existing_relid != InvalidOid)
@ -1048,8 +1048,8 @@ heap_create_with_catalog(const char *relname,
if (!OidIsValid(relid))
{
/*
* Use binary-upgrade override for pg_class.oid/relfilenode,
* if supplied.
* Use binary-upgrade override for pg_class.oid/relfilenode, if
* supplied.
*/
if (OidIsValid(binary_upgrade_next_heap_pg_class_oid) &&
(relkind == RELKIND_RELATION || relkind == RELKIND_SEQUENCE ||
@ -1285,12 +1285,12 @@ heap_create_with_catalog(const char *relname,
register_on_commit_action(relid, oncommit);
/*
* If this is an unlogged relation, it needs an init fork so that it
* can be correctly reinitialized on restart. Since we're going to
* do an immediate sync, we ony need to xlog this if archiving or
* streaming is enabled. And the immediate sync is required, because
* otherwise there's no guarantee that this will hit the disk before
* the next checkpoint moves the redo pointer.
* If this is an unlogged relation, it needs an init fork so that it can
* be correctly reinitialized on restart. Since we're going to do an
* immediate sync, we only need to xlog this if archiving or streaming is
* enabled. And the immediate sync is required, because otherwise there's
* no guarantee that this will hit the disk before the next checkpoint
* moves the redo pointer.
*/
if (relpersistence == RELPERSISTENCE_UNLOGGED)
{
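The three steps that comment prescribes, as ordered stubs (the real code
routes through smgrcreate, log_smgrcreate and smgrimmedsync; the stub
names below are this sketch's own). Order matters: create the fork,
WAL-log it only if archiving or streaming will replay it, then force it
to disk before the next checkpoint can move the redo pointer:

    #include <stdbool.h>
    #include <stdio.h>

    static void create_init_fork(void) { puts("smgrcreate(INIT_FORKNUM)"); }
    static void wal_log_creation(void) { puts("log_smgrcreate(...)"); }
    static void immediate_sync(void)   { puts("smgrimmedsync(INIT_FORKNUM)"); }

    int
    main(void)
    {
        bool wal_needed = true; /* archiving or streaming enabled? */

        create_init_fork();
        if (wal_needed)         /* xlog only when someone replays it */
            wal_log_creation();
        immediate_sync();       /* must hit disk before next checkpoint */
        return 0;
    }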
@ -1654,8 +1654,8 @@ heap_drop_with_catalog(Oid relid)
/*
* There can no longer be anyone *else* touching the relation, but we
* might still have open queries or cursors, or pending trigger events,
* in our own session.
* might still have open queries or cursors, or pending trigger events, in
* our own session.
*/
CheckTableNotInUse(rel, "DROP TABLE");

View File

@ -187,10 +187,10 @@ index_check_primary_key(Relation heapRel,
int i;
/*
* If ALTER TABLE, check that there isn't already a PRIMARY KEY. In
* CREATE TABLE, we have faith that the parser rejected multiple pkey
* clauses; and CREATE INDEX doesn't have a way to say PRIMARY KEY, so
* it's no problem either.
* If ALTER TABLE, check that there isn't already a PRIMARY KEY. In CREATE
* TABLE, we have faith that the parser rejected multiple pkey clauses;
* and CREATE INDEX doesn't have a way to say PRIMARY KEY, so it's no
* problem either.
*/
if (is_alter_table &&
relationHasPrimaryKey(heapRel))
@ -243,15 +243,14 @@ index_check_primary_key(Relation heapRel,
}
/*
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child
* tables? Currently, since the PRIMARY KEY itself doesn't cascade,
* we don't cascade the notnull constraint(s) either; but this is
* pretty debatable.
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child tables?
* Currently, since the PRIMARY KEY itself doesn't cascade, we don't
* cascade the notnull constraint(s) either; but this is pretty debatable.
*
* XXX: possible future improvement: when being called from ALTER
* TABLE, it would be more efficient to merge this with the outer
* ALTER TABLE, so as to avoid two scans. But that seems to
* complicate DefineIndex's API unduly.
* XXX: possible future improvement: when being called from ALTER TABLE,
* it would be more efficient to merge this with the outer ALTER TABLE, so
* as to avoid two scans. But that seems to complicate DefineIndex's API
* unduly.
*/
if (cmds)
AlterTableInternal(RelationGetRelid(heapRel), cmds, false);
@ -788,8 +787,8 @@ index_create(Relation heapRelation,
if (!OidIsValid(indexRelationId))
{
/*
* Use binary-upgrade override for pg_class.oid/relfilenode,
* if supplied.
* Use binary-upgrade override for pg_class.oid/relfilenode, if
* supplied.
*/
if (OidIsValid(binary_upgrade_next_index_pg_class_oid))
{
@ -1176,8 +1175,8 @@ index_constraint_create(Relation heapRelation,
/*
* If the constraint is deferrable, create the deferred uniqueness
* checking trigger. (The trigger will be given an internal
* dependency on the constraint by CreateTrigger.)
* checking trigger. (The trigger will be given an internal dependency on
* the constraint by CreateTrigger.)
*/
if (deferrable)
{
@ -1303,8 +1302,8 @@ index_drop(Oid indexId)
userIndexRelation = index_open(indexId, AccessExclusiveLock);
/*
* There can no longer be anyone *else* touching the index, but we
* might still have open queries using it in our own session.
* There can no longer be anyone *else* touching the index, but we might
* still have open queries using it in our own session.
*/
CheckTableNotInUse(userIndexRelation, "DROP INDEX");
@ -1740,6 +1739,7 @@ index_build(Relation heapRelation,
if (heapRelation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED)
{
RegProcedure ambuildempty = indexRelation->rd_am->ambuildempty;
RelationOpenSmgr(indexRelation);
smgrcreate(indexRelation->rd_smgr, INIT_FORKNUM, false);
OidFunctionCall1(ambuildempty, PointerGetDatum(indexRelation));

View File

@ -242,8 +242,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
/*
* If we're dealing with a relation or attribute, then the relation is
* already locked. If we're dealing with any other type of object, we need
* to lock it and then verify that it still exists.
* already locked. If we're dealing with any other type of object, we
* need to lock it and then verify that it still exists.
*/
if (address.classId != RelationRelationId)
{
@ -609,9 +609,9 @@ object_exists(ObjectAddress address)
/*
* For object types that have a relevant syscache, we use it; for
* everything else, we'll have to do an index-scan. This switch
* sets either the cache to be used for the syscache lookup, or the
* index to be used for the index scan.
* everything else, we'll have to do an index-scan. This switch sets
* either the cache to be used for the syscache lookup, or the index to be
* used for the index scan.
*/
switch (address.classId)
{
@ -664,6 +664,7 @@ object_exists(ObjectAddress address)
cache = OPFAMILYOID;
break;
case LargeObjectRelationId:
/*
* Weird backward compatibility hack: ObjectAddress notation uses
* LargeObjectRelationId for large objects, but since PostgreSQL
@ -851,6 +852,7 @@ check_object_ownership(Oid roleid, ObjectType objtype, ObjectAddress address,
NameListToString(objname));
break;
case OBJECT_ROLE:
/*
* We treat roles as being "owned" by those with CREATEROLE priv,
* except that superusers are only owned by superusers.

View File

@ -46,7 +46,9 @@ CollationCreate(const char *collname, Oid collnamespace,
HeapTuple tup;
Datum values[Natts_pg_collation];
bool nulls[Natts_pg_collation];
NameData name_name, name_collate, name_ctype;
NameData name_name,
name_collate,
name_ctype;
Oid oid;
ObjectAddress myself,
referenced;
@ -60,9 +62,9 @@ CollationCreate(const char *collname, Oid collnamespace,
/*
* Make sure there is no existing collation of same name & encoding.
*
* This would be caught by the unique index anyway; we're just giving
* a friendlier error message. The unique index provides a backstop
* against race conditions.
* This would be caught by the unique index anyway; we're just giving a
* friendlier error message. The unique index provides a backstop against
* race conditions.
*/
if (SearchSysCacheExists3(COLLNAMEENCNSP,
PointerGetDatum(collname),
@ -74,9 +76,9 @@ CollationCreate(const char *collname, Oid collnamespace,
collname, pg_encoding_to_char(collencoding))));
/*
* Also forbid matching an any-encoding entry. This test of course is
* not backed up by the unique index, but it's not a problem since we
* don't support adding any-encoding entries after initdb.
* Also forbid matching an any-encoding entry. This test of course is not
* backed up by the unique index, but it's not a problem since we don't
* support adding any-encoding entries after initdb.
*/
if (SearchSysCacheExists3(COLLNAMEENCNSP,
PointerGetDatum(collname),

View File

@ -58,9 +58,9 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
num_elems = list_length(vals);
/*
* We do not bother to check the list of values for duplicates --- if
* you have any, you'll get a less-than-friendly unique-index violation.
* It is probably not worth trying harder.
* We do not bother to check the list of values for duplicates --- if you
* have any, you'll get a less-than-friendly unique-index violation. It is
* probably not worth trying harder.
*/
pg_enum = heap_open(EnumRelationId, RowExclusiveLock);
@ -69,10 +69,9 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
* Allocate OIDs for the enum's members.
*
* While this method does not absolutely guarantee that we generate no
* duplicate OIDs (since we haven't entered each oid into the table
* before allocating the next), trouble could only occur if the OID
* counter wraps all the way around before we finish. Which seems
* unlikely.
* duplicate OIDs (since we haven't entered each oid into the table before
* allocating the next), trouble could only occur if the OID counter wraps
* all the way around before we finish. Which seems unlikely.
*/
oids = (Oid *) palloc(num_elems * sizeof(Oid));
@ -85,7 +84,8 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
*/
Oid new_oid;
do {
do
{
new_oid = GetNewOid(pg_enum);
} while (new_oid & 1);
oids[elemno] = new_oid;
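The do/while above retries until GetNewOid() hands back an even value; as a later hunk in this file notes, comparisons involving odd OIDs never take the fast path, so initial members always get even OIDs. A standalone sketch of the loop, with get_new_oid() as a stand-in counter rather than the backend's OID generator:

#include <stdio.h>

typedef unsigned int Oid;

static Oid
get_new_oid(void)
{
	static Oid counter = 16385;	/* arbitrary (odd) starting point */

	return counter++;
}

int
main(void)
{
	Oid	new_oid;

	do
	{
		new_oid = get_new_oid();
	} while (new_oid & 1);		/* retry until the OID is even */

	printf("assigned even OID %u\n", new_oid);
	return 0;
}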
@ -202,9 +202,9 @@ AddEnumLabel(Oid enumTypeOid,
/*
* Acquire a lock on the enum type, which we won't release until commit.
* This ensures that two backends aren't concurrently modifying the same
* enum type. Without that, we couldn't be sure to get a consistent
* view of the enum members via the syscache. Note that this does not
* block other backends from inspecting the type; see comments for
* enum type. Without that, we couldn't be sure to get a consistent view
* of the enum members via the syscache. Note that this does not block
* other backends from inspecting the type; see comments for
* RenumberEnumType.
*/
LockDatabaseObject(TypeRelationId, enumTypeOid, 0, ExclusiveLock);
@ -229,8 +229,8 @@ restart:
if (neighbor == NULL)
{
/*
* Put the new label at the end of the list.
* No change to existing tuples is required.
* Put the new label at the end of the list. No change to existing
* tuples is required.
*/
if (nelems > 0)
{
@ -265,14 +265,14 @@ restart:
nbr_en = (Form_pg_enum) GETSTRUCT(existing[nbr_index]);
/*
* Attempt to assign an appropriate enumsortorder value: one less
* than the smallest member, one more than the largest member,
* or halfway between two existing members.
* Attempt to assign an appropriate enumsortorder value: one less than
* the smallest member, one more than the largest member, or halfway
* between two existing members.
*
* In the "halfway" case, because of the finite precision of float4,
* we might compute a value that's actually equal to one or the
* other of its neighbors. In that case we renumber the existing
* members and try again.
* we might compute a value that's actually equal to one or the other
* of its neighbors. In that case we renumber the existing members
* and try again.
*/
if (newValIsAfter)
other_nbr_index = nbr_index + 1;
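The three placement rules in the comment above condense to a tiny function. A sketch under invented names (has_left/has_right say whether a neighbor exists on each side):

/* A minimal sketch of the three enumsortorder placement rules. */
static float
choose_sortorder(int has_left, float left, int has_right, float right)
{
	if (!has_left)
		return right - 1.0f;	/* one less than the smallest member */
	if (!has_right)
		return left + 1.0f;		/* one more than the largest member */
	return (left + right) / 2.0f;	/* halfway between the two neighbors */
}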
@ -291,10 +291,10 @@ restart:
/*
* On some machines, newelemorder may be in a register that's
* wider than float4. We need to force it to be rounded to
* float4 precision before making the following comparisons,
* or we'll get wrong results. (Such behavior violates the C
* standard, but fixing the compilers is out of our reach.)
* wider than float4. We need to force it to be rounded to float4
* precision before making the following comparisons, or we'll get
* wrong results. (Such behavior violates the C standard, but
* fixing the compilers is out of our reach.)
*/
newelemorder = DatumGetFloat4(Float4GetDatum(newelemorder));
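The forced narrowing above can be imitated outside the backend with a volatile store, which defeats the wider-register behavior the comment describes. A standalone sketch showing the collision case that forces a renumber (the neighbor values here are contrived):

#include <stdio.h>

/* Store through a volatile float so the value really is rounded to
 * float precision instead of staying in a wider register. */
static float
round_to_float(double wide)
{
	volatile float narrowed = (float) wide;

	return narrowed;
}

int
main(void)
{
	float	lo = 1.0f;
	float	hi = 1.0000001f;	/* the float right next to 1.0 */
	double	halfway = ((double) lo + (double) hi) / 2.0;
	float	newelemorder = round_to_float(halfway);

	/* The halfway point is not representable in float4, so after
	 * rounding it collides with a neighbor: time to renumber. */
	if (newelemorder == lo || newelemorder == hi)
		printf("collision: renumber existing members and retry\n");
	return 0;
}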
@ -314,9 +314,9 @@ restart:
if (OidIsValid(binary_upgrade_next_pg_enum_oid))
{
/*
* Use binary-upgrade override for pg_enum.oid, if supplied.
* During binary upgrade, all pg_enum.oid's are set this way
* so they are guaranteed to be consistent.
* Use binary-upgrade override for pg_enum.oid, if supplied. During
* binary upgrade, all pg_enum.oid's are set this way so they are
* guaranteed to be consistent.
*/
if (neighbor != NULL)
ereport(ERROR,
@ -345,8 +345,8 @@ restart:
/*
* Detect whether it sorts correctly relative to existing
* even-numbered labels of the enum. We can ignore existing
* labels with odd Oids, since a comparison involving one of
* those will not take the fast path anyway.
* labels with odd Oids, since a comparison involving one of those
* will not take the fast path anyway.
*/
sorts_ok = true;
for (i = 0; i < nelems; i++)
@ -385,9 +385,9 @@ restart:
break;
/*
* If it's odd, and sorts OK, loop back to get another OID
* and try again. Probably, the next available even OID
* will sort correctly too, so it's worth trying.
* If it's odd, and sorts OK, loop back to get another OID and
* try again. Probably, the next available even OID will sort
* correctly too, so it's worth trying.
*/
}
else

View File

@ -842,8 +842,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
if (!haspolyarg)
{
/*
* OK to do full precheck: analyze and rewrite the queries,
* then verify the result type.
* OK to do full precheck: analyze and rewrite the queries, then
* verify the result type.
*/
SQLFunctionParseInfoPtr pinfo;

View File

@ -386,9 +386,11 @@ AlterObjectNamespace(Relation rel, int oidCacheId, int nameCacheId,
{
Oid classId = RelationGetRelid(rel);
Oid oldNspOid;
Datum name, namespace;
Datum name,
namespace;
bool isnull;
HeapTuple tup, newtup;
HeapTuple tup,
newtup;
Datum *values;
bool *nulls;
bool *replaces;

View File

@ -813,11 +813,11 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
rwstate = begin_heap_rewrite(NewHeap, OldestXmin, FreezeXid, use_wal);
/*
* Decide whether to use an indexscan or seqscan-and-optional-sort to
* scan the OldHeap. We know how to use a sort to duplicate the ordering
* of a btree index, and will use seqscan-and-sort for that case if the
* planner tells us it's cheaper. Otherwise, always indexscan if an
* index is provided, else plain seqscan.
* Decide whether to use an indexscan or seqscan-and-optional-sort to scan
* the OldHeap. We know how to use a sort to duplicate the ordering of a
* btree index, and will use seqscan-and-sort for that case if the planner
* tells us it's cheaper. Otherwise, always indexscan if an index is
* provided, else plain seqscan.
*/
if (OldIndex != NULL && OldIndex->rd_rel->relam == BTREE_AM_OID)
use_sort = plan_cluster_use_sort(OIDOldHeap, OIDOldIndex);
@ -869,8 +869,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
/*
* Scan through the OldHeap, either in OldIndex order or sequentially;
* copy each tuple into the NewHeap, or transiently to the tuplesort
* module. Note that we don't bother sorting dead tuples (they won't
* get to the new table anyway).
* module. Note that we don't bother sorting dead tuples (they won't get
* to the new table anyway).
*/
for (;;)
{
@ -984,8 +984,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
heap_endscan(heapScan);
/*
* In scan-and-sort mode, complete the sort, then read out all live
* tuples from the tuplestore and write them to the new relation.
* In scan-and-sort mode, complete the sort, then read out all live tuples
* from the tuplestore and write them to the new relation.
*/
if (tuplesort != NULL)
{

View File

@ -46,12 +46,13 @@ CommentObject(CommentStmt *stmt)
* (which is really pg_restore's fault, but for now we will work around
* the problem here). Consensus is that the best fix is to treat wrong
* database name as a WARNING not an ERROR; hence, the following special
* case. (If the length of stmt->objname is not 1, get_object_address will
* throw an error below; that's OK.)
* case. (If the length of stmt->objname is not 1, get_object_address
* will throw an error below; that's OK.)
*/
if (stmt->objtype == OBJECT_DATABASE && list_length(stmt->objname) == 1)
{
char *database = strVal(linitial(stmt->objname));
if (!OidIsValid(get_database_oid(database, true)))
{
ereport(WARNING,
@ -62,10 +63,10 @@ CommentObject(CommentStmt *stmt)
}
/*
* Translate the parser representation that identifies this object into
* an ObjectAddress. get_object_address() will throw an error if the
* object does not exist, and will also acquire a lock on the target
* to guard against concurrent DROP operations.
* Translate the parser representation that identifies this object into an
* ObjectAddress. get_object_address() will throw an error if the object
* does not exist, and will also acquire a lock on the target to guard
* against concurrent DROP operations.
*/
address = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
&relation, ShareUpdateExclusiveLock);
@ -78,6 +79,7 @@ CommentObject(CommentStmt *stmt)
switch (stmt->objtype)
{
case OBJECT_COLUMN:
/*
* Allow comments only on columns of tables, views, composite
* types, and foreign tables (which are the only relkinds for

View File

@ -335,7 +335,8 @@ AlterConversionOwner_internal(Relation rel, Oid conversionOid, Oid newOwnerId)
void
AlterConversionNamespace(List *name, const char *newschema)
{
Oid convOid, nspOid;
Oid convOid,
nspOid;
Relation rel;
rel = heap_open(ConversionRelationId, RowExclusiveLock);

View File

@ -1136,8 +1136,8 @@ BeginCopy(bool is_from,
cstate = (CopyStateData *) palloc0(sizeof(CopyStateData));
/*
* We allocate everything used by a cstate in a new memory context.
* This avoids memory leaks during repeated use of COPY in a query.
* We allocate everything used by a cstate in a new memory context. This
* avoids memory leaks during repeated use of COPY in a query.
*/
cstate->copycontext = AllocSetContextCreate(CurrentMemoryContext,
"COPY",
@ -1300,9 +1300,9 @@ BeginCopy(bool is_from,
cstate->file_encoding = pg_get_client_encoding();
/*
* Set up encoding conversion info. Even if the file and server
* encodings are the same, we must apply pg_any_to_server() to validate
* data in multibyte encodings.
* Set up encoding conversion info. Even if the file and server encodings
* are the same, we must apply pg_any_to_server() to validate data in
* multibyte encodings.
*/
cstate->need_transcoding =
(cstate->file_encoding != GetDatabaseEncoding() ||
@ -2300,9 +2300,9 @@ NextCopyFromRawFields(CopyState cstate, char ***fields, int *nfields)
done = CopyReadLine(cstate);
/*
* EOF at start of line means we're done. If we see EOF after
* some characters, we act as though it was newline followed by
* EOF, ie, process the line and then exit loop on next iteration.
* EOF at start of line means we're done. If we see EOF after some
* characters, we act as though it was newline followed by EOF, ie,
* process the line and then exit loop on next iteration.
*/
if (done && cstate->line_buf.len == 0)
return false;
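The EOF rule above is easy to restate as a small reader. A toy standalone version (fixed buffer, no quoting or escapes), where returning false on an empty final read is the "EOF at start of line" case:

#include <stdbool.h>
#include <stdio.h>

/* Returns false when EOF is hit on an empty line (copy is done);
 * otherwise returns true with one line, possibly the unterminated last
 * one, which the caller processes before stopping on the next call. */
static bool
read_copy_line(FILE *fp, char *buf, size_t bufsz, size_t *len)
{
	int		c;

	*len = 0;
	while ((c = fgetc(fp)) != EOF && c != '\n')
		if (*len < bufsz - 1)
			buf[(*len)++] = (char) c;
	buf[*len] = '\0';

	if (c == EOF && *len == 0)
		return false;			/* EOF at start of line: we're done */
	return true;
}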
@ -2456,16 +2456,16 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
if (fld_count == -1)
{
/*
* Received EOF marker. In a V3-protocol copy, wait for
* the protocol-level EOF, and complain if it doesn't come
* immediately. This ensures that we correctly handle
* CopyFail, if client chooses to send that now.
* Received EOF marker. In a V3-protocol copy, wait for the
* protocol-level EOF, and complain if it doesn't come
* immediately. This ensures that we correctly handle CopyFail,
* if client chooses to send that now.
*
* Note that we MUST NOT try to read more data in an
* old-protocol copy, since there is no protocol-level EOF
* marker then. We could go either way for copy from file,
* but choose to throw error if there's data after the EOF
* marker, for consistency with the new-protocol case.
* Note that we MUST NOT try to read more data in an old-protocol
* copy, since there is no protocol-level EOF marker then. We
* could go either way for copy from file, but choose to throw
* error if there's data after the EOF marker, for consistency
* with the new-protocol case.
*/
char dummy;
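A standalone sketch of the consistency rule chosen above for file input: after the EOF marker, one more read attempt must come back empty, otherwise we complain. (Toy code over a stdio stream; the backend reads from its own copy buffers.)

#include <stdio.h>

/* Returns 0 on a clean end-of-copy, -1 if bytes follow the EOF marker. */
static int
check_nothing_after_eof_marker(FILE *fp)
{
	char	dummy;

	if (fread(&dummy, 1, 1, fp) > 0)
	{
		fprintf(stderr, "received copy data after EOF marker\n");
		return -1;
	}
	return 0;
}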
@ -2524,8 +2524,8 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
/*
* Now compute and insert any defaults available for the columns not
* provided by the input data. Anything not processed here or above
* will remain NULL.
* provided by the input data. Anything not processed here or above will
* remain NULL.
*/
for (i = 0; i < num_defaults; i++)
{

View File

@ -237,8 +237,8 @@ check_valid_extension_name(const char *extensionname)
int namelen = strlen(extensionname);
/*
* Disallow empty names (the parser rejects empty identifiers anyway,
* but let's check).
* Disallow empty names (the parser rejects empty identifiers anyway, but
* let's check).
*/
if (namelen == 0)
ereport(ERROR,
@ -256,10 +256,10 @@ check_valid_extension_name(const char *extensionname)
errdetail("Extension names must not contain \"--\".")));
/*
* No leading or trailing dash either. (We could probably allow this,
* but it would require much care in filename parsing and would make
* filenames visually if not formally ambiguous. Since there's no
* real-world use case, let's just forbid it.)
* No leading or trailing dash either. (We could probably allow this, but
* it would require much care in filename parsing and would make filenames
* visually if not formally ambiguous. Since there's no real-world use
* case, let's just forbid it.)
*/
if (extensionname[0] == '-' || extensionname[namelen - 1] == '-')
ereport(ERROR,
@ -809,8 +809,8 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
* so that we won't spam the user with useless NOTICE messages from common
* script actions like creating shell types.
*
* We use the equivalent of SET LOCAL to ensure the setting is undone
* upon error.
* We use the equivalent of SET LOCAL to ensure the setting is undone upon
* error.
*/
save_client_min_messages =
pstrdup(GetConfigOption("client_min_messages", false));
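For the normal control-flow path, the SET LOCAL equivalence above is the familiar save-override-restore pattern. A toy sketch with a plain global standing in for the GUC (real SET LOCAL also unwinds automatically on error, which this sketch does not model):

static const char *client_min_messages = "notice";	/* stand-in "GUC" */

static int
run_script_quietly(int (*script) (void))
{
	const char *saved = client_min_messages;
	int			rc;

	client_min_messages = "warning";	/* hush routine NOTICE chatter */
	rc = script();
	client_min_messages = saved;	/* put the old value back */
	return rc;
}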
@ -832,8 +832,8 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
* makes the target schema be the default creation target namespace.
*
* Note: it might look tempting to use PushOverrideSearchPath for this,
* but we cannot do that. We have to actually set the search_path GUC
* in case the extension script examines or changes it.
* but we cannot do that. We have to actually set the search_path GUC in
* case the extension script examines or changes it.
*/
save_search_path = pstrdup(GetConfigOption("search_path", false));
@ -867,9 +867,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
* If it's not relocatable, substitute the target schema name for
* occurrences of @extschema@.
*
* For a relocatable extension, we just run the script as-is.
* There cannot be any need for @extschema@, else it wouldn't
* be relocatable.
* For a relocatable extension, we just run the script as-is. There
* cannot be any need for @extschema@, else it wouldn't be
* relocatable.
*/
if (!control->relocatable)
{
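The substitution step described above amounts to a token replace over the script text. A toy fixed-buffer sketch (the backend uses its own string machinery, and a real version must also quote the schema name safely):

#include <string.h>

static void
substitute_extschema(const char *script, const char *schema,
					 char *out, size_t outsz)
{
	const char *token = "@extschema@";
	size_t		tlen = strlen(token);

	out[0] = '\0';
	while (*script)
	{
		if (strncmp(script, token, tlen) == 0)
		{
			strncat(out, schema, outsz - strlen(out) - 1);
			script += tlen;
		}
		else
		{
			size_t		used = strlen(out);

			if (used + 1 < outsz)
			{
				out[used] = *script;
				out[used + 1] = '\0';
			}
			script++;
		}
	}
}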
@ -1197,8 +1197,8 @@ CreateExtension(CreateExtensionStmt *stmt)
/*
* Check for duplicate extension name. The unique index on
* pg_extension.extname would catch this anyway, and serves as a backstop
* in case of race conditions; but this is a friendlier error message,
* and besides we need a check to support IF NOT EXISTS.
* in case of race conditions; but this is a friendlier error message, and
* besides we need a check to support IF NOT EXISTS.
*/
if (get_extension_oid(stmt->extname, true) != InvalidOid)
{
@ -1218,8 +1218,8 @@ CreateExtension(CreateExtensionStmt *stmt)
}
/*
* We use global variables to track the extension being created, so we
* can create only one extension at the same time.
* We use global variables to track the extension being created, so we can
* create only one extension at the same time.
*/
if (creating_extension)
ereport(ERROR,
@ -1306,8 +1306,8 @@ CreateExtension(CreateExtensionStmt *stmt)
if (list_length(updateVersions) == 1)
{
/*
* Simple case where there's just one update script to run.
* We will not need any follow-on update steps.
* Simple case where there's just one update script to run. We
* will not need any follow-on update steps.
*/
Assert(strcmp((char *) linitial(updateVersions), versionName) == 0);
updateVersions = NIL;
@ -1397,13 +1397,13 @@ CreateExtension(CreateExtensionStmt *stmt)
* extension script actually creates any objects there, it will fail if
* the user doesn't have such permissions. But there are cases such as
* procedural languages where it's convenient to set schema = pg_catalog
* yet we don't want to restrict the command to users with ACL_CREATE
* for pg_catalog.
* yet we don't want to restrict the command to users with ACL_CREATE for
* pg_catalog.
*/
/*
* Look up the prerequisite extensions, and build lists of their OIDs
* and the OIDs of their target schemas.
* Look up the prerequisite extensions, and build lists of their OIDs and
* the OIDs of their target schemas.
*/
requiredExtensions = NIL;
requiredSchemas = NIL;
@ -1453,8 +1453,8 @@ CreateExtension(CreateExtensionStmt *stmt)
schemaName, schemaOid);
/*
* If additional update scripts have to be executed, apply the updates
* as though a series of ALTER EXTENSION UPDATE commands were given
* If additional update scripts have to be executed, apply the updates as
* though a series of ALTER EXTENSION UPDATE commands were given
*/
ApplyExtensionUpdates(extensionOid, pcontrol,
versionName, updateVersions);
@ -1702,8 +1702,8 @@ pg_available_extensions(PG_FUNCTION_ARGS)
dir = AllocateDir(location);
/*
* If the control directory doesn't exist, we want to silently return
* an empty set. Any other error will be reported by ReadDir.
* If the control directory doesn't exist, we want to silently return an
* empty set. Any other error will be reported by ReadDir.
*/
if (dir == NULL && errno == ENOENT)
{
@ -1811,8 +1811,8 @@ pg_available_extension_versions(PG_FUNCTION_ARGS)
dir = AllocateDir(location);
/*
* If the control directory doesn't exist, we want to silently return
* an empty set. Any other error will be reported by ReadDir.
* If the control directory doesn't exist, we want to silently return an
* empty set. Any other error will be reported by ReadDir.
*/
if (dir == NULL && errno == ENOENT)
{
@ -2092,8 +2092,8 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
ArrayType *a;
/*
* We only allow this to be called from an extension's SQL script.
* We shouldn't need any permissions check beyond that.
* We only allow this to be called from an extension's SQL script. We
* shouldn't need any permissions check beyond that.
*/
if (!creating_extension)
ereport(ERROR,
@ -2103,8 +2103,8 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
/*
* Check that the table exists and is a member of the extension being
* created. This ensures that we don't need to register a dependency
* to protect the extconfig entry.
* created. This ensures that we don't need to register a dependency to
* protect the extconfig entry.
*/
tablename = get_rel_name(tableoid);
if (tablename == NULL)
@ -2119,8 +2119,8 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
tablename)));
/*
* Add the table OID and WHERE condition to the extension's extconfig
* and extcondition arrays.
* Add the table OID and WHERE condition to the extension's extconfig and
* extcondition arrays.
*/
/* Find the pg_extension tuple */
@ -2285,8 +2285,8 @@ AlterExtensionNamespace(List *names, const char *newschema)
systable_endscan(extScan);
/*
* If the extension is already in the target schema, just silently
* do nothing.
* If the extension is already in the target schema, just silently do
* nothing.
*/
if (extForm->extnamespace == nspOid)
{
@ -2402,8 +2402,8 @@ ExecAlterExtensionStmt(AlterExtensionStmt *stmt)
ListCell *lc;
/*
* We use global variables to track the extension being created, so we
* can create/update only one extension at the same time.
* We use global variables to track the extension being created, so we can
* create/update only one extension at the same time.
*/
if (creating_extension)
ereport(ERROR,
@ -2668,9 +2668,9 @@ ApplyExtensionUpdates(Oid extensionOid,
schemaName, schemaOid);
/*
* Update prior-version name and loop around. Since execute_sql_string
* did a final CommandCounterIncrement, we can update the pg_extension
* row again.
* Update prior-version name and loop around. Since
* execute_sql_string did a final CommandCounterIncrement, we can
* update the pg_extension row again.
*/
oldVersionName = versionName;
}
@ -2697,10 +2697,10 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt)
stmt->extname);
/*
* Translate the parser representation that identifies the object into
* an ObjectAddress. get_object_address() will throw an error if the
* object does not exist, and will also acquire a lock on the object to
* guard against concurrent DROP and ALTER EXTENSION ADD/DROP operations.
* Translate the parser representation that identifies the object into an
* ObjectAddress. get_object_address() will throw an error if the object
* does not exist, and will also acquire a lock on the object to guard
* against concurrent DROP and ALTER EXTENSION ADD/DROP operations.
*/
object = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
&relation, ShareUpdateExclusiveLock);

View File

@ -643,8 +643,8 @@ AlterForeignDataWrapper(AlterFdwStmt *stmt)
ObjectAddress referenced;
/*
* Flush all existing dependency records of this FDW on functions;
* we assume there can be none other than the ones we are fixing.
* Flush all existing dependency records of this FDW on functions; we
* assume there can be none other than the ones we are fixing.
*/
deleteDependencyRecordsForClass(ForeignDataWrapperRelationId,
fdwId,

View File

@ -904,8 +904,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
/*
* Check we have a collation iff it's a collatable type. The only
* expected failures here are (1) COLLATE applied to a noncollatable
* type, or (2) index expression had an unresolved collation. But
* we might as well code this to be a complete consistency check.
* type, or (2) index expression had an unresolved collation. But we
* might as well code this to be a complete consistency check.
*/
if (type_is_collatable(atttype))
{

View File

@ -1091,8 +1091,8 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
* Ordering op, check index supports that. (We could perhaps also
* check that the operator returns a type supported by the sortfamily,
* but that seems more trouble than it's worth here. If it does not,
* the operator will never be matchable to any ORDER BY clause, but
* no worse consequences can ensue. Also, trying to check that would
* the operator will never be matchable to any ORDER BY clause, but no
* worse consequences can ensue. Also, trying to check that would
* create an ordering hazard during dump/reload: it's possible that
* the family has been created but not yet populated with the required
* operators.)

View File

@ -464,7 +464,8 @@ AlterOperatorNamespace(List *names, List *argtypes, const char *newschema)
List *operatorName = names;
TypeName *typeName1 = (TypeName *) linitial(argtypes);
TypeName *typeName2 = (TypeName *) lsecond(argtypes);
Oid operOid, nspOid;
Oid operOid,
nspOid;
Relation rel;
rel = heap_open(OperatorRelationId, RowExclusiveLock);

View File

@ -255,10 +255,10 @@ PortalCleanup(Portal portal)
if (queryDesc)
{
/*
* Reset the queryDesc before anything else. This prevents us
* from trying to shut down the executor twice, in case of an
* error below. The transaction abort mechanisms will take care
* of resource cleanup in such a case.
* Reset the queryDesc before anything else. This prevents us from
* trying to shut down the executor twice, in case of an error below.
* The transaction abort mechanisms will take care of resource cleanup
* in such a case.
*/
portal->queryDesc = NULL;
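The reset-before-cleanup ordering above is a reusable idiom: detach the shared pointer first, so an error during shutdown cannot make a second cleanup pass run the shutdown twice. A minimal standalone sketch:

#include <stddef.h>

typedef struct Resource
{
	int			open;
} Resource;

static Resource *shared_res;

static void
cleanup(void)
{
	Resource   *res = shared_res;

	shared_res = NULL;			/* detach first: re-entry now finds nothing */
	if (res != NULL && res->open)
		res->open = 0;			/* the shutdown step that might error out */
}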

View File

@ -82,10 +82,10 @@ ExecSecLabelStmt(SecLabelStmt *stmt)
}
/*
* Translate the parser representation which identifies this object
* into an ObjectAddress. get_object_address() will throw an error if
* the object does not exist, and will also acquire a lock on the
* target to guard against concurrent modifications.
* Translate the parser representation which identifies this object into
* an ObjectAddress. get_object_address() will throw an error if the
* object does not exist, and will also acquire a lock on the target to
* guard against concurrent modifications.
*/
address = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
&relation, ShareUpdateExclusiveLock);
@ -98,6 +98,7 @@ ExecSecLabelStmt(SecLabelStmt *stmt)
switch (stmt->objtype)
{
case OBJECT_COLUMN:
/*
* Allow security labels only on columns of tables, views,
* composite types, and foreign tables (which are the only

View File

@ -2592,11 +2592,13 @@ AlterTableGetLockLevel(List *cmds)
{
/*
* Need AccessExclusiveLock for these subcommands because they
* affect or potentially affect both read and write operations.
* affect or potentially affect both read and write
* operations.
*
* New subcommand types should be added here by default.
*/
case AT_AddColumn: /* may rewrite heap, in some cases and visible to SELECT */
case AT_AddColumn: /* may rewrite heap, in some cases and visible
* to SELECT */
case AT_DropColumn: /* change visible to SELECT */
case AT_AddColumnToView: /* CREATE VIEW */
case AT_AlterColumnType: /* must rewrite heap */
@ -2644,14 +2646,17 @@ AlterTableGetLockLevel(List *cmds)
case CONSTR_EXCLUSION:
case CONSTR_PRIMARY:
case CONSTR_UNIQUE:
/*
* Cases essentially the same as CREATE INDEX. We
* could reduce the lock strength to ShareLock if we
* can work out how to allow concurrent catalog updates.
* could reduce the lock strength to ShareLock if
* we can work out how to allow concurrent catalog
* updates.
*/
cmd_lockmode = ShareRowExclusiveLock;
break;
case CONSTR_FOREIGN:
/*
* We add triggers to both tables when we add a
* Foreign Key, so the lock level must be at least
@ -2667,11 +2672,12 @@ AlterTableGetLockLevel(List *cmds)
break;
/*
* These subcommands affect inheritance behaviour. Queries started before us
* will continue to see the old inheritance behaviour, while queries started
* after we commit will see new behaviour. No need to prevent reads or writes
* to the subtable while we hook it up though. In both cases the parent table
* is locked with AccessShareLock.
* These subcommands affect inheritance behaviour. Queries
* started before us will continue to see the old inheritance
* behaviour, while queries started after we commit will see
* new behaviour. No need to prevent reads or writes to the
* subtable while we hook it up though. In both cases the
* parent table is locked with AccessShareLock.
*/
case AT_AddInherit:
case AT_DropInherit:
@ -2679,12 +2685,14 @@ AlterTableGetLockLevel(List *cmds)
break;
/*
* These subcommands affect general strategies for performance and maintenance,
* though don't change the semantic results from normal data reads and writes.
* Delaying an ALTER TABLE behind currently active writes only delays the point
* where the new strategy begins to take effect, so there is no benefit in waiting.
* In this case the minimum restriction applies: we don't currently allow
* concurrent catalog updates.
* These subcommands affect general strategies for performance
* and maintenance, though don't change the semantic results
* from normal data reads and writes. Delaying an ALTER TABLE
* behind currently active writes only delays the point where
* the new strategy begins to take effect, so there is no
* benefit in waiting. In this case the minimum restriction
* applies: we don't currently allow concurrent catalog
* updates.
*/
case AT_SetStatistics:
case AT_ClusterOn:
@ -3254,8 +3262,8 @@ ATRewriteTables(List **wqueue, LOCKMODE lockmode)
* (Eventually we'll probably need to check for composite type
* dependencies even when we're just scanning the table without a
* rewrite, but at the moment a composite type does not enforce any
* constraints, so it's not necessary/appropriate to enforce them
* just during ALTER.)
* constraints, so it's not necessary/appropriate to enforce them just
* during ALTER.)
*/
if (tab->newvals != NIL || tab->rewrite)
{
@ -3386,8 +3394,8 @@ ATRewriteTables(List **wqueue, LOCKMODE lockmode)
con->conid);
/*
* No need to mark the constraint row as validated,
* we did that when we inserted the row earlier.
* No need to mark the constraint row as validated, we did
* that when we inserted the row earlier.
*/
heap_close(refrel, NoLock);
@ -4103,9 +4111,9 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
/*
* Are we adding the column to a recursion child? If so, check whether to
* merge with an existing definition for the column. If we do merge,
* we must not recurse. Children will already have the column, and
* recursing into them would mess up attinhcount.
* merge with an existing definition for the column. If we do merge, we
* must not recurse. Children will already have the column, and recursing
* into them would mess up attinhcount.
*/
if (colDef->inhcount > 0)
{
@ -5172,8 +5180,9 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Determine name to assign to constraint. We require a constraint to
* have the same name as the underlying index; therefore, use the index's
* existing name as the default constraint name, and if the user explicitly
* gives some other name for the constraint, rename the index to match.
* existing name as the default constraint name, and if the user
* explicitly gives some other name for the constraint, rename the index
* to match.
*/
constraintName = stmt->idxname;
if (constraintName == NULL)
@ -5337,9 +5346,9 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
/*
* If the constraint got merged with an existing constraint, we're done.
* We mustn't recurse to child tables in this case, because they've already
* got the constraint, and visiting them again would lead to an incorrect
* value for coninhcount.
* We mustn't recurse to child tables in this case, because they've
* already got the constraint, and visiting them again would lead to an
* incorrect value for coninhcount.
*/
if (newcons == NIL)
return;
@ -5655,8 +5664,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Tell Phase 3 to check that the constraint is satisfied by existing rows
* We can skip this during table creation or if requested explicitly
* by specifying NOT VALID on an alter table statement.
* We can skip this during table creation or if requested explicitly by
* specifying NOT VALID on an alter table statement.
*/
if (!fkconstraint->skip_validation)
{
@ -5729,13 +5738,12 @@ ATExecValidateConstraint(Relation rel, const char *constrName)
Relation refrel;
/*
* Triggers are already in place on both tables, so a
* concurrent write that alters the result here is not
* possible. Normally we can run a query here to do the
* validation, which would only require AccessShareLock.
* In some cases, it is possible that we might need to
* fire triggers to perform the check, so we take a lock
* at RowShareLock level just in case.
* Triggers are already in place on both tables, so a concurrent write
* that alters the result here is not possible. Normally we can run a
* query here to do the validation, which would only require
* AccessShareLock. In some cases, it is possible that we might need
* to fire triggers to perform the check, so we take a lock at
* RowShareLock level just in case.
*/
refrel = heap_open(con->confrelid, RowShareLock);
@ -6571,12 +6579,12 @@ ATPrepAlterColumnType(List **wqueue,
if (tab->relkind == RELKIND_RELATION)
{
/*
* Set up an expression to transform the old data value to the new type.
* If a USING option was given, transform and use that expression, else
* just take the old value and try to coerce it. We do this first so that
* type incompatibility can be detected before we waste effort, and
* because we need the expression to be parsed against the original table
* rowtype.
* Set up an expression to transform the old data value to the new
* type. If a USING option was given, transform and use that
* expression, else just take the old value and try to coerce it. We
* do this first so that type incompatibility can be detected before
* we waste effort, and because we need the expression to be parsed
* against the original table rowtype.
*/
if (transform)
{
@ -6655,8 +6663,8 @@ ATPrepAlterColumnType(List **wqueue,
tab->relkind == RELKIND_FOREIGN_TABLE)
{
/*
* For composite types, do this check now. Tables will check
* it later when the table is being rewritten.
* For composite types, do this check now. Tables will check it later
* when the table is being rewritten.
*/
find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL);
}
@ -6924,6 +6932,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
break;
case OCLASS_TRIGGER:
/*
* A trigger can depend on a column because the column is
* specified as an update target, or because the column is

View File

@ -573,8 +573,8 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
/*
* Our theory for replaying a CREATE is to forcibly drop the target
* subdirectory if present, and then recreate it. This may be
* more work than needed, but it is simple to implement.
* subdirectory if present, and then recreate it. This may be more
* work than needed, but it is simple to implement.
*/
if (stat(location_with_version_dir, &st) == 0 && S_ISDIR(st.st_mode))
{

View File

@ -144,11 +144,11 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
referenced;
/*
* ShareRowExclusiveLock is sufficient to prevent concurrent write activity
* to the relation, and thus to lock out any operations that might want to
* fire triggers on the relation. If we had ON SELECT triggers we would
* need to take an AccessExclusiveLock to add one of those, just as we do
* with ON SELECT rules.
* ShareRowExclusiveLock is sufficient to prevent concurrent write
* activity to the relation, and thus to lock out any operations that
* might want to fire triggers on the relation. If we had ON SELECT
* triggers we would need to take an AccessExclusiveLock to add one of
* those, just as we do with ON SELECT rules.
*/
rel = heap_openrv(stmt->relation, ShareRowExclusiveLock);
@ -480,8 +480,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* can skip this for internally generated triggers, since the name
* modification above should be sufficient.
*
* NOTE that this is cool only because we have ShareRowExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
* NOTE that this is cool only because we have ShareRowExclusiveLock on
* the relation, so the trigger set won't be changing underneath us.
*/
if (!isInternal)
{
@ -1083,9 +1083,9 @@ RemoveTriggerById(Oid trigOid)
/*
* Open and lock the relation the trigger belongs to. As in
* CreateTrigger, this is sufficient to lock out all operations that
* could fire or add triggers; but it would need to be revisited if
* we had ON SELECT triggers.
* CreateTrigger, this is sufficient to lock out all operations that could
* fire or add triggers; but it would need to be revisited if we had ON
* SELECT triggers.
*/
relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;

View File

@ -407,7 +407,8 @@ RenameTSParser(List *oldname, const char *newname)
void
AlterTSParserNamespace(List *name, const char *newschema)
{
Oid prsId, nspOid;
Oid prsId,
nspOid;
Relation rel;
rel = heap_open(TSParserRelationId, RowExclusiveLock);
@ -685,7 +686,8 @@ RenameTSDictionary(List *oldname, const char *newname)
void
AlterTSDictionaryNamespace(List *name, const char *newschema)
{
Oid dictId, nspOid;
Oid dictId,
nspOid;
Relation rel;
rel = heap_open(TSDictionaryRelationId, RowExclusiveLock);
@ -1218,7 +1220,8 @@ RenameTSTemplate(List *oldname, const char *newname)
void
AlterTSTemplateNamespace(List *name, const char *newschema)
{
Oid tmplId, nspOid;
Oid tmplId,
nspOid;
Relation rel;
rel = heap_open(TSTemplateRelationId, RowExclusiveLock);
@ -1668,7 +1671,8 @@ RenameTSConfiguration(List *oldname, const char *newname)
void
AlterTSConfigurationNamespace(List *name, const char *newschema)
{
Oid cfgId, nspOid;
Oid cfgId,
nspOid;
Relation rel;
rel = heap_open(TSConfigRelationId, RowExclusiveLock);

View File

@ -240,9 +240,10 @@ CreateRole(CreateRoleStmt *stmt)
if (dissuper)
{
issuper = intVal(dissuper->arg) != 0;
/*
* Superusers get replication by default, but only if
* NOREPLICATION wasn't explicitly mentioned
* Superusers get replication by default, but only if NOREPLICATION
* wasn't explicitly mentioned
*/
if (!(disreplication && intVal(disreplication->arg) == 0))
isreplication = 1;
@ -384,8 +385,8 @@ CreateRole(CreateRoleStmt *stmt)
tuple = heap_form_tuple(pg_authid_dsc, new_record, new_record_nulls);
/*
* pg_largeobject_metadata contains pg_authid.oid's, so we
* use the binary-upgrade override, if specified.
* pg_largeobject_metadata contains pg_authid.oid's, so we use the
* binary-upgrade override, if specified.
*/
if (OidIsValid(binary_upgrade_next_pg_authid_oid))
{

View File

@ -839,8 +839,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound,
* There's a race condition here: the rel may have gone away since the
* last time we saw it. If so, we don't need to vacuum it.
*
* If we've been asked not to wait for the relation lock, acquire it
* first in non-blocking mode, before calling try_relation_open().
* If we've been asked not to wait for the relation lock, acquire it first
* in non-blocking mode, before calling try_relation_open().
*/
if (!(vacstmt->options & VACOPT_NOWAIT))
onerel = try_relation_open(relid, lmode);

View File

@ -705,15 +705,16 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
PageSetAllVisible(page);
SetBufferCommitInfoNeedsSave(buf);
}
/*
* It's possible for the value returned by GetOldestXmin() to move
* backwards, so it's not wrong for us to see tuples that appear to
* not be visible to everyone yet, while PD_ALL_VISIBLE is already
* set. The real safe xmin value never moves backwards, but
* GetOldestXmin() is conservative and sometimes returns a value
* that's unnecessarily small, so if we see that contradiction it
* just means that the tuples that we think are not visible to
* everyone yet actually are, and the PD_ALL_VISIBLE flag is correct.
* that's unnecessarily small, so if we see that contradiction it just
* means that the tuples that we think are not visible to everyone yet
* actually are, and the PD_ALL_VISIBLE flag is correct.
*
* There should never be dead tuples on a page with PD_ALL_VISIBLE
* set, however.

View File

@ -851,8 +851,8 @@ check_session_authorization(char **newval, void **extra, GucSource source)
{
/*
* Can't do catalog lookups, so fail. The result of this is that
* session_authorization cannot be set in postgresql.conf, which
* seems like a good thing anyway, so we don't work hard to avoid it.
* session_authorization cannot be set in postgresql.conf, which seems
* like a good thing anyway, so we don't work hard to avoid it.
*/
return false;
}

View File

@ -130,6 +130,7 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
def->cooked_default = NULL;
def->collClause = NULL;
def->collOid = exprCollation((Node *) tle->expr);
/*
* It's possible that the column is of a collatable type but the
* collation could not be resolved, so double-check.
@ -437,8 +438,8 @@ DefineView(ViewStmt *stmt, const char *queryString)
/*
* Check for unsupported cases. These tests are redundant with ones in
* DefineQueryRewrite(), but that function will complain about a bogus
* ON SELECT rule, and we'd rather the message complain about a view.
* DefineQueryRewrite(), but that function will complain about a bogus ON
* SELECT rule, and we'd rather the message complain about a view.
*/
if (viewParse->intoClause != NULL)
ereport(ERROR,

View File

@ -168,6 +168,7 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
switch (queryDesc->operation)
{
case CMD_SELECT:
/*
* SELECT INTO, SELECT FOR UPDATE/SHARE and modifying CTEs need to
* mark tuples
@ -425,9 +426,9 @@ standard_ExecutorEnd(QueryDesc *queryDesc)
Assert(estate != NULL);
/*
* Check that ExecutorFinish was called, unless in EXPLAIN-only mode.
* This Assert is needed because ExecutorFinish is new as of 9.1, and
* callers might forget to call it.
* Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
* Assert is needed because ExecutorFinish is new as of 9.1, and callers
* might forget to call it.
*/
Assert(estate->es_finished ||
(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
@ -1137,8 +1138,8 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
/*
* Open the target relation's relcache entry. We assume that an
* appropriate lock is still held by the backend from whenever the trigger
* event got queued, so we need take no new lock here. Also, we need
* not recheck the relkind, so no need for CheckValidResultRel.
* event got queued, so we need take no new lock here. Also, we need not
* recheck the relkind, so no need for CheckValidResultRel.
*/
rel = heap_open(relid, NoLock);
@ -2220,8 +2221,8 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
* ExecInitSubPlan expects to be able to find these entries. Some of the
* SubPlans might not be used in the part of the plan tree we intend to
* run, but since it's not easy to tell which, we just initialize them
* all. (However, if the subplan is headed by a ModifyTable node, then
* it must be a data-modifying CTE, which we will certainly not need to
* all. (However, if the subplan is headed by a ModifyTable node, then it
* must be a data-modifying CTE, which we will certainly not need to
* re-run, so we can skip initializing it. This is just an efficiency
* hack; it won't skip data-modifying CTEs for which the ModifyTable node
* is not at the top.)

View File

@ -1319,9 +1319,9 @@ retry:
/*
* Ordinarily, at this point the search should have found the originally
* inserted tuple, unless we exited the loop early because of conflict.
* However, it is possible to define exclusion constraints for which
* that wouldn't be true --- for instance, if the operator is <>.
* So we no longer complain if found_self is still false.
* However, it is possible to define exclusion constraints for which that
* wouldn't be true --- for instance, if the operator is <>. So we no
* longer complain if found_self is still false.
*/
econtext->ecxt_scantuple = save_scantuple;

View File

@ -847,8 +847,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
*
* In a non-read-only function, we rely on the fact that we'll never
* suspend execution between queries of the function: the only reason to
* suspend execution before completion is if we are returning a row from
* a lazily-evaluated SELECT. So, when first entering this loop, we'll
* suspend execution before completion is if we are returning a row from a
* lazily-evaluated SELECT. So, when first entering this loop, we'll
* either start a new query (and push a fresh snapshot) or re-establish
* the active snapshot from the existing query descriptor. If we need to
* start a new query in a subsequent execution of the loop, either we need
@ -927,10 +927,10 @@ fmgr_sql(PG_FUNCTION_ARGS)
es = (execution_state *) lfirst(eslc);
/*
* Flush the current snapshot so that we will take a new one
* for the new query list. This ensures that new snaps are
* taken at original-query boundaries, matching the behavior
* of interactive execution.
* Flush the current snapshot so that we will take a new one for
* the new query list. This ensures that new snaps are taken at
* original-query boundaries, matching the behavior of interactive
* execution.
*/
if (pushed_snapshot)
{

View File

@ -307,8 +307,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags)
indexstate->biss_NumScanKeys);
/*
* If no run-time keys to calculate, go ahead and pass the scankeys to
* the index AM.
* If no run-time keys to calculate, go ahead and pass the scankeys to the
* index AM.
*/
if (indexstate->biss_NumRuntimeKeys == 0 &&
indexstate->biss_NumArrayKeys == 0)

View File

@ -960,13 +960,11 @@ void
ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
{
/*
*----------
* During this scan we use the HashJoinState fields as follows:
* ---------- During this scan we use the HashJoinState fields as follows:
*
* hj_CurBucketNo: next regular bucket to scan
* hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
* hj_CurTuple: last tuple returned, or NULL to start next bucket
*----------
* hj_CurBucketNo: next regular bucket to scan hj_CurSkewBucketNo: next
* skew bucket (an index into skewBucketNums) hj_CurTuple: last tuple
* returned, or NULL to start next bucket ----------
*/
hjstate->hj_CurBucketNo = 0;
hjstate->hj_CurSkewBucketNo = 0;

View File

@ -113,6 +113,7 @@ ExecHashJoin(HashJoinState *node)
switch (node->hj_JoinState)
{
case HJ_BUILD_HASHTABLE:
/*
* First time through: build hash table for inner relation.
*/
@ -123,12 +124,12 @@ ExecHashJoin(HashJoinState *node)
* right/full join, we can quit without building the hash
* table. However, for an inner join it is only a win to
* check this when the outer relation's startup cost is less
* than the projected cost of building the hash
* table. Otherwise it's best to build the hash table first
* and see if the inner relation is empty. (When it's a left
* join, we should always make this check, since we aren't
* going to be able to skip the join on the strength of an
* empty inner relation anyway.)
* than the projected cost of building the hash table.
* Otherwise it's best to build the hash table first and see
* if the inner relation is empty. (When it's a left join, we
* should always make this check, since we aren't going to be
* able to skip the join on the strength of an empty inner
* relation anyway.)
*
* If we are rescanning the join, we make use of information
* gained on the previous scan: don't bother to try the
@ -185,8 +186,8 @@ ExecHashJoin(HashJoinState *node)
return NULL;
/*
* need to remember whether nbatch has increased since we began
* scanning the outer relation
* need to remember whether nbatch has increased since we
* began scanning the outer relation
*/
hashtable->nbatch_outstart = hashtable->nbatch;
@ -202,6 +203,7 @@ ExecHashJoin(HashJoinState *node)
/* FALL THRU */
case HJ_NEED_NEW_OUTER:
/*
* We don't have an outer tuple, try to get the next one
*/
@ -261,6 +263,7 @@ ExecHashJoin(HashJoinState *node)
/* FALL THRU */
case HJ_SCAN_BUCKET:
/*
* Scan the selected hash bucket for matches to current outer
*/
@ -296,8 +299,8 @@ ExecHashJoin(HashJoinState *node)
}
/*
* In a semijoin, we'll consider returning the first match,
* but after that we're done with this outer tuple.
* In a semijoin, we'll consider returning the first
* match, but after that we're done with this outer tuple.
*/
if (node->js.jointype == JOIN_SEMI)
node->hj_JoinState = HJ_NEED_NEW_OUTER;
@ -320,10 +323,11 @@ ExecHashJoin(HashJoinState *node)
break;
case HJ_FILL_OUTER_TUPLE:
/*
* The current outer tuple has run out of matches, so check
* whether to emit a dummy outer-join tuple. Whether we
* emit one or not, the next state is NEED_NEW_OUTER.
* whether to emit a dummy outer-join tuple. Whether we emit
* one or not, the next state is NEED_NEW_OUTER.
*/
node->hj_JoinState = HJ_NEED_NEW_OUTER;
@ -354,6 +358,7 @@ ExecHashJoin(HashJoinState *node)
break;
case HJ_FILL_INNER_TUPLES:
/*
* We have finished a batch, but we are doing right/full join,
* so any unmatched inner tuples in the hashtable have to be
@ -389,6 +394,7 @@ ExecHashJoin(HashJoinState *node)
break;
case HJ_NEED_NEW_BATCH:
/*
* Try to advance to next batch. Done if there are no more.
*/
@ -944,14 +950,13 @@ ExecReScanHashJoin(HashJoinState *node)
ExecHashTableResetMatchFlags(node->hj_HashTable);
/*
* Also, we need to reset our state about the emptiness of
* the outer relation, so that the new scan of the outer will
* update it correctly if it turns out to be empty this time.
* (There's no harm in clearing it now because ExecHashJoin won't
* need the info. In the other cases, where the hash table
* doesn't exist or we are destroying it, we leave this state
* alone because ExecHashJoin will need it the first time
* through.)
* Also, we need to reset our state about the emptiness of the
* outer relation, so that the new scan of the outer will update
* it correctly if it turns out to be empty this time. (There's no
* harm in clearing it now because ExecHashJoin won't need the
* info. In the other cases, where the hash table doesn't exist
* or we are destroying it, we leave this state alone because
* ExecHashJoin will need it the first time through.)
*/
node->hj_OuterNotEmpty = false;
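Pulling the case labels out of the hunks above gives the overall shape of the hash join state machine. The transition sketch below is heavily simplified (it ignores join type, semijoins, and batching details), and the HJ_DONE terminal is invented for the sketch:

#include <stdbool.h>

typedef enum
{
	HJ_BUILD_HASHTABLE,
	HJ_NEED_NEW_OUTER,
	HJ_SCAN_BUCKET,
	HJ_FILL_OUTER_TUPLE,
	HJ_FILL_INNER_TUPLES,
	HJ_NEED_NEW_BATCH,
	HJ_DONE						/* stand-in terminal state, not in the diff */
} HashJoinPhase;

static HashJoinPhase
next_phase(HashJoinPhase cur, bool outer_exhausted, bool match_found,
		   bool more_batches)
{
	switch (cur)
	{
		case HJ_BUILD_HASHTABLE:
			return HJ_NEED_NEW_OUTER;	/* falls through in the real code */
		case HJ_NEED_NEW_OUTER:
			return outer_exhausted ? HJ_FILL_INNER_TUPLES : HJ_SCAN_BUCKET;
		case HJ_SCAN_BUCKET:
			/* keep scanning while matches come back, else fill-and-move-on */
			return match_found ? HJ_SCAN_BUCKET : HJ_FILL_OUTER_TUPLE;
		case HJ_FILL_OUTER_TUPLE:
			return HJ_NEED_NEW_OUTER;
		case HJ_FILL_INNER_TUPLES:
			return HJ_NEED_NEW_BATCH;
		case HJ_NEED_NEW_BATCH:
			return more_batches ? HJ_NEED_NEW_OUTER : HJ_DONE;
		default:
			return HJ_DONE;
	}
}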

View File

@ -608,8 +608,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
indexstate->iss_NumOrderByKeys);
/*
* If no run-time keys to calculate, go ahead and pass the scankeys to
* the index AM.
* If no run-time keys to calculate, go ahead and pass the scankeys to the
* index AM.
*/
if (indexstate->iss_NumRuntimeKeys == 0)
index_rescan(indexstate->iss_ScanDesc,
@ -703,11 +703,11 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid,
scan_keys = (ScanKey) palloc(n_scan_keys * sizeof(ScanKeyData));
/*
* runtime_keys array is dynamically resized as needed. We handle it
* this way so that the same runtime keys array can be shared between
* indexquals and indexorderbys, which will be processed in separate
* calls of this function. Caller must be sure to pass in NULL/0 for
* first call.
* runtime_keys array is dynamically resized as needed. We handle it this
* way so that the same runtime keys array can be shared between
* indexquals and indexorderbys, which will be processed in separate calls
* of this function. Caller must be sure to pass in NULL/0 for first
* call.
*/
runtime_keys = *runtimeKeys;
n_runtime_keys = max_runtime_keys = *numRuntimeKeys;

View File

@ -346,14 +346,14 @@ pass_down_bound(LimitState *node, PlanState *child_node)
else if (IsA(child_node, ResultState))
{
/*
* An extra consideration here is that if the Result is projecting
* a targetlist that contains any SRFs, we can't assume that every
* input tuple generates an output tuple, so a Sort underneath
* might need to return more than N tuples to satisfy LIMIT N.
* So we cannot use bounded sort.
* An extra consideration here is that if the Result is projecting a
* targetlist that contains any SRFs, we can't assume that every input
* tuple generates an output tuple, so a Sort underneath might need to
* return more than N tuples to satisfy LIMIT N. So we cannot use
* bounded sort.
*
* If Result supported qual checking, we'd have to punt on seeing
* a qual, too. Note that having a resconstantqual is not a
* If Result supported qual checking, we'd have to punt on seeing a
* qual, too. Note that having a resconstantqual is not a
* showstopper: if that fails we're not getting any rows at all.
*/
if (outerPlanState(child_node) &&

View File

@ -187,8 +187,8 @@ ExecMergeAppend(MergeAppendState *node)
if (!node->ms_initialized)
{
/*
* First time through: pull the first tuple from each subplan,
* and set up the heap.
* First time through: pull the first tuple from each subplan, and set
* up the heap.
*/
for (i = 0; i < node->ms_nplans; i++)
{

View File

@ -608,11 +608,10 @@ lreplace:;
/*
* Note: instead of having to update the old index tuples associated
* with the heap tuple, all we do is form and insert new index
* tuples. This is because UPDATEs are actually DELETEs and INSERTs,
* and index tuple deletion is done later by VACUUM (see notes in
* ExecDelete). All we do here is insert new index tuples. -cim
* 9/27/89
* with the heap tuple, all we do is form and insert new index tuples.
* This is because UPDATEs are actually DELETEs and INSERTs, and index
* tuple deletion is done later by VACUUM (see notes in ExecDelete).
* All we do here is insert new index tuples. -cim 9/27/89
*/
/*
@ -806,7 +805,8 @@ ExecModifyTable(ModifyTableState *node)
elog(ERROR, "ctid is NULL");
tupleid = (ItemPointer) DatumGetPointer(datum);
tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
tuple_ctid = *tupleid; /* be sure we don't free
* ctid!! */
tupleid = &tuple_ctid;
}
else
@ -922,8 +922,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/*
* call ExecInitNode on each of the plans to be executed and save the
* results into the array "mt_plans". This is also a convenient place
* to verify that the proposed target relations are valid and open their
* results into the array "mt_plans". This is also a convenient place to
* verify that the proposed target relations are valid and open their
* indexes for insertion of new index entries. Note we *must* set
* estate->es_result_relation_info correctly while we initialize each
* sub-plan; ExecContextForcesOids depends on that!
@ -1147,10 +1147,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
* to estate->es_auxmodifytables so that it will be run to completion by
* ExecPostprocessPlan. (It'd actually work fine to add the primary
* ModifyTable node too, but there's no need.) Note the use of lcons
* not lappend: we need later-initialized ModifyTable nodes to be shut
* down before earlier ones. This ensures that we don't throw away
* RETURNING rows that need to be seen by a later CTE subplan.
* ModifyTable node too, but there's no need.) Note the use of lcons not
* lappend: we need later-initialized ModifyTable nodes to be shut down
* before earlier ones. This ensures that we don't throw away RETURNING
* rows that need to be seen by a later CTE subplan.
*/
if (!mtstate->canSetTag)
estate->es_auxmodifytables = lcons(mtstate,
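The lcons-versus-lappend point above is about shutdown order: prepending makes a head-to-tail walk visit the most recently initialized node first. A standalone toy list showing the LIFO effect:

#include <stdio.h>
#include <stdlib.h>

typedef struct Node
{
	int			id;
	struct Node *next;
} Node;

static Node *
lcons_node(int id, Node *list)	/* prepend, like lcons */
{
	Node	   *n = malloc(sizeof(Node));

	n->id = id;
	n->next = list;
	return n;
}

int
main(void)
{
	Node	   *aux = NULL;

	for (int id = 1; id <= 3; id++)
		aux = lcons_node(id, aux);	/* initialize in order 1, 2, 3 */
	for (Node *n = aux; n; n = n->next)
		printf("shutting down node %d\n", n->id);	/* prints 3, 2, 1 */
	return 0;
}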

View File

@ -137,9 +137,8 @@ ExecNestLoop(NestLoopState *node)
node->nl_MatchedOuter = false;
/*
* fetch the values of any outer Vars that must be passed to
* the inner scan, and store them in the appropriate PARAM_EXEC
* slots.
* fetch the values of any outer Vars that must be passed to the
* inner scan, and store them in the appropriate PARAM_EXEC slots.
*/
foreach(lc, nl->nestParams)
{
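The PARAM_EXEC mechanism sketched above can be imitated with a plain global slot: the outer loop stores the current outer value, and the inner scan's qual reads it on each rescan. A toy standalone nested-loop join:

#include <stdio.h>

static int	param_slot;			/* stand-in for one PARAM_EXEC slot */

/* The "inner scan": its qual reads the slot, not the outer row itself. */
static void
scan_inner(const int *inner, int n)
{
	for (int i = 0; i < n; i++)
		if (inner[i] == param_slot)
			printf("joined (%d, %d)\n", param_slot, inner[i]);
}

int
main(void)
{
	int			outer[] = {1, 2, 3};
	int			inner[] = {2, 3, 4};

	for (int o = 0; o < 3; o++)
	{
		param_slot = outer[o];	/* store the outer Var before each rescan */
		scan_inner(inner, 3);
	}
	return 0;
}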
@ -330,9 +329,9 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags)
*
* If we have no parameters to pass into the inner rel from the outer,
* tell the inner child that cheap rescans would be good. If we do have
* such parameters, then there is no point in REWIND support at all in
* the inner child, because it will always be rescanned with fresh
* parameter values.
* such parameters, then there is no point in REWIND support at all in the
* inner child, because it will always be rescanned with fresh parameter
* values.
*/
outerPlanState(nlstate) = ExecInitNode(outerPlan(node), estate, eflags);
if (node->nestParams == NIL)

View File

@ -1787,8 +1787,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
* snapshot != InvalidSnapshot, read_only = true: use exactly the given
* snapshot.
*
* snapshot != InvalidSnapshot, read_only = false: use the given
* snapshot, modified by advancing its command ID before each querytree.
* snapshot != InvalidSnapshot, read_only = false: use the given snapshot,
* modified by advancing its command ID before each querytree.
*
* snapshot == InvalidSnapshot, read_only = true: use the entry-time
* ActiveSnapshot, if any (if there isn't one, we run with no snapshot).
@ -1797,8 +1797,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
* snapshot for each user command, and advance its command ID before each
* querytree within the command.
*
* In the first two cases, we can just push the snap onto the stack
* once for the whole plan list.
* In the first two cases, we can just push the snap onto the stack once
* for the whole plan list.
*/
if (snapshot != InvalidSnapshot)
{

View File

@ -61,6 +61,7 @@ static int recv_and_check_password_packet(Port *port);
#define IDENT_PORT 113
static int ident_inet(hbaPort *port);
#ifdef HAVE_UNIX_SOCKETS
static int auth_peer(hbaPort *port);
#endif
@ -1814,7 +1815,6 @@ auth_peer(hbaPort *port)
}
strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
#elif defined(SO_PEERCRED)
/* Linux style: use getsockopt(SO_PEERCRED) */
struct ucred peercred;
@ -1843,7 +1843,6 @@ auth_peer(hbaPort *port)
}
strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
#elif defined(HAVE_GETPEERUCRED)
/* Solaris > 10 */
uid_t uid;
@ -1879,7 +1878,6 @@ auth_peer(hbaPort *port)
}
strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
#elif defined(HAVE_STRUCT_CMSGCRED) || defined(HAVE_STRUCT_FCRED) || (defined(HAVE_STRUCT_SOCKCRED) && defined(LOCAL_CREDS))
struct msghdr msg;
@ -1947,7 +1945,6 @@ auth_peer(hbaPort *port)
}
strlcpy(ident_user, pw->pw_name, IDENT_USERNAME_MAX + 1);
#else
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
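
The SO_PEERCRED branch in isolation, as a compilable sketch: given a connected Unix-domain socket, fetch the peer's uid and resolve it to a user name. SO_PEERCRED and struct ucred are real Linux interfaces; the surrounding scaffolding and error handling are simplified stand-ins, not the server's actual code:

/*
 * Linux style: use getsockopt(SO_PEERCRED) to identify the peer user.
 */
#define _GNU_SOURCE             /* for struct ucred on glibc */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <pwd.h>

static int
peer_user_name(int sock, char *buf, size_t buflen)
{
    struct ucred peercred;
    socklen_t   len = sizeof(peercred);
    struct passwd *pw;

    if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &peercred, &len) != 0)
        return -1;              /* could not get peer credentials */

    pw = getpwuid(peercred.uid);
    if (pw == NULL)
        return -1;              /* no such local user */

    strncpy(buf, pw->pw_name, buflen - 1);
    buf[buflen - 1] = '\0';
    return 0;
}

int
main(void)
{
    int         sv[2];
    char        name[64];

    /* a socketpair is enough to demonstrate the call on ourselves */
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0 &&
        peer_user_name(sv[0], name, sizeof(name)) == 0)
        printf("peer is %s\n", name);
    return 0;
}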
@ -2768,10 +2765,10 @@ CheckRADIUSAuth(Port *port)
pg_freeaddrinfo_all(hint.ai_family, serveraddrs);
/*
* Figure out at what time we should time out. We can't just use a single
* call to select() with a timeout, since somebody can be sending invalid
* packets to our port thus causing us to retry in a loop and never time
* out.
*/
gettimeofday(&endtime, NULL);
endtime.tv_sec += RADIUS_TIMEOUT;
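
The deadline arithmetic in miniature: fix an absolute end time once, then recompute the remaining select() timeout on every loop iteration, so a stream of garbage packets cannot keep resetting the clock. A sketch assuming RADIUS_TIMEOUT is a timeout in seconds:

#include <stdio.h>
#include <sys/time.h>

#define RADIUS_TIMEOUT 3

int
main(void)
{
    struct timeval endtime, now, timeout;

    gettimeofday(&endtime, NULL);
    endtime.tv_sec += RADIUS_TIMEOUT;   /* absolute deadline, set once */

    /* inside the retry loop: remaining = deadline - now */
    gettimeofday(&now, NULL);
    timeout.tv_sec = endtime.tv_sec - now.tv_sec;
    timeout.tv_usec = endtime.tv_usec - now.tv_usec;
    if (timeout.tv_usec < 0)
    {
        timeout.tv_sec--;
        timeout.tv_usec += 1000000;
    }
    if (timeout.tv_sec < 0)
    {
        fprintf(stderr, "timeout waiting for response\n");
        return 1;
    }
    printf("remaining: %ld.%06ld s\n",
           (long) timeout.tv_sec, (long) timeout.tv_usec);
    return 0;
}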
@ -2820,12 +2817,12 @@ CheckRADIUSAuth(Port *port)
/*
* Attempt to read the response packet, and verify the contents.
*
* Any packet that's not actually a RADIUS packet, or otherwise does
* not validate as an explicit reject, is just ignored and we retry
* for another packet (until we reach the timeout). This avoids the
* possibility of a denial of service against the login by flooding
* the server with invalid packets on the port on which we're
* expecting the RADIUS response.
*/
addrsize = sizeof(remoteaddr);
@ -2889,8 +2886,8 @@ CheckRADIUSAuth(Port *port)
memcpy(cryptvector + 4, packet->vector, RADIUS_VECTOR_LENGTH); /* request
* authenticator, from
* original packet */
if (packetlength > RADIUS_HEADER_LENGTH) /* there may be no
* attributes at all */
memcpy(cryptvector + RADIUS_HEADER_LENGTH, receive_buffer + RADIUS_HEADER_LENGTH, packetlength - RADIUS_HEADER_LENGTH);
memcpy(cryptvector + packetlength, port->hba->radiussecret, strlen(port->hba->radiussecret));
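
The overall shape of that validate-or-ignore receive loop, as a self-contained sketch. select(), recv(), and socketpair() are standard POSIX; validate_packet() is a stub standing in for the real RADIUS checks (id, length, response authenticator):

#include <stdbool.h>
#include <stdio.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/time.h>

static bool
validate_packet(const char *buf, size_t len)
{
    /* stand-in: a real implementation verifies the RADIUS reply fields */
    return len >= 20 && buf[0] != 0;
}

static bool
wait_for_valid_reply(int sock, const struct timeval *endtime)
{
    char        buf[4096];

    for (;;)
    {
        struct timeval now, timeout;
        fd_set      fds;
        ssize_t     n;

        gettimeofday(&now, NULL);
        timeout.tv_sec = endtime->tv_sec - now.tv_sec;
        timeout.tv_usec = endtime->tv_usec - now.tv_usec;
        if (timeout.tv_usec < 0)
        {
            timeout.tv_sec--;
            timeout.tv_usec += 1000000;
        }
        if (timeout.tv_sec < 0)
            return false;       /* deadline reached, give up */

        FD_ZERO(&fds);
        FD_SET(sock, &fds);
        if (select(sock + 1, &fds, NULL, NULL, &timeout) <= 0)
            return false;       /* timed out or select() failed */

        n = recv(sock, buf, sizeof(buf), 0);
        if (n > 0 && validate_packet(buf, (size_t) n))
            return true;        /* genuine reply */
        /* otherwise: ignore the garbage and keep waiting */
    }
}

int
main(void)
{
    int         sv[2];
    struct timeval endtime;
    char        fake[20] = {2}; /* "valid enough" for the stub above */

    socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
    send(sv[1], fake, sizeof(fake), 0);

    gettimeofday(&endtime, NULL);
    endtime.tv_sec += 3;
    printf("%s\n", wait_for_valid_reply(sv[0], &endtime) ? "ok" : "timeout");
    return 0;
}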


@ -561,7 +561,6 @@ ipv6eq(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
return true;
}
#endif /* HAVE_IPV6 */
/*
@ -590,7 +589,8 @@ hostname_match(const char *pattern, const char *actual_hostname)
static bool
check_hostname(hbaPort *port, const char *hostname)
{
struct addrinfo *gai_result,
*gai;
int ret;
bool found;


@ -760,10 +760,11 @@ pq_set_nonblocking(bool nonblocking)
#ifdef WIN32
pgwin32_noblock = nonblocking ? 1 : 0;
#else
/*
* Use COMMERROR on failure, because ERROR would try to send the error to
* the client, which might require changing the mode again, leading to
* infinite recursion.
*/
if (nonblocking)
{
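
What the non-Windows branch needs is the standard POSIX O_NONBLOCK toggle via fcntl(). A sketch of that pattern, not PostgreSQL's actual implementation:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
set_nonblocking(int fd, int nonblocking)
{
    int         flags = fcntl(fd, F_GETFL, 0);

    if (flags < 0)
        return -1;
    if (nonblocking)
        flags |= O_NONBLOCK;
    else
        flags &= ~O_NONBLOCK;
    return fcntl(fd, F_SETFL, flags);
}

int
main(void)
{
    /* demonstrate on stdin; report errors locally, never to the peer */
    if (set_nonblocking(STDIN_FILENO, 1) != 0)
        perror("set_nonblocking");
    return 0;
}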
@ -903,18 +904,17 @@ pq_getbyte_if_available(unsigned char *c)
{
/*
* Ok if no data available without blocking or interrupted (though
* EINTR really shouldn't happen with a non-blocking socket). Report
* other errors.
*/
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
r = 0;
else
{
/*
* Careful: an ereport() that tries to write to the client would
* cause recursion to here, leading to stack overflow and core
* dump! This message must go *only* to the postmaster log.
*/
ereport(COMMERROR,
(errcode_for_socket_access(),
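
The errno classification described above, as a self-contained sketch: on a non-blocking descriptor, "no data yet" and "interrupted" are normal outcomes, everything else is a hard error reported locally only. Names are illustrative:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* returns 1 = got a byte, 0 = no data available, -1 = EOF or hard error */
static int
getbyte_if_available(int fd, unsigned char *c)
{
    ssize_t     r = read(fd, c, 1);

    if (r > 0)
        return 1;
    if (r == 0)
        return -1;              /* EOF: connection closed by peer */
    if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
        return 0;               /* nothing to read right now */
    fprintf(stderr, "could not receive data: errno %d\n", errno);
    return -1;
}

int
main(void)
{
    int         fds[2];
    unsigned char c;

    /* self-pipe with one byte queued, so the read succeeds deterministically */
    pipe(fds);
    write(fds[1], "x", 1);
    printf("result: %d\n", getbyte_if_available(fds[0], &c));   /* prints 1 */
    return 0;
}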
@ -1219,8 +1219,8 @@ internal_flush(void)
continue; /* Ok if we were interrupted */
/*
* Ok if no data writable without blocking, and the socket is in
* non-blocking mode.
*/
if (errno == EAGAIN ||
errno == EWOULDBLOCK)
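
The write-side mirror of the same pattern, sketched: retry on EINTR, treat EAGAIN/EWOULDBLOCK as "wait until writable", and report anything else locally. wait_until_writable() is a hypothetical helper, not a real API:

#include <errno.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

static void
wait_until_writable(int fd)
{
    fd_set      fds;

    FD_ZERO(&fds);
    FD_SET(fd, &fds);
    select(fd + 1, NULL, &fds, NULL, NULL);
}

static int
flush_buffer(int fd, const char *buf, size_t len)
{
    size_t      sent = 0;

    while (sent < len)
    {
        ssize_t     r = write(fd, buf + sent, len - sent);

        if (r < 0)
        {
            if (errno == EINTR)
                continue;       /* interrupted: just retry */
            if (errno == EAGAIN || errno == EWOULDBLOCK)
            {
                wait_until_writable(fd);
                continue;       /* socket is non-blocking: wait, then retry */
            }
            fprintf(stderr, "could not send data: errno %d\n", errno);
            return -1;
        }
        sent += (size_t) r;
    }
    return 0;
}

int
main(void)
{
    return flush_buffer(STDOUT_FILENO, "hello\n", 6);
}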
@ -1383,7 +1383,8 @@ pq_putmessage_noblock(char msgtype, const char *s, size_t len)
PqSendBufferSize = required;
}
res = pq_putmessage(msgtype, s, len);
Assert(res == 0); /* should not fail when the message fits in
* buffer */
}
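
The grow-then-enqueue idea behind that assertion, as a sketch: enlarge the buffer to the exact required size first, so the append afterwards cannot fail, and the assertion merely documents the invariant. Types and names are illustrative; allocation error handling is elided:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
    char       *data;
    size_t      used;
    size_t      size;
} SendBuffer;

static void
ensure_capacity(SendBuffer *b, size_t required)
{
    if (b->size < required)
    {
        b->data = realloc(b->data, required);   /* error handling elided */
        b->size = required;
    }
}

static int
append(SendBuffer *b, const char *s, size_t len)
{
    if (b->used + len > b->size)
        return -1;              /* would not fit */
    memcpy(b->data + b->used, s, len);
    b->used += len;
    return 0;
}

int
main(void)
{
    SendBuffer  buf = {NULL, 0, 0};
    int         res;

    ensure_capacity(&buf, buf.used + 5);
    res = append(&buf, "hello", 5);
    assert(res == 0);           /* cannot fail: we just made room */
    free(buf.data);
    return 0;
}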


@ -218,8 +218,8 @@ startup_hacks(const char *progname)
/*
* On some platforms, unaligned memory accesses result in a kernel trap;
* the default kernel behavior is to emulate the memory access, but this
* results in a significant performance penalty. We want PG never to make
* such unaligned memory accesses, so this code disables the kernel
* emulation: unaligned accesses will result in SIGBUS instead.
*/
#ifdef NOFIXADE
@ -238,7 +238,6 @@ startup_hacks(const char *progname)
progname, strerror(errno));
}
#endif /* __alpha */
#endif /* NOFIXADE */
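
The portable way to avoid the hazard that comment describes is to never dereference a misaligned pointer at all: memcpy the bytes into a properly aligned object instead. A sketch illustrating the hazard, not the platform-specific kernel knob itself:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    unsigned char raw[sizeof(uint32_t) + 1] = {0, 0x78, 0x56, 0x34, 0x12};
    uint32_t    value;

    /* *(uint32_t *) (raw + 1) may trap (SIGBUS) on strict platforms */
    memcpy(&value, raw + 1, sizeof(value));     /* always safe */
    printf("0x%08x\n", value);
    return 0;
}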
/*


@ -775,10 +775,11 @@ exprCollation(Node *expr)
coll = ((MinMaxExpr *) expr)->minmaxcollid;
break;
case T_XmlExpr:
/*
* XMLSERIALIZE returns text from non-collatable inputs, so its
* collation is always default. The other cases return boolean or
* XML, which are non-collatable.
*/
if (((XmlExpr *) expr)->op == IS_XMLSERIALIZE)
coll = DEFAULT_COLLATION_OID;
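
The rule in that comment reduces to: a text result gets the default collation, everything else is non-collatable. A miniature sketch of that dispatch; the enum and constants are stand-ins, not PostgreSQL's:

#include <stdio.h>

typedef enum { XMLOP_SERIALIZE, XMLOP_EXISTS, XMLOP_CONCAT } XmlOp;

#define INVALID_COLLATION 0
#define DEFAULT_COLLATION 100

static int
xml_expr_collation(XmlOp op)
{
    /* only XMLSERIALIZE yields collatable (text) output */
    return (op == XMLOP_SERIALIZE) ? DEFAULT_COLLATION : INVALID_COLLATION;
}

int
main(void)
{
    printf("%d %d\n",
           xml_expr_collation(XMLOP_SERIALIZE),
           xml_expr_collation(XMLOP_EXISTS));
    return 0;
}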

Some files were not shown because too many files have changed in this diff.