Pgindent run before 9.1 beta2.

Bruce Momjian 2011-06-09 14:32:50 -04:00
parent adf43b2b36
commit 6560407c7d
92 changed files with 644 additions and 620 deletions

View File

@@ -381,7 +381,8 @@ check_new_cluster_is_empty(void)
static void
check_old_cluster_has_new_cluster_dbs(void)
{
int old_dbnum, new_dbnum;
int old_dbnum,
new_dbnum;
for (new_dbnum = 0; new_dbnum < new_cluster.dbarr.ndbs; new_dbnum++)
{

View File

@@ -99,10 +99,11 @@ verify_directories(void)
if (access(".", R_OK | W_OK
#ifndef WIN32
/*
* Do a directory execute check only on Unix because execute permission
* on NTFS means "can execute scripts", which we don't care about.
* Also, X_OK is not defined in the Windows API.
* Do a directory execute check only on Unix because execute permission on
* NTFS means "can execute scripts", which we don't care about. Also, X_OK
* is not defined in the Windows API.
*/
| X_OK
#endif
@@ -132,6 +133,7 @@ check_data_dir(const char *pg_data)
{
char subDirName[MAXPGPATH];
int subdirnum;
/* start check with top-most directory */
const char *requiredSubdirs[] = {"", "base", "global", "pg_clog",
"pg_multixact", "pg_subtrans", "pg_tblspc", "pg_twophase",
@@ -142,6 +144,7 @@ check_data_dir(const char *pg_data)
++subdirnum)
{
struct stat statBuf;
snprintf(subDirName, sizeof(subDirName), "%s/%s", pg_data,
requiredSubdirs[subdirnum]);
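
The hunks above re-wrap the comment explaining why the execute-permission bit is requested only on Unix. A minimal sketch of that portability pattern, assuming a Unix build (dir_is_usable is a hypothetical name, not pg_upgrade's actual helper):

#include <unistd.h>				/* access(), R_OK, W_OK, X_OK */

static int
dir_is_usable(const char *path)
{
	int			mode = R_OK | W_OK;

#ifndef WIN32
	/*
	 * X_OK is not defined in the Windows API, and execute permission on
	 * NTFS only means "can execute scripts", so request it on Unix only.
	 */
	mode |= X_OK;
#endif
	return access(path, mode) == 0;
}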

View File

@@ -158,6 +158,7 @@ parseCommandLine(int argc, char *argv[])
case 'u':
pg_free(os_info.user);
os_info.user = pg_strdup(optarg);
/*
* Push the user name into the environment so pre-9.1
* pg_ctl/libpq uses it.

View File

@@ -146,9 +146,11 @@ start_postmaster(ClusterInfo *cluster)
PGconn *conn;
bool exit_hook_registered = false;
int pg_ctl_return = 0;
#ifndef WIN32
char *output_filename = log_opts.filename;
#else
/*
* On Win32, we can't send both pg_upgrade output and pg_ctl output to the
* same file because we get the error: "The process cannot access the file
@@ -185,8 +187,8 @@ start_postmaster(ClusterInfo *cluster)
log_opts.filename);
/*
* Don't throw an error right away, let connecting throw the error
* because it might supply a reason for the failure.
* Don't throw an error right away, let connecting throw the error because
* it might supply a reason for the failure.
*/
pg_ctl_return = exec_prog(false, "%s", cmd);
@@ -218,6 +220,7 @@ stop_postmaster(bool fast)
char cmd[MAXPGPATH];
const char *bindir;
const char *datadir;
#ifndef WIN32
char *output_filename = log_opts.filename;
#else

View File

@@ -281,4 +281,3 @@ pg_putenv(const char *var, const char *val)
#endif
}
}

View File

@@ -94,8 +94,8 @@ initGinState(GinState *state, Relation index)
* type for a noncollatable indexed data type (for instance, hstore
* uses text index entries). If there's no index collation then
* specify default collation in case the support functions need
* collation. This is harmless if the support functions don't
* care about collation, so we just do it unconditionally. (We could
* collation. This is harmless if the support functions don't care
* about collation, so we just do it unconditionally. (We could
* alternatively call get_typcollation, but that seems like expensive
* overkill --- there aren't going to be any cases where a GIN storage
* type has a nondefault collation.)

View File

@@ -6656,15 +6656,15 @@ StartupXLOG(void)
ereport(FATAL,
(errmsg("requested recovery stop point is before consistent recovery point")));
}
/*
* Ran off end of WAL before reaching end-of-backup WAL record,
* or minRecoveryPoint. That's usually a bad sign, indicating that
* you tried to recover from an online backup but never called
* Ran off end of WAL before reaching end-of-backup WAL record, or
* minRecoveryPoint. That's usually a bad sign, indicating that you
* tried to recover from an online backup but never called
* pg_stop_backup(), or you didn't archive all the WAL up to that
* point. However, this also happens in crash recovery, if the
* system crashes while an online backup is in progress. We
* must not treat that as an error, or the database will refuse
* to start up.
* point. However, this also happens in crash recovery, if the system
* crashes while an online backup is in progress. We must not treat
* that as an error, or the database will refuse to start up.
*/
if (InArchiveRecovery)
{

View File

@@ -1773,8 +1773,8 @@ index_build(Relation heapRelation,
* However, when reindexing an existing index, we should do nothing here.
* Any HOT chains that are broken with respect to the index must predate
* the index's original creation, so there is no need to change the
* index's usability horizon. Moreover, we *must not* try to change
* the index's pg_index entry while reindexing pg_index itself, and this
* index's usability horizon. Moreover, we *must not* try to change the
* index's pg_index entry while reindexing pg_index itself, and this
* optimization nicely prevents that.
*/
if (indexInfo->ii_BrokenHotChain && !isreindex)
@@ -2136,8 +2136,8 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* It's a HOT-updated tuple deleted by our own xact.
* We can assume the deletion will commit (else the
* index contents don't matter), so treat the same
* as RECENTLY_DEAD HOT-updated tuples.
* index contents don't matter), so treat the same as
* RECENTLY_DEAD HOT-updated tuples.
*/
indexIt = false;
/* mark the index as unsafe for old snapshots */
@@ -2146,9 +2146,9 @@ IndexBuildHeapScan(Relation heapRelation,
else
{
/*
* It's a regular tuple deleted by our own xact.
* Index it but don't check for uniqueness, the same
* as a RECENTLY_DEAD tuple.
* It's a regular tuple deleted by our own xact. Index
* it but don't check for uniqueness, the same as a
* RECENTLY_DEAD tuple.
*/
indexIt = true;
}
@@ -2281,9 +2281,8 @@ IndexCheckExclusion(Relation heapRelation,
/*
* If we are reindexing the target index, mark it as no longer being
* reindexed, to forestall an Assert in index_beginscan when we try to
* use the index for probes. This is OK because the index is now
* fully valid.
* reindexed, to forestall an Assert in index_beginscan when we try to use
* the index for probes. This is OK because the index is now fully valid.
*/
if (ReindexIsCurrentlyProcessingIndex(RelationGetRelid(indexRelation)))
ResetReindexProcessing();

View File

@@ -60,8 +60,8 @@ AlterTableCreateToastTable(Oid relOid, Datum reloptions)
/*
* Grab a DDL-exclusive lock on the target table, since we'll update the
* pg_class tuple. This is redundant for all present users. Tuple toasting
* behaves safely in the face of a concurrent TOAST table add.
* pg_class tuple. This is redundant for all present users. Tuple
* toasting behaves safely in the face of a concurrent TOAST table add.
*/
rel = heap_open(relOid, ShareUpdateExclusiveLock);

View File

@@ -185,9 +185,10 @@ DefineIndex(RangeVar *heapRelation,
rel->rd_rel->relkind != RELKIND_UNCATALOGED)
{
if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
/*
* Custom error message for FOREIGN TABLE since the term is
* close to a regular table and can confuse the user.
* Custom error message for FOREIGN TABLE since the term is close
* to a regular table and can confuse the user.
*/
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),

View File

@@ -1077,12 +1077,12 @@ read_info(SeqTable elm, Relation rel, Buffer *buf)
tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
/*
* Previous releases of Postgres neglected to prevent SELECT FOR UPDATE
* on a sequence, which would leave a non-frozen XID in the sequence
* tuple's xmax, which eventually leads to clog access failures or worse.
* If we see this has happened, clean up after it. We treat this like a
* hint bit update, ie, don't bother to WAL-log it, since we can certainly
* do this again if the update gets lost.
* Previous releases of Postgres neglected to prevent SELECT FOR UPDATE on
* a sequence, which would leave a non-frozen XID in the sequence tuple's
* xmax, which eventually leads to clog access failures or worse. If we
* see this has happened, clean up after it. We treat this like a hint
* bit update, ie, don't bother to WAL-log it, since we can certainly do
* this again if the update gets lost.
*/
if (HeapTupleHeaderGetXmax(tuple.t_data) != InvalidTransactionId)
{

View File

@@ -2679,7 +2679,8 @@ AlterTableGetLockLevel(List *cmds)
* These subcommands affect implicit row type conversion. They
* have affects similar to CREATE/DROP CAST on queries. We
* don't provide for invalidating parse trees as a result of
* such changes. Do avoid concurrent pg_class updates, though.
* such changes. Do avoid concurrent pg_class updates,
* though.
*/
case AT_AddOf:
case AT_DropOf:
@@ -4083,6 +4084,7 @@ check_of_type(HeapTuple typetuple)
Assert(OidIsValid(typ->typrelid));
typeRelation = relation_open(typ->typrelid, AccessShareLock);
typeOk = (typeRelation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE);
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
* commit. That will prevent someone else from deleting or ALTERing
@@ -8748,6 +8750,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
for (; table_attno <= tableTupleDesc->natts; table_attno++)
{
Form_pg_attribute table_attr = tableTupleDesc->attrs[table_attno - 1];
if (!table_attr->attisdropped)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
@@ -8802,8 +8805,8 @@ ATExecDropOf(Relation rel, LOCKMODE lockmode)
RelationGetRelationName(rel))));
/*
* We don't bother to check ownership of the type --- ownership of the table
* is presumed enough rights. No lock required on the type, either.
* We don't bother to check ownership of the type --- ownership of the
* table is presumed enough rights. No lock required on the type, either.
*/
drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype);

View File

@@ -96,6 +96,7 @@ get_ts_parser_func(DefElem *defel, int attnum)
break;
case Anum_pg_ts_parser_prslextype:
nargs = 1;
/*
* Note: because the lextype method returns type internal, it must
* have an internal-type argument for security reasons. The

View File

@@ -483,8 +483,8 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
return scanned_tuples;
/*
* If scanned_pages is zero but total_pages isn't, keep the existing
* value of reltuples.
* If scanned_pages is zero but total_pages isn't, keep the existing value
* of reltuples.
*/
if (scanned_pages == 0)
return old_rel_tuples;
@@ -498,23 +498,23 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
/*
* Okay, we've covered the corner cases. The normal calculation is to
* convert the old measurement to a density (tuples per page), then
* update the density using an exponential-moving-average approach,
* and finally compute reltuples as updated_density * total_pages.
* convert the old measurement to a density (tuples per page), then update
* the density using an exponential-moving-average approach, and finally
* compute reltuples as updated_density * total_pages.
*
* For ANALYZE, the moving average multiplier is just the fraction of
* the table's pages we scanned. This is equivalent to assuming
* that the tuple density in the unscanned pages didn't change. Of
* course, it probably did, if the new density measurement is different.
* But over repeated cycles, the value of reltuples will converge towards
* the correct value, if repeated measurements show the same new density.
* For ANALYZE, the moving average multiplier is just the fraction of the
* table's pages we scanned. This is equivalent to assuming that the
* tuple density in the unscanned pages didn't change. Of course, it
* probably did, if the new density measurement is different. But over
* repeated cycles, the value of reltuples will converge towards the
* correct value, if repeated measurements show the same new density.
*
* For VACUUM, the situation is a bit different: we have looked at a
* nonrandom sample of pages, but we know for certain that the pages we
* didn't look at are precisely the ones that haven't changed lately.
* Thus, there is a reasonable argument for doing exactly the same thing
* as for the ANALYZE case, that is use the old density measurement as
* the value for the unscanned pages.
* as for the ANALYZE case, that is use the old density measurement as the
* value for the unscanned pages.
*
* This logic could probably use further refinement.
*/
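
The comment above amounts to a short formula: blend the old tuple density with the freshly measured one, weighted by the fraction of pages scanned. A minimal sketch of that arithmetic, assuming the corner cases handled earlier in the function are already out of the way (hypothetical helper name, not the vacuum.c code):

#include <math.h>

static double
moving_average_reltuples(double old_rel_pages, double old_rel_tuples,
						 double total_pages, double scanned_pages,
						 double scanned_tuples)
{
	double		old_density = old_rel_tuples / old_rel_pages;
	double		new_density = scanned_tuples / scanned_pages;
	/* the moving-average multiplier is the fraction of pages scanned */
	double		multiplier = scanned_pages / total_pages;
	double		updated_density;

	updated_density = old_density + (new_density - old_density) * multiplier;
	return floor(updated_density * total_pages + 0.5);
}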

View File

@@ -341,9 +341,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* of pages.
*
* Before entering the main loop, establish the invariant that
* next_not_all_visible_block is the next block number >= blkno that's
* not all-visible according to the visibility map, or nblocks if there's
* no such block. Also, we set up the skipping_all_visible_blocks flag,
* next_not_all_visible_block is the next block number >= blkno that's not
* all-visible according to the visibility map, or nblocks if there's no
* such block. Also, we set up the skipping_all_visible_blocks flag,
* which is needed because we need hysteresis in the decision: once we've
* started skipping blocks, we may as well skip everything up to the next
* not-all-visible block.
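
A hedged sketch of the invariant that comment establishes: next_not_all_visible_block is the first block at or after blkno that the visibility map does not report as all-visible, or nblocks if there is none. All names here, including the map-probe stub, are hypothetical, and the hysteresis flag is omitted:

#include <stdbool.h>

typedef unsigned int BlockNumber;

/* hypothetical stand-in for the visibility-map probe */
static bool
block_all_visible(BlockNumber blkno)
{
	(void) blkno;
	return false;				/* stub: pessimistically scan every block */
}

static BlockNumber
next_not_all_visible(BlockNumber blkno, BlockNumber nblocks)
{
	while (blkno < nblocks && block_all_visible(blkno))
		blkno++;
	return blkno;				/* nblocks if no such block remains */
}
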
@@ -1082,11 +1082,11 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
if (new_rel_pages != old_rel_pages)
{
/*
* Note: we intentionally don't update vacrelstats->rel_pages with
* the new rel size here. If we did, it would amount to assuming that
* the new pages are empty, which is unlikely. Leaving the numbers
* alone amounts to assuming that the new pages have the same tuple
* density as existing ones, which is less unlikely.
* Note: we intentionally don't update vacrelstats->rel_pages with the
* new rel size here. If we did, it would amount to assuming that the
* new pages are empty, which is unlikely. Leaving the numbers alone
* amounts to assuming that the new pages have the same tuple density
* as existing ones, which is less unlikely.
*/
UnlockRelation(onerel, AccessExclusiveLock);
return;

View File

@@ -807,9 +807,9 @@ check_client_encoding(char **newval, void **extra, GucSource source)
*
* XXX Although canonicalizing seems like a good idea in the abstract, it
* breaks pre-9.1 JDBC drivers, which expect that if they send "UNICODE"
* as the client_encoding setting then it will read back the same way.
* As a workaround, don't replace the string if it's "UNICODE". Remove
* that hack when pre-9.1 JDBC drivers are no longer in use.
* as the client_encoding setting then it will read back the same way. As
* a workaround, don't replace the string if it's "UNICODE". Remove that
* hack when pre-9.1 JDBC drivers are no longer in use.
*/
if (strcmp(*newval, canonical_name) != 0 &&
strcmp(*newval, "UNICODE") != 0)

View File

@@ -265,8 +265,8 @@ ExecHashJoin(HashJoinState *node)
/*
* We check for interrupts here because this corresponds to
* where we'd fetch a row from a child plan node in other
* join types.
* where we'd fetch a row from a child plan node in other join
* types.
*/
CHECK_FOR_INTERRUPTS();

View File

@@ -74,6 +74,7 @@ geqo(PlannerInfo *root, int number_of_rels, List *initial_rels)
Pool *pool;
int pool_size,
number_generations;
#ifdef GEQO_DEBUG
int status_interval;
#endif

View File

@@ -3383,9 +3383,9 @@ add_sort_column(AttrNumber colIdx, Oid sortOp, Oid coll, bool nulls_first,
* opposite nulls direction is redundant.
*
* We could probably consider sort keys with the same sortop and
* different collations to be redundant too, but for the moment
* treat them as not redundant. This will be needed if we ever
* support collations with different notions of equality.
* different collations to be redundant too, but for the moment treat
* them as not redundant. This will be needed if we ever support
* collations with different notions of equality.
*/
if (sortColIdx[i] == colIdx &&
sortOperators[numCols] == sortOp &&

View File

@@ -1034,8 +1034,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
if (parse->hasAggs)
{
/*
* Collect statistics about aggregates for estimating costs.
* Note: we do not attempt to detect duplicate aggregates here; a
* Collect statistics about aggregates for estimating costs. Note:
* we do not attempt to detect duplicate aggregates here; a
* somewhat-overestimated cost is okay for our present purposes.
*/
count_agg_clauses(root, (Node *) tlist, &agg_costs);

View File

@@ -933,8 +933,8 @@ generate_setop_tlist(List *colTypes, List *colCollations,
}
/*
* Ensure the tlist entry's exposed collation matches the set-op.
* This is necessary because plan_set_operations() reports the result
* Ensure the tlist entry's exposed collation matches the set-op. This
* is necessary because plan_set_operations() reports the result
* ordering as a list of SortGroupClauses, which don't carry collation
* themselves but just refer to tlist entries. If we don't show the
* right collation then planner.c might do the wrong thing in

View File

@@ -2884,9 +2884,9 @@ eval_const_expressions_mutator(Node *node,
/*
* We can remove null constants from the list. For a non-null
* constant, if it has not been preceded by any other
* non-null-constant expressions then it is the result.
* Otherwise, it's the next argument, but we can drop following
* arguments since they will never be reached.
* non-null-constant expressions then it is the result. Otherwise,
* it's the next argument, but we can drop following arguments
* since they will never be reached.
*/
if (IsA(e, Const))
{
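
The comment above describes how a COALESCE-style argument list is folded: null constants are dropped, and the first non-null constant ends the list because nothing after it can be reached. A minimal sketch over a flat array (the hypothetical Arg type stands in for the planner's Node lists):

#include <stdbool.h>

typedef struct
{
	bool		is_const;		/* argument is a constant? */
	bool		is_null;		/* ... a null constant? */
} Arg;

static int
simplify_args(Arg *args, int nargs)
{
	int			in,
				out = 0;

	for (in = 0; in < nargs; in++)
	{
		if (args[in].is_const && args[in].is_null)
			continue;			/* null constants are simply removed */
		args[out++] = args[in];
		if (args[in].is_const)
			break;				/* non-null constant is the result; later
								 * arguments can never be reached */
	}
	return out;					/* new argument count */
}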

View File

@@ -1100,8 +1100,8 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
* doesn't process rangetable entries, and (2) we need to label the VALUES
* RTE with column collations for use in the outer query. We don't
* consider conflict of implicit collations to be an error here; instead
* the column will just show InvalidOid as its collation, and you'll get
* a failure later if that results in failure to resolve a collation.
* the column will just show InvalidOid as its collation, and you'll get a
* failure later if that results in failure to resolve a collation.
*
* Note we modify the per-column expression lists in-place.
*/

View File

@@ -1486,10 +1486,10 @@ ServerLoop(void)
WalWriterPID = StartWalWriter();
/*
* If we have lost the autovacuum launcher, try to start a new one.
* We don't want autovacuum to run in binary upgrade mode because
* autovacuum might update relfrozenxid for empty tables before
* the physical files are put in place.
* If we have lost the autovacuum launcher, try to start a new one. We
* don't want autovacuum to run in binary upgrade mode because
* autovacuum might update relfrozenxid for empty tables before the
* physical files are put in place.
*/
if (!IsBinaryUpgrade && AutoVacPID == 0 &&
(AutoVacuumingActive() || start_autovac_launcher) &&

View File

@@ -242,8 +242,8 @@ pg_set_regex_collation(Oid collation)
{
/*
* NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T;
* the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does
* not have to be considered below.
* the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not
* have to be considered below.
*/
pg_regex_locale = pg_newlocale_from_collation(collation);
}

View File

@@ -236,8 +236,8 @@ SyncRepWaitForLSN(XLogRecPtr XactCommitLSN)
/*
* If the postmaster dies, we'll probably never get an
* acknowledgement, because all the wal sender processes will exit.
* So just bail out.
* acknowledgement, because all the wal sender processes will exit. So
* just bail out.
*/
if (!PostmasterIsAlive(true))
{

View File

@@ -455,17 +455,17 @@ rewriteRuleAction(Query *parsetree,
}
/*
* If the original query has any CTEs, copy them into the rule action.
* But we don't need them for a utility action.
* If the original query has any CTEs, copy them into the rule action. But
* we don't need them for a utility action.
*/
if (parsetree->cteList != NIL && sub_action->commandType != CMD_UTILITY)
{
ListCell *lc;
/*
* Annoying implementation restriction: because CTEs are identified
* by name within a cteList, we can't merge a CTE from the original
* query if it has the same name as any CTE in the rule action.
* Annoying implementation restriction: because CTEs are identified by
* name within a cteList, we can't merge a CTE from the original query
* if it has the same name as any CTE in the rule action.
*
* This could possibly be fixed by using some sort of internally
* generated ID, instead of names, to link CTE RTEs to their CTEs.
@@ -2116,9 +2116,9 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
/*
* If the original query has a CTE list, and we generated more than one
* non-utility result query, we have to fail because we'll have copied
* the CTE list into each result query. That would break the expectation
* of single evaluation of CTEs. This could possibly be fixed by
* non-utility result query, we have to fail because we'll have copied the
* CTE list into each result query. That would break the expectation of
* single evaluation of CTEs. This could possibly be fixed by
* restructuring so that a CTE list can be shared across multiple Query
* and PlannableStatement nodes.
*/

View File

@@ -4049,10 +4049,11 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
/* Compatible with postgresql < 8.4 when DateStyle = 'iso' */
case INTSTYLE_POSTGRES:
cp = AddPostgresIntPart(cp, year, "year", &is_zero, &is_before);
/*
* Ideally we should spell out "month" like we do for "year"
* and "day". However, for backward compatibility, we can't
* easily fix this. bjm 2011-05-24
* Ideally we should spell out "month" like we do for "year" and
* "day". However, for backward compatibility, we can't easily
* fix this. bjm 2011-05-24
*/
cp = AddPostgresIntPart(cp, mon, "mon", &is_zero, &is_before);
cp = AddPostgresIntPart(cp, mday, "day", &is_zero, &is_before);

View File

@@ -3024,8 +3024,8 @@ ri_GenerateQualCollation(StringInfo buf, Oid collation)
collname = NameStr(colltup->collname);
/*
* We qualify the name always, for simplicity and to ensure the query
* is not search-path-dependent.
* We qualify the name always, for simplicity and to ensure the query is
* not search-path-dependent.
*/
quoteOneName(onename, get_namespace_name(colltup->collnamespace));
appendStringInfo(buf, " COLLATE %s", onename);
@@ -3964,8 +3964,8 @@ ri_AttributesEqual(Oid eq_opr, Oid typeid,
}
/*
* Apply the comparison operator. We assume it doesn't
* care about collations.
* Apply the comparison operator. We assume it doesn't care about
* collations.
*/
return DatumGetBool(FunctionCall2(&entry->eq_opr_finfo,
oldvalue, newvalue));

View File

@@ -5193,8 +5193,8 @@ get_rule_expr(Node *node, deparse_context *context,
if (caseexpr->arg)
{
/*
* The parser should have produced WHEN clauses of
* the form "CaseTestExpr = RHS", possibly with an
* The parser should have produced WHEN clauses of the
* form "CaseTestExpr = RHS", possibly with an
* implicit coercion inserted above the CaseTestExpr.
* For accurate decompilation of rules it's essential
* that we show just the RHS. However in an

View File

@@ -2403,9 +2403,9 @@ eqjoinsel_semi(Oid operator,
* before doing the division.
*
* Crude as the above is, it's completely useless if we don't have
* reliable ndistinct values for both sides. Hence, if either nd1
* or nd2 is default, punt and assume half of the uncertain rows
* have join partners.
* reliable ndistinct values for both sides. Hence, if either nd1 or
* nd2 is default, punt and assume half of the uncertain rows have
* join partners.
*/
if (nd1 != DEFAULT_NUM_DISTINCT && nd2 != DEFAULT_NUM_DISTINCT)
{
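
A hedged sketch of the punt described above: with trustworthy ndistinct estimates on both sides, the uncertain rows are scaled by a clamped distinct-value ratio; otherwise half of them are assumed to have join partners. This is a simplified stand-in, not the eqjoinsel_semi code, and DEFAULT_NUM_DISTINCT here is the usual fallback constant:

#define DEFAULT_NUM_DISTINCT	200

static double
uncertain_match_fraction(double nd1, double nd2)
{
	if (nd1 != DEFAULT_NUM_DISTINCT && nd2 != DEFAULT_NUM_DISTINCT)
	{
		/* a side cannot match more distinct values than it has: clamp */
		double		frac = nd2 / nd1;

		return frac > 1.0 ? 1.0 : frac;
	}
	/* either estimate is a default: assume half the rows find partners */
	return 0.5;
}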

View File

@@ -291,10 +291,10 @@ lookup_type_cache(Oid type_id, int flags)
HTEqualStrategyNumber);
/*
* If the proposed equality operator is array_eq or record_eq,
* check to see if the element type or column types support equality.
* If not, array_eq or record_eq would fail at runtime, so we don't
* want to report that the type has equality.
* If the proposed equality operator is array_eq or record_eq, check
* to see if the element type or column types support equality. If
* not, array_eq or record_eq would fail at runtime, so we don't want
* to report that the type has equality.
*/
if (eq_opr == ARRAY_EQ_OP &&
!array_element_has_equality(typentry))
@@ -468,9 +468,9 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)
/*
* Link to the tupdesc and increment its refcount (we assert it's a
* refcounted descriptor). We don't use IncrTupleDescRefCount() for
* this, because the reference mustn't be entered in the current
* resource owner; it can outlive the current query.
* refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
* because the reference mustn't be entered in the current resource owner;
* it can outlive the current query.
*/
typentry->tupDesc = RelationGetDescr(rel);

View File

@@ -2284,7 +2284,6 @@ strreplace(char *str, char *needle, char *replacement)
memmove(s + replacementlen, rest, strlen(rest) + 1);
}
}
#endif /* WIN32 */
/*
@@ -2306,6 +2305,7 @@ localemap(char *locale)
locale = xstrdup(locale);
#ifdef WIN32
/*
* Map the full country name to an abbreviation that setlocale() accepts.
*
@@ -2321,14 +2321,14 @@ localemap(char *locale)
/*
* The ISO-3166 country code for Macau S.A.R. is MAC, but Windows doesn't
* seem to recognize that. And Macau isn't listed in the table of
* accepted abbreviations linked above.
* seem to recognize that. And Macau isn't listed in the table of accepted
* abbreviations linked above.
*
* Fortunately, "ZHM" seems to be accepted as an alias for
* "Chinese (Traditional)_Macau S.A.R..950", so we use that. Note that
* it's unlike HKG and ARE, ZHM is an alias for the whole locale name,
* not just the country part. I'm not sure where that "ZHM" comes from,
* must be some legacy naming scheme. But hey, it works.
* Fortunately, "ZHM" seems to be accepted as an alias for "Chinese
* (Traditional)_Macau S.A.R..950", so we use that. Note that it's unlike
* HKG and ARE, ZHM is an alias for the whole locale name, not just the
* country part. I'm not sure where that "ZHM" comes from, must be some
* legacy naming scheme. But hey, it works.
*
* Some versions of Windows spell it "Macau", others "Macao".
*/
@@ -3000,9 +3000,9 @@ main(int argc, char *argv[])
else if (!pg_valid_server_encoding_id(ctype_enc))
{
/*
* We recognized it, but it's not a legal server encoding.
* On Windows, UTF-8 works with any locale, so we can fall back
* to UTF-8.
* We recognized it, but it's not a legal server encoding. On
* Windows, UTF-8 works with any locale, so we can fall back to
* UTF-8.
*/
#ifdef WIN32
printf(_("Encoding %s implied by locale is not allowed as a server-side encoding.\n"

View File

@@ -370,9 +370,9 @@ start_postmaster(void)
* Since there might be quotes to handle here, it is easier simply to pass
* everything to a shell to process them.
*
* XXX it would be better to fork and exec so that we would know the
* child postmaster's PID directly; then test_postmaster_connection could
* use the PID without having to rely on reading it back from the pidfile.
* XXX it would be better to fork and exec so that we would know the child
* postmaster's PID directly; then test_postmaster_connection could use
* the PID without having to rely on reading it back from the pidfile.
*/
if (log_file != NULL)
snprintf(cmd, MAXPGPATH, SYSTEMQUOTE "\"%s\" %s%s < \"%s\" >> \"%s\" 2>&1 &" SYSTEMQUOTE,
@@ -492,8 +492,8 @@ test_postmaster_connection(bool do_checkpoint)
if (pmpid <= 0 || pmstart < start_time - 2)
{
/*
* Set flag to report stale pidfile if it doesn't
* get overwritten before we give up waiting.
* Set flag to report stale pidfile if it doesn't get
* overwritten before we give up waiting.
*/
found_stale_pidfile = true;
}
@@ -570,11 +570,11 @@ test_postmaster_connection(bool do_checkpoint)
/*
* The postmaster should create postmaster.pid very soon after being
* started. If it's not there after we've waited 5 or more seconds,
* assume startup failed and give up waiting. (Note this covers
* both cases where the pidfile was never created, and where it was
* created and then removed during postmaster exit.) Also, if there
* *is* a file there but it appears stale, issue a suitable warning
* and give up waiting.
* assume startup failed and give up waiting. (Note this covers both
* cases where the pidfile was never created, and where it was created
* and then removed during postmaster exit.) Also, if there *is* a
* file there but it appears stale, issue a suitable warning and give
* up waiting.
*/
if (i >= 5)
{
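
The staleness test that set found_stale_pidfile earlier in this file boils down to two comparisons; a minimal sketch with a hypothetical helper name:

#include <stdbool.h>
#include <time.h>

static bool
pidfile_is_stale(long pmpid, time_t pmstart, time_t start_time)
{
	/*
	 * Stale if the PID field is nonsense, or the recorded start time
	 * predates our own start by more than the 2-second slack allowed
	 * for clock rounding.
	 */
	return pmpid <= 0 || pmstart < start_time - 2;
}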

View File

@@ -8072,8 +8072,8 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
{
/*
* This is a dropped attribute and we're in binary_upgrade mode.
* Insert a placeholder for it in the CREATE TYPE command, and
* set length and alignment with direct UPDATE to the catalogs
* Insert a placeholder for it in the CREATE TYPE command, and set
* length and alignment with direct UPDATE to the catalogs
* afterwards. See similar code in dumpTableSchema().
*/
appendPQExpBuffer(q, "%s INTEGER /* dummy */", fmtId(attname));
@@ -8380,8 +8380,8 @@ dumpProcLang(Archive *fout, ProcLangInfo *plang)
* However, for a language that belongs to an extension, we must not use
* the shouldDumpProcLangs heuristic, but just dump the language iff we're
* told to (via dobj.dump). Generally the support functions will belong
* to the same extension and so have the same dump flags ... if they don't,
* this might not work terribly nicely.
* to the same extension and so have the same dump flags ... if they
* don't, this might not work terribly nicely.
*/
useParams = (funcInfo != NULL &&
(inlineInfo != NULL || !OidIsValid(plang->laninline)) &&
@@ -11181,8 +11181,8 @@ dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo)
return;
/*
* FDWs that belong to an extension are dumped based on their "dump" field.
* Otherwise omit them if we are only dumping some specific object.
* FDWs that belong to an extension are dumped based on their "dump"
* field. Otherwise omit them if we are only dumping some specific object.
*/
if (!fdwinfo->dobj.ext_member)
if (!include_everything)
@@ -12085,6 +12085,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
"UNLOGGED " : "",
reltypename,
fmtId(tbinfo->dobj.name));
/*
* In case of a binary upgrade, we dump the table normally and attach
* it to the type afterward.

View File

@@ -193,9 +193,9 @@ main(int argc, char *argv[])
appendPQExpBuffer(&sql, ";\n");
/*
* Connect to the 'postgres' database by default, except have
* the 'postgres' user use 'template1' so he can create the
* 'postgres' database.
* Connect to the 'postgres' database by default, except have the
* 'postgres' user use 'template1' so he can create the 'postgres'
* database.
*/
conn = connectDatabase(strcmp(dbname, "postgres") == 0 ? "template1" : "postgres",
host, port, username, prompt_password, progname);

View File

@@ -114,9 +114,8 @@ main(int argc, char *argv[])
fmtId(dbname));
/*
* Connect to the 'postgres' database by default, except have
* the 'postgres' user use 'template1' so he can drop the
* 'postgres' database.
* Connect to the 'postgres' database by default, except have the
* 'postgres' user use 'template1' so he can drop the 'postgres' database.
*/
conn = connectDatabase(strcmp(dbname, "postgres") == 0 ? "template1" : "postgres",
host, port, username, prompt_password, progname);

View File

@@ -228,7 +228,11 @@ ecpg_build_compat_sqlda(int line, PGresult *res, int row, enum COMPAT_MODE compa
strcpy(fname, PQfname(res, i));
sqlda->sqlvar[i].sqlname = fname;
fname += strlen(sqlda->sqlvar[i].sqlname) + 1;
/* this is reserved for future use, so we leave it empty for the time being */
/*
* this is reserved for future use, so we leave it empty for the time
* being
*/
/* sqlda->sqlvar[i].sqlformat = (char *) (long) PQfformat(res, i); */
sqlda->sqlvar[i].sqlxid = PQftype(res, i);
sqlda->sqlvar[i].sqltypelen = PQfsize(res, i);

View File

@@ -1854,6 +1854,7 @@ keep_going: /* We will come back to here until there is
int packetlen;
#ifdef HAVE_UNIX_SOCKETS
/*
* Implement requirepeer check, if requested and it's a
* Unix-domain socket.
@@ -1870,7 +1871,10 @@ keep_going: /* We will come back to here until there is
errno = 0;
if (getpeereid(conn->sock, &uid, &gid) != 0)
{
/* Provide special error message if getpeereid is a stub */
/*
* Provide special error message if getpeereid is a
* stub
*/
if (errno == ENOSYS)
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("requirepeer parameter is not supported on this platform\n"));
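
For context, getpeereid() reports the effective user and group of the peer on a connected Unix-domain socket. A minimal sketch of a requirepeer-style check, assuming a platform that provides getpeereid() natively (libpq carries a replacement for platforms that do not; check_peer_user is a hypothetical name):

#include <errno.h>
#include <pwd.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

static int
check_peer_user(int sock, const char *required_user)
{
	uid_t		uid;
	gid_t		gid;
	struct passwd *pw;

	errno = 0;
	if (getpeereid(sock, &uid, &gid) != 0)
	{
		/* special-case a stub implementation, as the hunk above does */
		if (errno == ENOSYS)
			fprintf(stderr, "requirepeer is not supported on this platform\n");
		return -1;
	}
	pw = getpwuid(uid);
	if (pw == NULL || strcmp(pw->pw_name, required_user) != 0)
		return -1;				/* peer is not the required user */
	return 0;
}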

View File

@@ -4512,8 +4512,8 @@ get_source_line(const char *src, int lineno)
/*
* Sanity check, next < s if the line was all-whitespace, which should
* never happen if Python reported a frame created on that line, but
* check anyway.
* never happen if Python reported a frame created on that line, but check
* anyway.
*/
if (next < s)
return NULL;
@@ -4680,7 +4680,10 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
&tbstr, "\n PL/Python function \"%s\", line %ld, in %s",
proname, plain_lineno - 1, fname);
/* function code object was compiled with "<string>" as the filename */
/*
* function code object was compiled with "<string>" as the
* filename
*/
if (PLy_curr_procedure && plain_filename != NULL &&
strcmp(plain_filename, "<string>") == 0)
{

View File

@@ -81,8 +81,8 @@ inet_net_ntop(int af, const void *src, int bits, char *dst, size_t size)
* We need to cover both the address family constants used by the PG inet
* type (PGSQL_AF_INET and PGSQL_AF_INET6) and those used by the system
* libraries (AF_INET and AF_INET6). We can safely assume PGSQL_AF_INET
* == AF_INET, but the INET6 constants are very likely to be different.
* If AF_INET6 isn't defined, silently ignore it.
* == AF_INET, but the INET6 constants are very likely to be different. If
* AF_INET6 isn't defined, silently ignore it.
*/
switch (af)
{

View File

@@ -1480,9 +1480,9 @@ pg_timezone_initialize(void)
* postgresql.conf, this code will not do what you might expect, namely
* call select_default_timezone() and install that value as the setting.
* Rather, the previously active setting --- typically the one from
* postgresql.conf --- will be reinstalled, relabeled as PGC_S_ENV_VAR.
* If we did try to install the "correct" default value, the effect would
* be that each postmaster child would independently run an extremely
* postgresql.conf --- will be reinstalled, relabeled as PGC_S_ENV_VAR. If
* we did try to install the "correct" default value, the effect would be
* that each postmaster child would independently run an extremely
* expensive search of the timezone database, bringing the database to its
* knees for possibly multiple seconds. This is so unpleasant, and could
* so easily be triggered quite unintentionally, that it seems better to