Revise GUC names quoting in messages again

After further review, we want to move in the direction of always
quoting GUC names in error messages, rather than the previous (PG16)
wildly mixed practice or the intermittent (mid-PG17) idea of doing
this depending on how possibly confusing the GUC name is.

This commit applies appropriate quotes to (almost?) all mentions of
GUC names in error messages.  It partially supersedes a243569bf6 and
8d9978a717, which had moved things a bit in the opposite direction
but which then were abandoned in a partial state.

Author: Peter Smith <smithpb2250@gmail.com>
Discussion: https://www.postgresql.org/message-id/flat/CAHut%2BPv-kSN8SkxSdoHano_wPubqcg5789ejhCDZAcLFceBR-w%40mail.gmail.com
This commit is contained in:
Peter Eisentraut 2024-05-17 11:23:08 +02:00
parent be5942aee7
commit 17974ec259
79 changed files with 208 additions and 215 deletions

View File

@ -831,7 +831,7 @@ apw_start_leader_worker(void)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("could not register background process"),
errhint("You may need to increase max_worker_processes.")));
errhint("You may need to increase \"max_worker_processes\".")));
status = WaitForBackgroundWorkerStartup(handle, &pid);
if (status != BGWH_STARTED)
@ -867,7 +867,7 @@ apw_start_database_worker(void)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("registering dynamic bgworker autoprewarm failed"),
errhint("Consider increasing configuration parameter max_worker_processes.")));
errhint("Consider increasing configuration parameter \"max_worker_processes\".")));
/*
* Ignore return value; if it fails, postmaster has died, but we have

View File

@ -1660,7 +1660,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
if (!pgss || !pgss_hash)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
InitMaterializedSRF(fcinfo, 0);
@ -1989,7 +1989,7 @@ pg_stat_statements_info(PG_FUNCTION_ARGS)
if (!pgss || !pgss_hash)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
@ -2671,7 +2671,7 @@ entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)
if (!pgss || !pgss_hash)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\"")));
LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
num_entries = hash_get_num_entries(pgss_hash);

View File

@ -406,7 +406,7 @@ _PG_init(void)
if (IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("sepgsql must be loaded via shared_preload_libraries")));
errmsg("sepgsql must be loaded via \"shared_preload_libraries\"")));
/*
* Check availability of SELinux on the platform. If disabled, we cannot

View File

@ -220,7 +220,7 @@ ORDER BY o.slot_name, c.slot_name;
-- released even when raise error during creating the target slot.
SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'failed'); -- error
ERROR: all replication slots are in use
HINT: Free one or increase max_replication_slots.
HINT: Free one or increase "max_replication_slots".
-- temporary slots were dropped automatically
SELECT pg_drop_replication_slot('orig_slot1');
pg_drop_replication_slot

View File

@ -533,17 +533,10 @@ Hint: The addendum, written as a complete sentence.
<title>Use of Quotes</title>
<para>
Always use quotes to delimit file names, user-supplied identifiers, and
other variables that might contain words. Do not use them to mark up
variables that will not contain words (for example, operator names).
</para>
<para>
In messages containing configuration variable names, do not include quotes
when the names are visibly not natural English words, such as when they
have underscores, are all-uppercase or have mixed case. Otherwise, quotes
must be added. Do include quotes in a message where an arbitrary variable
name is to be expanded.
Always use quotes to delimit file names, user-supplied identifiers,
configuration variable names, and other variables that might contain
words. Do not use them to mark up variables that will not contain words
(for example, operator names).
</para>
<para>

View File

@ -42,7 +42,7 @@ ginCombineData(RBTNode *existing, const RBTNode *newdata, void *arg)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("posting list is too long"),
errhint("Reduce maintenance_work_mem.")));
errhint("Reduce \"maintenance_work_mem\".")));
accum->allocatedMemory -= GetMemoryChunkSpace(eo->list);
eo->maxcount *= 2;

View File

@ -2327,7 +2327,7 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel)
vacrel->dbname, vacrel->relnamespace, vacrel->relname,
vacrel->num_index_scans),
errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
errhint("Consider increasing configuration parameter maintenance_work_mem or autovacuum_work_mem.\n"
errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
"You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
/* Stop applying cost limits from this point on */

View File

@ -106,14 +106,14 @@ check_default_table_access_method(char **newval, void **extra, GucSource source)
{
if (**newval == '\0')
{
GUC_check_errdetail("%s cannot be empty.",
GUC_check_errdetail("\"%s\" cannot be empty.",
"default_table_access_method");
return false;
}
if (strlen(*newval) >= NAMEDATALEN)
{
GUC_check_errdetail("%s is too long (maximum %d characters).",
GUC_check_errdetail("\"%s\" is too long (maximum %d characters).",
"default_table_access_method", NAMEDATALEN - 1);
return false;
}

View File

@ -384,9 +384,9 @@ error_commit_ts_disabled(void)
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
RecoveryInProgress() ?
errhint("Make sure the configuration parameter %s is set on the primary server.",
errhint("Make sure the configuration parameter \"%s\" is set on the primary server.",
"track_commit_timestamp") :
errhint("Make sure the configuration parameter %s is set.",
errhint("Make sure the configuration parameter \"%s\" is set.",
"track_commit_timestamp")));
}

View File

@ -1151,7 +1151,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
MultiXactState->offsetStopLimit - nextOffset - 1,
nmembers,
MultiXactState->offsetStopLimit - nextOffset - 1),
errhint("Execute a database-wide VACUUM in database with OID %u with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.",
errhint("Execute a database-wide VACUUM in database with OID %u with reduced \"vacuum_multixact_freeze_min_age\" and \"vacuum_multixact_freeze_table_age\" settings.",
MultiXactState->oldestMultiXactDB)));
}
@ -1187,7 +1187,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
MultiXactState->offsetStopLimit - nextOffset + nmembers,
MultiXactState->oldestMultiXactDB,
MultiXactState->offsetStopLimit - nextOffset + nmembers),
errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.")));
errhint("Execute a database-wide VACUUM in that database with reduced \"vacuum_multixact_freeze_min_age\" and \"vacuum_multixact_freeze_table_age\" settings.")));
ExtendMultiXactMember(nextOffset, nmembers);

View File

@ -91,7 +91,7 @@ void
RmgrNotFound(RmgrId rmid)
{
ereport(ERROR, (errmsg("resource manager with ID %d not registered", rmid),
errhint("Include the extension module that implements this resource manager in shared_preload_libraries.")));
errhint("Include the extension module that implements this resource manager in \"shared_preload_libraries\".")));
}
/*
@ -118,7 +118,7 @@ RegisterCustomRmgr(RmgrId rmid, const RmgrData *rmgr)
if (!process_shared_preload_libraries_in_progress)
ereport(ERROR,
(errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
errdetail("Custom resource manager must be registered while initializing modules in shared_preload_libraries.")));
errdetail("Custom resource manager must be registered while initializing modules in \"shared_preload_libraries\".")));
if (RmgrTable[rmid].rm_name != NULL)
ereport(ERROR,

View File

@ -373,7 +373,7 @@ MarkAsPreparing(TransactionId xid, const char *gid,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("prepared transactions are disabled"),
errhint("Set max_prepared_transactions to a nonzero value.")));
errhint("Set \"max_prepared_transactions\" to a nonzero value.")));
/* on first call, register the exit hook */
if (!twophaseExitRegistered)
@ -402,7 +402,7 @@ MarkAsPreparing(TransactionId xid, const char *gid,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("maximum number of prepared transactions reached"),
errhint("Increase max_prepared_transactions (currently %d).",
errhint("Increase \"max_prepared_transactions\" (currently %d).",
max_prepared_xacts)));
gxact = TwoPhaseState->freeGXacts;
TwoPhaseState->freeGXacts = gxact->next;
@ -2539,7 +2539,7 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("maximum number of prepared transactions reached"),
errhint("Increase max_prepared_transactions (currently %d).",
errhint("Increase \"max_prepared_transactions\" (currently %d).",
max_prepared_xacts)));
gxact = TwoPhaseState->freeGXacts;
TwoPhaseState->freeGXacts = gxact->next;

View File

@ -4501,11 +4501,11 @@ ReadControlFile(void)
/* check and update variables dependent on wal_segment_size */
if (ConvertToXSegs(min_wal_size_mb, wal_segment_size) < 2)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("min_wal_size must be at least twice wal_segment_size")));
errmsg("\"min_wal_size\" must be at least twice \"wal_segment_size\"")));
if (ConvertToXSegs(max_wal_size_mb, wal_segment_size) < 2)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("max_wal_size must be at least twice wal_segment_size")));
errmsg("\"max_wal_size\" must be at least twice \"wal_segment_size\"")));
UsableBytesInSegment =
(wal_segment_size / XLOG_BLCKSZ * UsableBytesInPage) -
@ -5351,9 +5351,9 @@ CheckRequiredParameterValues(void)
{
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL was generated with wal_level=minimal, cannot continue recovering"),
errdetail("This happens if you temporarily set wal_level=minimal on the server."),
errhint("Use a backup taken after setting wal_level to higher than minimal.")));
errmsg("WAL was generated with \"wal_level=minimal\", cannot continue recovering"),
errdetail("This happens if you temporarily set \"wal_level=minimal\" on the server."),
errhint("Use a backup taken after setting \"wal_level\" to higher than \"minimal\".")));
}
/*
@ -8549,7 +8549,7 @@ get_sync_bit(int method)
#endif
default:
/* can't happen (unless we are out of sync with option array) */
elog(ERROR, "unrecognized wal_sync_method: %d", method);
elog(ERROR, "unrecognized \"wal_sync_method\": %d", method);
return 0; /* silence warning */
}
}
@ -8647,7 +8647,7 @@ issue_xlog_fsync(int fd, XLogSegNo segno, TimeLineID tli)
default:
ereport(PANIC,
errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg_internal("unrecognized wal_sync_method: %d", wal_sync_method));
errmsg_internal("unrecognized \"wal_sync_method\": %d", wal_sync_method));
break;
}
@ -8725,7 +8725,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL level not sufficient for making an online backup"),
errhint("wal_level must be set to \"replica\" or \"logical\" at server start.")));
errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
if (strlen(backupidstr) > MAXPGPATH)
ereport(ERROR,
@ -8851,11 +8851,11 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
if (!checkpointfpw || state->startpoint <= recptr)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
errmsg("WAL generated with \"full_page_writes=off\" was replayed "
"since last restartpoint"),
errhint("This means that the backup being taken on the standby "
"is corrupt and should not be used. "
"Enable full_page_writes and run CHECKPOINT on the primary, "
"Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
"and then try an online backup again.")));
/*
@ -9147,11 +9147,11 @@ do_pg_backup_stop(BackupState *state, bool waitforarchive)
if (state->startpoint <= recptr)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
errmsg("WAL generated with \"full_page_writes=off\" was replayed "
"during online backup"),
errhint("This means that the backup being taken on the standby "
"is corrupt and should not be used. "
"Enable full_page_writes and run CHECKPOINT on the primary, "
"Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
"and then try an online backup again.")));
@ -9279,7 +9279,7 @@ do_pg_backup_stop(BackupState *state, bool waitforarchive)
ereport(WARNING,
(errmsg("still waiting for all required WAL segments to be archived (%d seconds elapsed)",
waits),
errhint("Check that your archive_command is executing properly. "
errhint("Check that your \"archive_command\" is executing properly. "
"You can safely cancel this backup, "
"but the database backup will not be usable without all the WAL segments.")));
}

View File

@ -233,7 +233,7 @@ RestoreArchivedFile(char *path, const char *xlogfname,
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not stat file \"%s\": %m", xlogpath),
errdetail("restore_command returned a zero exit status, but stat() failed.")));
errdetail("\"restore_command\" returned a zero exit status, but stat() failed.")));
}
}

View File

@ -212,7 +212,7 @@ pg_log_standby_snapshot(PG_FUNCTION_ARGS)
if (!XLogStandbyInfoActive())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("pg_log_standby_snapshot() can only be used if wal_level >= replica")));
errmsg("pg_log_standby_snapshot() can only be used if \"wal_level\" >= \"replica\"")));
recptr = LogStandbySnapshot();
@ -245,7 +245,7 @@ pg_create_restore_point(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL level not sufficient for creating a restore point"),
errhint("wal_level must be set to \"replica\" or \"logical\" at server start.")));
errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
restore_name_str = text_to_cstring(restore_name);

View File

@ -1085,7 +1085,7 @@ check_recovery_prefetch(int *new_value, void **extra, GucSource source)
#ifndef USE_PREFETCH
if (*new_value == RECOVERY_PREFETCH_ON)
{
GUC_check_errdetail("recovery_prefetch is not supported on platforms that lack posix_fadvise().");
GUC_check_errdetail("\"recovery_prefetch\" is not supported on platforms that lack posix_fadvise().");
return false;
}
#endif

View File

@ -1119,7 +1119,7 @@ validateRecoveryParameters(void)
if ((PrimaryConnInfo == NULL || strcmp(PrimaryConnInfo, "") == 0) &&
(recoveryRestoreCommand == NULL || strcmp(recoveryRestoreCommand, "") == 0))
ereport(WARNING,
(errmsg("specified neither primary_conninfo nor restore_command"),
(errmsg("specified neither \"primary_conninfo\" nor \"restore_command\""),
errhint("The database server will regularly poll the pg_wal subdirectory to check for files placed there.")));
}
else
@ -1128,7 +1128,7 @@ validateRecoveryParameters(void)
strcmp(recoveryRestoreCommand, "") == 0)
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("must specify restore_command when standby mode is not enabled")));
errmsg("must specify \"restore_command\" when standby mode is not enabled")));
}
/*
@ -2162,7 +2162,7 @@ CheckTablespaceDirectory(void)
errmsg("unexpected directory entry \"%s\" found in %s",
de->d_name, "pg_tblspc/"),
errdetail("All directory entries in pg_tblspc/ should be symbolic links."),
errhint("Remove those directories, or set allow_in_place_tablespaces to ON transiently to let recovery complete.")));
errhint("Remove those directories, or set \"allow_in_place_tablespaces\" to ON transiently to let recovery complete.")));
}
}
@ -4771,7 +4771,7 @@ error_multiple_recovery_targets(void)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("multiple recovery targets specified"),
errdetail("At most one of recovery_target, recovery_target_lsn, recovery_target_name, recovery_target_time, recovery_target_xid may be set.")));
errdetail("At most one of \"recovery_target\", \"recovery_target_lsn\", \"recovery_target_name\", \"recovery_target_time\", \"recovery_target_xid\" may be set.")));
}
/*
@ -4855,7 +4855,7 @@ check_recovery_target_name(char **newval, void **extra, GucSource source)
/* Use the value of newval directly */
if (strlen(*newval) >= MAXFNAMELEN)
{
GUC_check_errdetail("%s is too long (maximum %d characters).",
GUC_check_errdetail("\"%s\" is too long (maximum %d characters).",
"recovery_target_name", MAXFNAMELEN - 1);
return false;
}
@ -4979,7 +4979,7 @@ check_recovery_target_timeline(char **newval, void **extra, GucSource source)
strtoul(*newval, NULL, 0);
if (errno == EINVAL || errno == ERANGE)
{
GUC_check_errdetail("recovery_target_timeline is not a valid number.");
GUC_check_errdetail("\"recovery_target_timeline\" is not a valid number.");
return false;
}
}

View File

@ -858,8 +858,8 @@ CreatePublication(ParseState *pstate, CreatePublicationStmt *stmt)
if (wal_level != WAL_LEVEL_LOGICAL)
ereport(WARNING,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("wal_level is insufficient to publish logical changes"),
errhint("Set wal_level to \"logical\" before creating subscriptions.")));
errmsg("\"wal_level\" is insufficient to publish logical changes"),
errhint("Set \"wal_level\" to \"logical\" before creating subscriptions.")));
return myself;
}

View File

@ -131,7 +131,7 @@ check_vacuum_buffer_usage_limit(int *newval, void **extra,
return true;
/* Value does not fall within any allowable range */
GUC_check_errdetail("vacuum_buffer_usage_limit must be 0 or between %d kB and %d kB",
GUC_check_errdetail("\"vacuum_buffer_usage_limit\" must be 0 or between %d kB and %d kB",
MIN_BAS_VAC_RING_SIZE_KB, MAX_BAS_VAC_RING_SIZE_KB);
return false;

View File

@ -717,7 +717,7 @@ check_client_encoding(char **newval, void **extra, GucSource source)
else
{
/* Provide a useful complaint */
GUC_check_errdetail("Cannot change client_encoding now.");
GUC_check_errdetail("Cannot change \"client_encoding\" now.");
}
return false;
}
@ -778,7 +778,7 @@ assign_client_encoding(const char *newval, void *extra)
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
errmsg("cannot change client_encoding during a parallel operation")));
errmsg("cannot change \"client_encoding\" during a parallel operation")));
}
/* We do not expect an error if PrepareClientEncoding succeeded */
@ -1202,7 +1202,7 @@ check_effective_io_concurrency(int *newval, void **extra, GucSource source)
#ifndef USE_PREFETCH
if (*newval != 0)
{
GUC_check_errdetail("effective_io_concurrency must be set to 0 on platforms that lack posix_fadvise().");
GUC_check_errdetail("\"effective_io_concurrency\" must be set to 0 on platforms that lack posix_fadvise().");
return false;
}
#endif /* USE_PREFETCH */
@ -1215,7 +1215,7 @@ check_maintenance_io_concurrency(int *newval, void **extra, GucSource source)
#ifndef USE_PREFETCH
if (*newval != 0)
{
GUC_check_errdetail("maintenance_io_concurrency must be set to 0 on platforms that lack posix_fadvise().");
GUC_check_errdetail("\"maintenance_io_concurrency\" must be set to 0 on platforms that lack posix_fadvise().");
return false;
}
#endif /* USE_PREFETCH */

View File

@ -201,7 +201,7 @@ be_tls_init(bool isServerStart)
{
ereport(isServerStart ? FATAL : LOG,
/*- translator: first %s is a GUC option name, second %s is its value */
(errmsg("%s setting \"%s\" not supported by this build",
(errmsg("\"%s\" setting \"%s\" not supported by this build",
"ssl_min_protocol_version",
GetConfigOption("ssl_min_protocol_version",
false, false))));
@ -251,7 +251,7 @@ be_tls_init(bool isServerStart)
{
ereport(isServerStart ? FATAL : LOG,
(errmsg("could not set SSL protocol version range"),
errdetail("%s cannot be higher than %s",
errdetail("\"%s\" cannot be higher than \"%s\"",
"ssl_min_protocol_version",
"ssl_max_protocol_version")));
goto error;

View File

@ -1378,7 +1378,7 @@ parse_hba_line(TokenizedAuthLine *tok_line, int elevel)
ereport(elevel,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("hostssl record cannot match because SSL is disabled"),
errhint("Set ssl = on in postgresql.conf."),
errhint("Set \"ssl = on\" in postgresql.conf."),
errcontext("line %d of configuration file \"%s\"",
line_num, file_name)));
*err_msg = "hostssl record cannot match because SSL is disabled";

View File

@ -731,7 +731,7 @@ Setup_AF_UNIX(const char *sock_path)
if (Unix_socket_group[0] != '\0')
{
#ifdef WIN32
elog(WARNING, "configuration item unix_socket_group is not supported on this platform");
elog(WARNING, "configuration item \"unix_socket_group\" is not supported on this platform");
#else
char *endptr;
unsigned long val;

View File

@ -565,7 +565,7 @@ other .
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("unsafe use of string constant with Unicode escapes"),
errdetail("String constants with Unicode escapes cannot be used when standard_conforming_strings is off."),
errdetail("String constants with Unicode escapes cannot be used when \"standard_conforming_strings\" is off."),
lexer_errposition()));
BEGIN(xus);
startlit();

View File

@ -127,7 +127,7 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems)
"semaphore sets (SEMMNI), or the system wide maximum number of "
"semaphores (SEMMNS), would be exceeded. You need to raise the "
"respective kernel parameter. Alternatively, reduce PostgreSQL's "
"consumption of semaphores by reducing its max_connections parameter.\n"
"consumption of semaphores by reducing its \"max_connections\" parameter.\n"
"The PostgreSQL documentation contains more information about "
"configuring your system for PostgreSQL.") : 0));
}

View File

@ -581,7 +581,7 @@ check_huge_page_size(int *newval, void **extra, GucSource source)
/* Recent enough Linux only, for now. See GetHugePageSize(). */
if (*newval != 0)
{
GUC_check_errdetail("huge_page_size must be 0 on this platform.");
GUC_check_errdetail("\"huge_page_size\" must be 0 on this platform.");
return false;
}
#endif
@ -658,8 +658,8 @@ CreateAnonymousSegment(Size *size)
"for a shared memory segment exceeded available memory, "
"swap space, or huge pages. To reduce the request size "
"(currently %zu bytes), reduce PostgreSQL's shared "
"memory usage, perhaps by reducing shared_buffers or "
"max_connections.",
"memory usage, perhaps by reducing \"shared_buffers\" or "
"\"max_connections\".",
allocsize) : 0));
}
@ -729,7 +729,7 @@ PGSharedMemoryCreate(Size size,
if (huge_pages == HUGE_PAGES_ON && shared_memory_type != SHMEM_TYPE_MMAP)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("huge pages not supported with the current shared_memory_type setting")));
errmsg("huge pages not supported with the current \"shared_memory_type\" setting")));
/* Room for a header? */
Assert(size > MAXALIGN(sizeof(PGShmemHeader)));

View File

@ -643,7 +643,7 @@ check_huge_page_size(int *newval, void **extra, GucSource source)
{
if (*newval != 0)
{
GUC_check_errdetail("huge_page_size must be 0 on this platform.");
GUC_check_errdetail("\"huge_page_size\" must be 0 on this platform.");
return false;
}
return true;

View File

@ -885,7 +885,7 @@ RegisterBackgroundWorker(BackgroundWorker *worker)
return;
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("background worker \"%s\": must be registered in shared_preload_libraries",
errmsg("background worker \"%s\": must be registered in \"shared_preload_libraries\"",
worker->bgw_name)));
return;
}

View File

@ -442,7 +442,7 @@ CheckpointerMain(char *startup_data, size_t startup_data_len)
"checkpoints are occurring too frequently (%d seconds apart)",
elapsed_secs,
elapsed_secs),
errhint("Consider increasing the configuration parameter max_wal_size.")));
errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size")));
/*
* Initialize checkpointer-private variables used during

View File

@ -425,7 +425,7 @@ pgarch_ArchiverCopyLoop(void)
!ArchiveCallbacks->check_configured_cb(archive_module_state))
{
ereport(WARNING,
(errmsg("archive_mode enabled, yet archiving is not configured"),
(errmsg("\"archive_mode\" enabled, yet archiving is not configured"),
arch_module_check_errdetail_string ?
errdetail_internal("%s", arch_module_check_errdetail_string) : 0));
return;
@ -876,8 +876,8 @@ HandlePgArchInterrupts(void)
if (XLogArchiveLibrary[0] != '\0' && XLogArchiveCommand[0] != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("both archive_command and archive_library set"),
errdetail("Only one of archive_command, archive_library may be set.")));
errmsg("both \"archive_command\" and \"archive_library\" set"),
errdetail("Only one of \"archive_command\", \"archive_library\" may be set.")));
archiveLibChanged = strcmp(XLogArchiveLibrary, archiveLib) != 0;
pfree(archiveLib);
@ -915,8 +915,8 @@ LoadArchiveLibrary(void)
if (XLogArchiveLibrary[0] != '\0' && XLogArchiveCommand[0] != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("both archive_command and archive_library set"),
errdetail("Only one of archive_command, archive_library may be set.")));
errmsg("both \"archive_command\" and \"archive_library\" set"),
errdetail("Only one of \"archive_command\", \"archive_library\" may be set.")));
/*
* If shell archiving is enabled, use our special initialization function.

View File

@ -822,7 +822,7 @@ PostmasterMain(int argc, char *argv[])
*/
if (SuperuserReservedConnections + ReservedConnections >= MaxConnections)
{
write_stderr("%s: superuser_reserved_connections (%d) plus reserved_connections (%d) must be less than max_connections (%d)\n",
write_stderr("%s: \"superuser_reserved_connections\" (%d) plus \"reserved_connections\" (%d) must be less than \"max_connections\" (%d)\n",
progname,
SuperuserReservedConnections, ReservedConnections,
MaxConnections);
@ -830,13 +830,13 @@ PostmasterMain(int argc, char *argv[])
}
if (XLogArchiveMode > ARCHIVE_MODE_OFF && wal_level == WAL_LEVEL_MINIMAL)
ereport(ERROR,
(errmsg("WAL archival cannot be enabled when wal_level is \"minimal\"")));
(errmsg("WAL archival cannot be enabled when \"wal_level\" is \"minimal\"")));
if (max_wal_senders > 0 && wal_level == WAL_LEVEL_MINIMAL)
ereport(ERROR,
(errmsg("WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or \"logical\"")));
(errmsg("WAL streaming (\"max_wal_senders\" > 0) requires \"wal_level\" to be \"replica\" or \"logical\"")));
if (summarize_wal && wal_level == WAL_LEVEL_MINIMAL)
ereport(ERROR,
(errmsg("WAL cannot be summarized when wal_level is \"minimal\"")));
(errmsg("WAL cannot be summarized when \"wal_level\" is \"minimal\"")));
/*
* Other one-time internal sanity checks can go here, if they are fast.
@ -3359,7 +3359,7 @@ PostmasterStateMachine(void)
if (!restart_after_crash)
{
ereport(LOG,
(errmsg("shutting down because restart_after_crash is off")));
(errmsg("shutting down because \"restart_after_crash\" is off")));
ExitPostmaster(1);
}
}

View File

@ -174,7 +174,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
Assert(RecoveryInProgress());
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("logical decoding on standby requires wal_level >= logical on the primary")));
errmsg("logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary")));
}
break;
}

View File

@ -425,7 +425,7 @@ retry:
ereport(WARNING,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("out of logical replication worker slots"),
errhint("You might need to increase %s.", "max_logical_replication_workers")));
errhint("You might need to increase \"%s\".", "max_logical_replication_workers")));
return false;
}
@ -511,7 +511,7 @@ retry:
ereport(WARNING,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("out of background worker slots"),
errhint("You might need to increase %s.", "max_worker_processes")));
errhint("You might need to increase \"%s\".", "max_worker_processes")));
return false;
}

View File

@ -118,7 +118,7 @@ CheckLogicalDecodingRequirements(void)
if (wal_level < WAL_LEVEL_LOGICAL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("logical decoding requires wal_level >= logical")));
errmsg("logical decoding requires \"wal_level\" >= \"logical\"")));
if (MyDatabaseId == InvalidOid)
ereport(ERROR,
@ -138,7 +138,7 @@ CheckLogicalDecodingRequirements(void)
if (GetActiveWalLevelOnStandby() < WAL_LEVEL_LOGICAL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("logical decoding on standby requires wal_level >= logical on the primary")));
errmsg("logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary")));
}
}

View File

@ -187,7 +187,7 @@ replorigin_check_prerequisites(bool check_slots, bool recoveryOK)
if (check_slots && max_replication_slots == 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot query or manipulate replication origin when max_replication_slots = 0")));
errmsg("cannot query or manipulate replication origin when \"max_replication_slots\" is 0")));
if (!recoveryOK && RecoveryInProgress())
ereport(ERROR,
@ -795,7 +795,7 @@ StartupReplicationOrigin(void)
if (last_state == max_replication_slots)
ereport(PANIC,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("could not find free replication state, increase max_replication_slots")));
errmsg("could not find free replication state, increase \"max_replication_slots\"")));
/* copy data to shared memory */
replication_states[last_state].roident = disk_state.roident;
@ -954,7 +954,7 @@ replorigin_advance(RepOriginId node,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("could not find free replication state slot for replication origin with ID %d",
node),
errhint("Increase max_replication_slots and try again.")));
errhint("Increase \"max_replication_slots\" and try again.")));
if (replication_state == NULL)
{
@ -1155,7 +1155,7 @@ replorigin_session_setup(RepOriginId node, int acquired_by)
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("could not find free replication state slot for replication origin with ID %d",
node),
errhint("Increase max_replication_slots and try again.")));
errhint("Increase \"max_replication_slots\" and try again.")));
else if (session_replication_state == NULL)
{
/* initialize new slot */

View File

@ -378,7 +378,7 @@ ReplicationSlotCreate(const char *name, bool db_specific,
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("all replication slots are in use"),
errhint("Free one or increase max_replication_slots.")));
errhint("Free one or increase \"max_replication_slots\".")));
/*
* Since this slot is not in use, nobody should be looking at any part of
@ -1369,12 +1369,12 @@ CheckSlotRequirements(void)
if (max_replication_slots == 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("replication slots can only be used if max_replication_slots > 0")));
errmsg("replication slots can only be used if \"max_replication_slots\" > 0")));
if (wal_level < WAL_LEVEL_REPLICA)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("replication slots can only be used if wal_level >= replica")));
errmsg("replication slots can only be used if \"wal_level\" >= \"replica\"")));
}
/*
@ -1508,7 +1508,7 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause,
break;
case RS_INVAL_WAL_LEVEL:
appendStringInfoString(&err_detail, _("Logical decoding on standby requires wal_level >= logical on the primary server."));
appendStringInfoString(&err_detail, _("Logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary server."));
break;
case RS_INVAL_NONE:
pg_unreachable();
@ -1521,7 +1521,7 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause,
errmsg("invalidating obsolete replication slot \"%s\"",
NameStr(slotname)),
errdetail_internal("%s", err_detail.data),
hint ? errhint("You might need to increase %s.", "max_slot_wal_keep_size") : 0);
hint ? errhint("You might need to increase \"%s\".", "max_slot_wal_keep_size") : 0);
pfree(err_detail.data);
}
@ -2332,15 +2332,15 @@ RestoreSlotFromDisk(const char *name)
if (cp.slotdata.database != InvalidOid && wal_level < WAL_LEVEL_LOGICAL)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("logical replication slot \"%s\" exists, but wal_level < logical",
errmsg("logical replication slot \"%s\" exists, but \"wal_level\" < \"logical\"",
NameStr(cp.slotdata.name)),
errhint("Change wal_level to be logical or higher.")));
errhint("Change \"wal_level\" to be \"logical\" or higher.")));
else if (wal_level < WAL_LEVEL_REPLICA)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("physical replication slot \"%s\" exists, but wal_level < replica",
errmsg("physical replication slot \"%s\" exists, but \"wal_level\" < \"replica\"",
NameStr(cp.slotdata.name)),
errhint("Change wal_level to be replica or higher.")));
errhint("Change \"wal_level\" to be \"replica\" or higher.")));
/* nothing can be active yet, don't lock anything */
for (i = 0; i < max_replication_slots; i++)
@ -2383,7 +2383,7 @@ RestoreSlotFromDisk(const char *name)
if (!restored)
ereport(FATAL,
(errmsg("too many replication slots active before shutdown"),
errhint("Increase max_replication_slots and try again.")));
errhint("Increase \"max_replication_slots\" and try again.")));
}
/*

View File

@ -1010,7 +1010,7 @@ check_synchronous_standby_names(char **newval, void **extra, GucSource source)
if (syncrep_parse_error_msg)
GUC_check_errdetail("%s", syncrep_parse_error_msg);
else
GUC_check_errdetail("synchronous_standby_names parser failed");
GUC_check_errdetail("\"synchronous_standby_names\" parser failed");
return false;
}

View File

@ -709,7 +709,7 @@ check_temp_buffers(int *newval, void **extra, GucSource source)
*/
if (source != PGC_S_TEST && NLocBuffer && NLocBuffer != *newval)
{
GUC_check_errdetail("temp_buffers cannot be changed after any temporary tables have been accessed in the session.");
GUC_check_errdetail("\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session.");
return false;
}
return true;

View File

@ -3947,7 +3947,7 @@ check_debug_io_direct(char **newval, void **extra, GucSource source)
#if PG_O_DIRECT == 0
if (strcmp(*newval, "") != 0)
{
GUC_check_errdetail("debug_io_direct is not supported on this platform.");
GUC_check_errdetail("\"debug_io_direct\" is not supported on this platform.");
result = false;
}
flags = 0;
@ -3961,7 +3961,7 @@ check_debug_io_direct(char **newval, void **extra, GucSource source)
if (!SplitGUCList(rawstring, ',', &elemlist))
{
GUC_check_errdetail("Invalid list syntax in parameter %s",
GUC_check_errdetail("Invalid list syntax in parameter \"%s\"",
"debug_io_direct");
pfree(rawstring);
list_free(elemlist);
@ -3994,14 +3994,14 @@ check_debug_io_direct(char **newval, void **extra, GucSource source)
#if XLOG_BLCKSZ < PG_IO_ALIGN_SIZE
if (result && (flags & (IO_DIRECT_WAL | IO_DIRECT_WAL_INIT)))
{
GUC_check_errdetail("debug_io_direct is not supported for WAL because XLOG_BLCKSZ is too small");
GUC_check_errdetail("\"debug_io_direct\" is not supported for WAL because XLOG_BLCKSZ is too small");
result = false;
}
#endif
#if BLCKSZ < PG_IO_ALIGN_SIZE
if (result && (flags & IO_DIRECT_DATA))
{
GUC_check_errdetail("debug_io_direct is not supported for data because BLCKSZ is too small");
GUC_check_errdetail("\"debug_io_direct\" is not supported for data because BLCKSZ is too small");
result = false;
}
#endif

View File

@ -960,7 +960,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase %s.", "max_locks_per_transaction")));
errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
else
return LOCKACQUIRE_NOT_AVAIL;
}
@ -998,7 +998,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase %s.", "max_locks_per_transaction")));
errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
else
return LOCKACQUIRE_NOT_AVAIL;
}
@ -2801,7 +2801,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase %s.", "max_locks_per_transaction")));
errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
GrantLock(proclock->tag.myLock, proclock, lockmode);
FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
@ -4186,7 +4186,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase %s.", "max_locks_per_transaction")));
errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
/*
@ -4251,7 +4251,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase %s.", "max_locks_per_transaction")));
errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
/*
@ -4601,7 +4601,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase %s.", "max_locks_per_transaction")));
errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);

View File

@ -651,7 +651,7 @@ SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
errhint("You might need to run fewer transactions at a time or increase max_connections.")));
errhint("You might need to run fewer transactions at a time or increase \"max_connections\".")));
conflict = dlist_head_element(RWConflictData, outLink, &RWConflictPool->availableList);
dlist_delete(&conflict->outLink);
@ -676,7 +676,7 @@ SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
errhint("You might need to run fewer transactions at a time or increase max_connections.")));
errhint("You might need to run fewer transactions at a time or increase \"max_connections\".")));
conflict = dlist_head_element(RWConflictData, outLink, &RWConflictPool->availableList);
dlist_delete(&conflict->outLink);
@ -1678,7 +1678,7 @@ GetSerializableTransactionSnapshot(Snapshot snapshot)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use serializable mode in a hot standby"),
errdetail("default_transaction_isolation is set to \"serializable\"."),
errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
/*
@ -2461,7 +2461,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
if (!found)
dlist_init(&target->predicateLocks);
@ -2476,7 +2476,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
if (!found)
{
@ -3873,7 +3873,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
if (found)
{
Assert(predlock->commitSeqNo != 0);

View File

@ -345,7 +345,7 @@ InitProcess(void)
if (AmWalSenderProcess())
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)",
max_wal_senders)));
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),

View File

@ -3535,7 +3535,7 @@ check_stack_depth(void)
ereport(ERROR,
(errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
errmsg("stack depth limit exceeded"),
errhint("Increase the configuration parameter max_stack_depth (currently %dkB), "
errhint("Increase the configuration parameter \"max_stack_depth\" (currently %dkB), "
"after ensuring the platform's stack depth limit is adequate.",
max_stack_depth)));
}
@ -3582,7 +3582,7 @@ check_max_stack_depth(int *newval, void **extra, GucSource source)
if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP)
{
GUC_check_errdetail("max_stack_depth must not exceed %ldkB.",
GUC_check_errdetail("\"max_stack_depth\" must not exceed %ldkB.",
(stack_rlimit - STACK_DEPTH_SLOP) / 1024L);
GUC_check_errhint("Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent.");
return false;
@ -3607,7 +3607,7 @@ check_client_connection_check_interval(int *newval, void **extra, GucSource sour
{
if (!WaitEventSetCanReportClosed() && *newval != 0)
{
GUC_check_errdetail("client_connection_check_interval must be set to 0 on this platform.");
GUC_check_errdetail("\"client_connection_check_interval\" must be set to 0 on this platform.");
return false;
}
return true;
@ -3643,9 +3643,9 @@ check_log_stats(bool *newval, void **extra, GucSource source)
if (*newval &&
(log_parser_stats || log_planner_stats || log_executor_stats))
{
GUC_check_errdetail("Cannot enable log_statement_stats when "
"log_parser_stats, log_planner_stats, "
"or log_executor_stats is true.");
GUC_check_errdetail("Cannot enable \"log_statement_stats\" when "
"\"log_parser_stats\", \"log_planner_stats\", "
"or \"log_executor_stats\" is true.");
return false;
}
return true;

View File

@ -3000,7 +3000,7 @@ icu_validate_locale(const char *loc_str)
ereport(elevel,
(errmsg("could not get language from ICU locale \"%s\": %s",
loc_str, u_errorName(status)),
errhint("To disable ICU locale validation, set the parameter %s to \"%s\".",
errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
"icu_validation_level", "disabled")));
return;
}
@ -3029,7 +3029,7 @@ icu_validate_locale(const char *loc_str)
ereport(elevel,
(errmsg("ICU locale \"%s\" has unknown language \"%s\"",
loc_str, lang),
errhint("To disable ICU locale validation, set the parameter %s to \"%s\".",
errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
"icu_validation_level", "disabled")));
/* check that it can be opened */

View File

@ -456,7 +456,7 @@ byteaout(PG_FUNCTION_ARGS)
}
else
{
elog(ERROR, "unrecognized bytea_output setting: %d",
elog(ERROR, "unrecognized \"bytea_output\" setting: %d",
bytea_output);
rp = result = NULL; /* keep compiler quiet */
}

View File

@ -538,7 +538,7 @@ find_in_dynamic_libpath(const char *basename)
if (piece == p)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("zero-length component in parameter dynamic_library_path")));
errmsg("zero-length component in parameter \"dynamic_library_path\"")));
if (piece == NULL)
len = strlen(p);
@ -557,7 +557,7 @@ find_in_dynamic_libpath(const char *basename)
if (!is_absolute_path(mangled))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("component in parameter dynamic_library_path is not an absolute path")));
errmsg("component in parameter \"dynamic_library_path\" is not an absolute path")));
full = palloc(strlen(mangled) + 1 + baselen + 1);
sprintf(full, "%s/%s", mangled, basename);

View File

@ -1879,7 +1879,7 @@ SelectConfigFiles(const char *userDoption, const char *progname)
else
{
write_stderr("%s does not know where to find the database system data.\n"
"This can be specified as data_directory in \"%s\", "
"This can be specified as \"data_directory\" in \"%s\", "
"or by the -D invocation option, or by the "
"PGDATA environment variable.\n",
progname, ConfigFileName);

View File

@ -1066,7 +1066,7 @@ struct config_bool ConfigureNamesBool[] =
},
{
{"ssl_passphrase_command_supports_reload", PGC_SIGHUP, CONN_AUTH_SSL,
gettext_noop("Controls whether ssl_passphrase_command is called during server reload."),
gettext_noop("Controls whether \"ssl_passphrase_command\" is called during server reload."),
NULL
},
&ssl_passphrase_command_supports_reload,
@ -1114,7 +1114,7 @@ struct config_bool ConfigureNamesBool[] =
gettext_noop("Continues processing past damaged page headers."),
gettext_noop("Detection of a damaged page header normally causes PostgreSQL to "
"report an error, aborting the current transaction. Setting "
"zero_damaged_pages to true causes the system to instead report a "
"\"zero_damaged_pages\" to true causes the system to instead report a "
"warning, zero out the damaged page, and continue processing. This "
"behavior will destroy data, namely all the rows on the damaged page."),
GUC_NOT_IN_SAMPLE
@ -1129,7 +1129,7 @@ struct config_bool ConfigureNamesBool[] =
gettext_noop("Detection of WAL records having references to "
"invalid pages during recovery causes PostgreSQL to "
"raise a PANIC-level error, aborting the recovery. "
"Setting ignore_invalid_pages to true causes "
"Setting \"ignore_invalid_pages\" to true causes "
"the system to ignore invalid page references "
"in WAL records (but still report a warning), "
"and continue recovery. This behavior may cause "
@ -2713,7 +2713,7 @@ struct config_int ConfigureNamesInt[] =
{"max_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
gettext_noop("Sets the maximum number of locks per transaction."),
gettext_noop("The shared lock table is sized on the assumption that at most "
"max_locks_per_transaction objects per server process or prepared "
"\"max_locks_per_transaction\" objects per server process or prepared "
"transaction will need to be locked at any one time.")
},
&max_locks_per_xact,
@ -2725,7 +2725,7 @@ struct config_int ConfigureNamesInt[] =
{"max_pred_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
gettext_noop("Sets the maximum number of predicate locks per transaction."),
gettext_noop("The shared predicate lock table is sized on the assumption that "
"at most max_pred_locks_per_transaction objects per server process "
"at most \"max_pred_locks_per_transaction\" objects per server process "
"or prepared transaction will need to be locked at any one time.")
},
&max_predicate_locks_per_xact,
@ -2976,7 +2976,7 @@ struct config_int ConfigureNamesInt[] =
{
{"commit_siblings", PGC_USERSET, WAL_SETTINGS,
gettext_noop("Sets the minimum number of concurrent open transactions "
"required before performing commit_delay."),
"required before performing \"commit_delay\"."),
NULL
},
&CommitSiblings,
@ -3108,7 +3108,7 @@ struct config_int ConfigureNamesInt[] =
{"maintenance_io_concurrency",
PGC_USERSET,
RESOURCES_ASYNCHRONOUS,
gettext_noop("A variant of effective_io_concurrency that is used for maintenance work."),
gettext_noop("A variant of \"effective_io_concurrency\" that is used for maintenance work."),
NULL,
GUC_EXPLAIN
},
@ -3815,7 +3815,7 @@ struct config_real ConfigureNamesReal[] =
{
{"hash_mem_multiplier", PGC_USERSET, RESOURCES_MEM,
gettext_noop("Multiple of work_mem to use for hash tables."),
gettext_noop("Multiple of \"work_mem\" to use for hash tables."),
NULL,
GUC_EXPLAIN
},
@ -3909,7 +3909,7 @@ struct config_real ConfigureNamesReal[] =
{
{"log_statement_sample_rate", PGC_SUSET, LOGGING_WHEN,
gettext_noop("Fraction of statements exceeding log_min_duration_sample to be logged."),
gettext_noop("Fraction of statements exceeding \"log_min_duration_sample\" to be logged."),
gettext_noop("Use a value between 0.0 (never log) and 1.0 (always log).")
},
&log_statement_sample_rate,
@ -3940,7 +3940,7 @@ struct config_string ConfigureNamesString[] =
{
{"archive_command", PGC_SIGHUP, WAL_ARCHIVING,
gettext_noop("Sets the shell command that will be called to archive a WAL file."),
gettext_noop("This is used only if archive_library is not set.")
gettext_noop("This is used only if \"archive_library\" is not set.")
},
&XLogArchiveCommand,
"",
@ -3950,7 +3950,7 @@ struct config_string ConfigureNamesString[] =
{
{"archive_library", PGC_SIGHUP, WAL_ARCHIVING,
gettext_noop("Sets the library that will be called to archive a WAL file."),
gettext_noop("An empty string indicates that archive_command should be used.")
gettext_noop("An empty string indicates that \"archive_command\" should be used.")
},
&XLogArchiveLibrary,
"",
@ -4895,7 +4895,7 @@ struct config_enum ConfigureNamesEnum[] =
{
{"archive_mode", PGC_POSTMASTER, WAL_ARCHIVING,
gettext_noop("Allows archiving of WAL files using archive_command."),
gettext_noop("Allows archiving of WAL files using \"archive_command\"."),
NULL
},
&XLogArchiveMode,

View File

@ -1092,7 +1092,7 @@ test_config_settings(void)
* Probe for max_connections before shared_buffers, since it is subject to
* more constraints than shared_buffers.
*/
printf(_("selecting default max_connections ... "));
printf(_("selecting default \"max_connections\" ... "));
fflush(stdout);
for (i = 0; i < connslen; i++)
@ -1112,7 +1112,7 @@ test_config_settings(void)
printf("%d\n", n_connections);
printf(_("selecting default shared_buffers ... "));
printf(_("selecting default \"shared_buffers\" ... "));
fflush(stdout);
for (i = 0; i < bufslen; i++)

View File

@ -227,7 +227,7 @@ GetConnection(void)
res = PQexec(tmpconn, ALWAYS_SECURE_SEARCH_PATH_SQL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
pg_log_error("could not clear search_path: %s",
pg_log_error("could not clear \"search_path\": %s",
PQerrorMessage(tmpconn));
PQclear(res);
PQfinish(tmpconn);
@ -243,14 +243,14 @@ GetConnection(void)
tmpparam = PQparameterStatus(tmpconn, "integer_datetimes");
if (!tmpparam)
{
pg_log_error("could not determine server setting for integer_datetimes");
pg_log_error("could not determine server setting for \"integer_datetimes\"");
PQfinish(tmpconn);
exit(1);
}
if (strcmp(tmpparam, "on") != 0)
{
pg_log_error("integer_datetimes compile flag does not match server");
pg_log_error("\"integer_datetimes\" compile flag does not match server");
PQfinish(tmpconn);
exit(1);
}

View File

@ -81,7 +81,7 @@ wal_level_str(WalLevel wal_level)
case WAL_LEVEL_LOGICAL:
return "logical";
}
return _("unrecognized wal_level");
return _("unrecognized \"wal_level\"");
}

View File

@ -3454,7 +3454,7 @@ _selectOutputSchema(ArchiveHandle *AH, const char *schemaName)
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
warn_or_exit_horribly(AH,
"could not set search_path to \"%s\": %s",
"could not set \"search_path\" to \"%s\": %s",
schemaName, PQerrorMessage(AH->connection));
PQclear(res);
@ -3515,7 +3515,7 @@ _selectTablespace(ArchiveHandle *AH, const char *tablespace)
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
warn_or_exit_horribly(AH,
"could not set default_tablespace to %s: %s",
"could not set \"default_tablespace\" to %s: %s",
fmtId(want), PQerrorMessage(AH->connection));
PQclear(res);
@ -3564,7 +3564,7 @@ _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam)
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
warn_or_exit_horribly(AH,
"could not set default_table_access_method: %s",
"could not set \"default_table_access_method\": %s",
PQerrorMessage(AH->connection));
PQclear(res);

View File

@ -3534,7 +3534,7 @@ dumpStdStrings(Archive *AH)
const char *stdstrings = AH->std_strings ? "on" : "off";
PQExpBuffer qry = createPQExpBuffer();
pg_log_info("saving standard_conforming_strings = %s",
pg_log_info("saving \"standard_conforming_strings\" = %s",
stdstrings);
appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
@ -3592,7 +3592,7 @@ dumpSearchPath(Archive *AH)
appendStringLiteralAH(qry, path->data, AH);
appendPQExpBufferStr(qry, ", false);\n");
pg_log_info("saving search_path = %s", path->data);
pg_log_info("saving \"search_path\" = %s", path->data);
ArchiveEntry(AH, nilCatalogId, createDumpId(),
ARCHIVE_OPTS(.tag = "SEARCHPATH",

View File

@ -128,7 +128,7 @@ init_libpq_conn(PGconn *conn)
/* secure search_path */
res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
pg_fatal("could not clear search_path: %s",
pg_fatal("could not clear \"search_path\": %s",
PQresultErrorMessage(res));
PQclear(res);
@ -139,7 +139,7 @@ init_libpq_conn(PGconn *conn)
*/
str = run_simple_query(conn, "SHOW full_page_writes");
if (strcmp(str, "on") != 0)
pg_fatal("full_page_writes must be enabled in the source server");
pg_fatal("\"full_page_writes\" must be enabled in the source server");
pg_free(str);
/* Prepare a statement we'll use to fetch files */

View File

@ -94,7 +94,7 @@ usage(const char *progname)
printf(_("%s resynchronizes a PostgreSQL cluster with another copy of the cluster.\n\n"), progname);
printf(_("Usage:\n %s [OPTION]...\n\n"), progname);
printf(_("Options:\n"));
printf(_(" -c, --restore-target-wal use restore_command in target configuration to\n"
printf(_(" -c, --restore-target-wal use \"restore_command\" in target configuration to\n"
" retrieve WAL files from archives\n"));
printf(_(" -D, --target-pgdata=DIRECTORY existing data directory to modify\n"));
printf(_(" --source-pgdata=DIRECTORY source data directory to synchronize with\n"));
@ -1111,9 +1111,9 @@ getRestoreCommand(const char *argv0)
(void) pg_strip_crlf(restore_command);
if (strcmp(restore_command, "") == 0)
pg_fatal("restore_command is not set in the target cluster");
pg_fatal("\"restore_command\" is not set in the target cluster");
pg_log_debug("using for rewind restore_command = \'%s\'",
pg_log_debug("using for rewind \"restore_command\" = \'%s\'",
restore_command);
destroyPQExpBuffer(postgres_cmd);

View File

@ -298,7 +298,7 @@ test_sync(int writes_per_op)
printf(_("\nCompare file sync methods using one %dkB write:\n"), XLOG_BLCKSZ_K);
else
printf(_("\nCompare file sync methods using two %dkB writes:\n"), XLOG_BLCKSZ_K);
printf(_("(in wal_sync_method preference order, except fdatasync is Linux's default)\n"));
printf(_("(in \"wal_sync_method\" preference order, except fdatasync is Linux's default)\n"));
/*
* Test open_datasync if available

View File

@ -1769,13 +1769,13 @@ check_new_cluster_logical_replication_slots(void)
wal_level = PQgetvalue(res, 0, 0);
if (strcmp(wal_level, "logical") != 0)
pg_fatal("wal_level must be \"logical\", but is set to \"%s\"",
pg_fatal("\"wal_level\" must be \"logical\", but is set to \"%s\"",
wal_level);
max_replication_slots = atoi(PQgetvalue(res, 1, 0));
if (nslots_on_old > max_replication_slots)
pg_fatal("max_replication_slots (%d) must be greater than or equal to the number of "
pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
"logical replication slots (%d) on the old cluster",
max_replication_slots, nslots_on_old);
@ -1822,7 +1822,7 @@ check_new_cluster_subscription_configuration(void)
max_replication_slots = atoi(PQgetvalue(res, 0, 0));
if (nsubs_on_old > max_replication_slots)
pg_fatal("max_replication_slots (%d) must be greater than or equal to the number of "
pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
"subscriptions (%d) on the old cluster",
max_replication_slots, nsubs_on_old);

View File

@ -77,10 +77,10 @@ command_checks_all(
[@pg_upgrade_cmd],
1,
[
qr/max_replication_slots \(1\) must be greater than or equal to the number of logical replication slots \(2\) on the old cluster/
qr/"max_replication_slots" \(1\) must be greater than or equal to the number of logical replication slots \(2\) on the old cluster/
],
[qr//],
'run of pg_upgrade where the new cluster has insufficient max_replication_slots'
'run of pg_upgrade where the new cluster has insufficient "max_replication_slots"'
);
ok(-d $newpub->data_dir . "/pg_upgrade_output.d",
"pg_upgrade_output.d/ not removed after pg_upgrade failure");

View File

@ -66,7 +66,7 @@ command_checks_all(
],
1,
[
qr/max_replication_slots \(0\) must be greater than or equal to the number of subscriptions \(1\) on the old cluster/
qr/"max_replication_slots" \(0\) must be greater than or equal to the number of subscriptions \(1\) on the old cluster/
],
[qr//],
'run of pg_upgrade where the new cluster has insufficient max_replication_slots'

View File

@ -5376,7 +5376,7 @@ GetTableInfo(PGconn *con, bool scale_given)
* This case is unlikely as pgbench already found "pgbench_branches"
* above to compute the scale.
*/
pg_log_error("no pgbench_accounts table found in search_path");
pg_log_error("no pgbench_accounts table found in \"search_path\"");
pg_log_error_hint("Perhaps you need to do initialization (\"pgbench -i\") in database \"%s\".", PQdb(con));
exit(1);
}

View File

@ -95,7 +95,7 @@ RestoreArchivedFile(const char *path, const char *xlogfname,
* fatal too.
*/
if (wait_result_is_any_signal(rc, true))
pg_fatal("restore_command failed: %s",
pg_fatal("\"restore_command\" failed: %s",
wait_result_to_str(rc));
/*

View File

@ -1313,7 +1313,7 @@ PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user,
if (strlen(val) > MAX_ALGORITHM_NAME_LEN)
{
PQclear(res);
libpq_append_conn_error(conn, "password_encryption value too long");
libpq_append_conn_error(conn, "\"password_encryption\" value too long");
return NULL;
}
strcpy(algobuf, val);

View File

@ -1657,7 +1657,7 @@ pqConnectOptions2(PGconn *conn)
if (!sslVerifyProtocolVersion(conn->ssl_min_protocol_version))
{
conn->status = CONNECTION_BAD;
libpq_append_conn_error(conn, "invalid %s value: \"%s\"",
libpq_append_conn_error(conn, "invalid \"%s\" value: \"%s\"",
"ssl_min_protocol_version",
conn->ssl_min_protocol_version);
return false;
@ -1665,7 +1665,7 @@ pqConnectOptions2(PGconn *conn)
if (!sslVerifyProtocolVersion(conn->ssl_max_protocol_version))
{
conn->status = CONNECTION_BAD;
libpq_append_conn_error(conn, "invalid %s value: \"%s\"",
libpq_append_conn_error(conn, "invalid \"%s\" value: \"%s\"",
"ssl_max_protocol_version",
conn->ssl_max_protocol_version);
return false;

View File

@ -18,7 +18,7 @@ SELECT id,
FROM committs_test
ORDER BY id;
ERROR: could not get commit timestamp data
HINT: Make sure the configuration parameter track_commit_timestamp is set.
HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
DROP TABLE committs_test;
SELECT pg_xact_commit_timestamp('0'::xid);
ERROR: cannot retrieve commit timestamp for transaction 0
@ -40,7 +40,7 @@ SELECT x.xid::text::bigint > 0 as xid_valid,
roident != 0 AS valid_roident
FROM pg_last_committed_xact() x;
ERROR: could not get commit timestamp data
HINT: Make sure the configuration parameter track_commit_timestamp is set.
HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
-- Test non-normal transaction ids.
SELECT * FROM pg_xact_commit_timestamp_origin(NULL); -- ok, NULL
timestamp | roident
@ -69,13 +69,13 @@ SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
roident != 0 AS valid_roident
FROM pg_last_committed_xact() x;
ERROR: could not get commit timestamp data
HINT: Make sure the configuration parameter track_commit_timestamp is set.
HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
x.timestamp <= now() AS ts_high,
roident != 0 AS valid_roident
FROM pg_xact_commit_timestamp_origin(:'txid_no_origin') x;
ERROR: could not get commit timestamp data
HINT: Make sure the configuration parameter track_commit_timestamp is set.
HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
-- Test transaction with replication origin
SELECT pg_replication_origin_create('regress_commit_ts: get_origin') != 0
AS valid_roident;
@ -97,14 +97,14 @@ SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
FROM pg_last_committed_xact() x, pg_replication_origin r
WHERE r.roident = x.roident;
ERROR: could not get commit timestamp data
HINT: Make sure the configuration parameter track_commit_timestamp is set.
HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
x.timestamp <= now() AS ts_high,
r.roname
FROM pg_xact_commit_timestamp_origin(:'txid_with_origin') x, pg_replication_origin r
WHERE r.roident = x.roident;
ERROR: could not get commit timestamp data
HINT: Make sure the configuration parameter track_commit_timestamp is set.
HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
SELECT pg_replication_origin_session_reset();
pg_replication_origin_session_reset
-------------------------------------

View File

@ -2227,10 +2227,10 @@ main(int argc, char **argv)
res = PQexec(conn, "SET lc_messages TO \"C\"");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
pg_fatal("failed to set lc_messages: %s", PQerrorMessage(conn));
pg_fatal("failed to set \"lc_messages\": %s", PQerrorMessage(conn));
res = PQexec(conn, "SET debug_parallel_query = off");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
pg_fatal("failed to set debug_parallel_query: %s", PQerrorMessage(conn));
pg_fatal("failed to set \"debug_parallel_query\": %s", PQerrorMessage(conn));
/* Set the trace file, if requested */
if (tracefile != NULL)

View File

@ -58,7 +58,7 @@ set_rot13(SSL_CTX *context, bool isServerStart)
/* warn if the user has set ssl_passphrase_command */
if (ssl_passphrase_command[0])
ereport(WARNING,
(errmsg("ssl_passphrase_command setting ignored by ssl_passphrase_func module")));
(errmsg("\"ssl_passphrase_command\" setting ignored by ssl_passphrase_func module")));
SSL_CTX_set_default_passwd_cb(context, rot13_passphrase);
}

View File

@ -56,7 +56,7 @@ my $log_contents = slurp_file($log);
like(
$log_contents,
qr/WARNING.*ssl_passphrase_command setting ignored by ssl_passphrase_func module/,
qr/WARNING.*"ssl_passphrase_command" setting ignored by ssl_passphrase_func module/,
"ssl_passphrase_command set warning");
# set the wrong passphrase

View File

@ -233,7 +233,7 @@ setup_background_workers(int nworkers, dsm_segment *seg)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("could not register background process"),
errhint("You may need to increase max_worker_processes.")));
errhint("You may need to increase \"max_worker_processes\".")));
++wstate->nworkers;
}

View File

@ -251,7 +251,7 @@ _PG_init(void)
if (!process_shared_preload_libraries_in_progress)
ereport(ERROR,
(errmsg("cannot load \"%s\" after startup", "test_slru"),
errdetail("\"%s\" must be loaded with shared_preload_libraries.",
errdetail("\"%s\" must be loaded with \"shared_preload_libraries\".",
"test_slru")));
prev_shmem_request_hook = shmem_request_hook;

View File

@ -91,8 +91,8 @@ sub test_recovery_wal_level_minimal
# Confirm that the archive recovery fails with an expected error
my $logfile = slurp_file($recovery_node->logfile());
ok( $logfile =~
qr/FATAL: .* WAL was generated with wal_level=minimal, cannot continue recovering/,
"$node_text ends with an error because it finds WAL generated with wal_level=minimal"
qr/FATAL: .* WAL was generated with "wal_level=minimal", cannot continue recovering/,
"$node_text ends with an error because it finds WAL generated with \"wal_level=minimal\""
);
}

View File

@ -794,7 +794,7 @@ $handle =
make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr);
# We are not able to read from the slot as it requires wal_level >= logical on the primary server
check_pg_recvlogical_stderr($handle,
"logical decoding on standby requires wal_level >= logical on the primary"
"logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary"
);
# Restore primary wal_level

View File

@ -1042,7 +1042,7 @@ ERROR: parameter "locale" must be specified
SET icu_validation_level = ERROR;
CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); -- fails
ERROR: ICU locale "nonsense-nowhere" has unknown language "nonsense"
HINT: To disable ICU locale validation, set the parameter icu_validation_level to "disabled".
HINT: To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled".
CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense=yes'); -- fails
ERROR: could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR
RESET icu_validation_level;
@ -1050,7 +1050,7 @@ CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense=
WARNING: could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR
CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); DROP COLLATION testx;
WARNING: ICU locale "nonsense-nowhere" has unknown language "nonsense"
HINT: To disable ICU locale validation, set the parameter icu_validation_level to "disabled".
HINT: To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled".
CREATE COLLATION test4 FROM nonsense;
ERROR: collation "nonsense" for encoding "UTF8" does not exist
CREATE COLLATION test5 FROM test0;

View File

@ -113,7 +113,7 @@ COMMIT;
-- prevent empty values
SET default_table_access_method = '';
ERROR: invalid value for parameter "default_table_access_method": ""
DETAIL: default_table_access_method cannot be empty.
DETAIL: "default_table_access_method" cannot be empty.
-- prevent nonexistent values
SET default_table_access_method = 'I do not exist AM';
ERROR: invalid value for parameter "default_table_access_method": "I do not exist AM"

View File

@ -219,10 +219,10 @@ CONTEXT: JSON data, line 1: {"abc":1,3...
SET max_stack_depth = '100kB';
SELECT repeat('[', 10000)::json;
ERROR: stack depth limit exceeded
HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
SELECT repeat('{"a":', 10000)::json;
ERROR: stack depth limit exceeded
HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
RESET max_stack_depth;
-- Miscellaneous stuff.
SELECT 'true'::json; -- OK

View File

@ -213,10 +213,10 @@ CONTEXT: JSON data, line 1: {"abc":1,3...
SET max_stack_depth = '100kB';
SELECT repeat('[', 10000)::jsonb;
ERROR: stack depth limit exceeded
HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
SELECT repeat('{"a":', 10000)::jsonb;
ERROR: stack depth limit exceeded
HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
RESET max_stack_depth;
-- Miscellaneous stuff.
SELECT 'true'::jsonb; -- OK

View File

@ -19,7 +19,7 @@ SELECT * FROM pxtest1;
PREPARE TRANSACTION 'regress_foo1';
ERROR: prepared transactions are disabled
HINT: Set max_prepared_transactions to a nonzero value.
HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT * FROM pxtest1;
foobar
--------
@ -58,7 +58,7 @@ SELECT * FROM pxtest1;
PREPARE TRANSACTION 'regress_foo2';
ERROR: prepared transactions are disabled
HINT: Set max_prepared_transactions to a nonzero value.
HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT * FROM pxtest1;
foobar
--------
@ -84,7 +84,7 @@ SELECT * FROM pxtest1;
PREPARE TRANSACTION 'regress_foo3';
ERROR: prepared transactions are disabled
HINT: Set max_prepared_transactions to a nonzero value.
HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid;
gid
-----
@ -95,7 +95,7 @@ INSERT INTO pxtest1 VALUES ('fff');
-- This should fail, because the gid foo3 is already in use
PREPARE TRANSACTION 'regress_foo3';
ERROR: prepared transactions are disabled
HINT: Set max_prepared_transactions to a nonzero value.
HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT * FROM pxtest1;
foobar
--------
@ -121,7 +121,7 @@ SELECT * FROM pxtest1;
PREPARE TRANSACTION 'regress_foo4';
ERROR: prepared transactions are disabled
HINT: Set max_prepared_transactions to a nonzero value.
HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid;
gid
-----
@ -138,7 +138,7 @@ SELECT * FROM pxtest1;
INSERT INTO pxtest1 VALUES ('fff');
PREPARE TRANSACTION 'regress_foo5';
ERROR: prepared transactions are disabled
HINT: Set max_prepared_transactions to a nonzero value.
HINT: Set "max_prepared_transactions" to a nonzero value.
SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid;
gid
-----
@ -169,7 +169,7 @@ SELECT pg_advisory_xact_lock_shared(1);
PREPARE TRANSACTION 'regress_foo6'; -- fails
ERROR: prepared transactions are disabled
HINT: Set max_prepared_transactions to a nonzero value.
HINT: Set "max_prepared_transactions" to a nonzero value.
-- Test subtransactions
BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
CREATE TABLE pxtest2 (a int);
@ -181,7 +181,7 @@ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
INSERT INTO pxtest2 VALUES (3);
PREPARE TRANSACTION 'regress_sub1';
ERROR: prepared transactions are disabled
HINT: Set max_prepared_transactions to a nonzero value.
HINT: Set "max_prepared_transactions" to a nonzero value.
CREATE TABLE pxtest3(fff int);
-- Test shared invalidation
BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
@ -199,7 +199,7 @@ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
PREPARE TRANSACTION 'regress_sub2';
ERROR: prepared transactions are disabled
HINT: Set max_prepared_transactions to a nonzero value.
HINT: Set "max_prepared_transactions" to a nonzero value.
-- No such cursor
FETCH 1 FROM foo;
ERROR: cursor "foo" does not exist

View File

@ -147,17 +147,17 @@ SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
^
DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*';
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061...
^
DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
SELECT U&' \' UESCAPE '!' AS "tricky";
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&' \' UESCAPE '!' AS "tricky";
^
DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
SELECT 'tricky' AS U&"\" UESCAPE '!';
\
--------
@ -168,17 +168,17 @@ SELECT U&'wrong: \061';
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&'wrong: \061';
^
DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
SELECT U&'wrong: \+0061';
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&'wrong: \+0061';
^
DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
SELECT U&'wrong: +0061' UESCAPE '+';
ERROR: unsafe use of string constant with Unicode escapes
LINE 1: SELECT U&'wrong: +0061' UESCAPE '+';
^
DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off.
RESET standard_conforming_strings;
-- bytea
SET bytea_output TO hex;

View File

@ -554,11 +554,11 @@ $node->connect_fails(
$node->connect_fails(
"$common_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require ssl_min_protocol_version=incorrect_tls",
"connection failure with an incorrect SSL protocol minimum bound",
expected_stderr => qr/invalid ssl_min_protocol_version value/);
expected_stderr => qr/invalid "ssl_min_protocol_version" value/);
$node->connect_fails(
"$common_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require ssl_max_protocol_version=incorrect_tls",
"connection failure with an incorrect SSL protocol maximum bound",
expected_stderr => qr/invalid ssl_max_protocol_version value/);
expected_stderr => qr/invalid "ssl_max_protocol_version" value/);
### Server-side tests.
###

View File

@ -573,7 +573,7 @@ CREATE PUBLICATION tap_pub2 FOR TABLE skip_wal;
ROLLBACK;
});
ok( $reterr =~
m/WARNING: wal_level is insufficient to publish logical changes/,
'CREATE PUBLICATION while wal_level=minimal');
m/WARNING: "wal_level" is insufficient to publish logical changes/,
'CREATE PUBLICATION while "wal_level=minimal"');
done_testing();