From 17974ec259463869bb6bb4885d46847422fbc9ec Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Fri, 17 May 2024 11:23:08 +0200 Subject: [PATCH] Revise GUC names quoting in messages again After further review, we want to move in the direction of always quoting GUC names in error messages, rather than the previous (PG16) wildly mixed practice or the intermittent (mid-PG17) idea of doing this depending on how possibly confusing the GUC name is. This commit applies appropriate quotes to (almost?) all mentions of GUC names in error messages. It partially supersedes a243569bf65 and 8d9978a7176, which had moved things a bit in the opposite direction but which then were abandoned in a partial state. Author: Peter Smith Discussion: https://www.postgresql.org/message-id/flat/CAHut%2BPv-kSN8SkxSdoHano_wPubqcg5789ejhCDZAcLFceBR-w%40mail.gmail.com --- contrib/pg_prewarm/autoprewarm.c | 4 +-- .../pg_stat_statements/pg_stat_statements.c | 6 ++--- contrib/sepgsql/hooks.c | 2 +- contrib/test_decoding/expected/slot.out | 2 +- doc/src/sgml/sources.sgml | 15 +++-------- src/backend/access/gin/ginbulk.c | 2 +- src/backend/access/heap/vacuumlazy.c | 2 +- src/backend/access/table/tableamapi.c | 4 +-- src/backend/access/transam/commit_ts.c | 4 +-- src/backend/access/transam/multixact.c | 4 +-- src/backend/access/transam/rmgr.c | 4 +-- src/backend/access/transam/twophase.c | 6 ++--- src/backend/access/transam/xlog.c | 26 +++++++++---------- src/backend/access/transam/xlogarchive.c | 2 +- src/backend/access/transam/xlogfuncs.c | 4 +-- src/backend/access/transam/xlogprefetcher.c | 2 +- src/backend/access/transam/xlogrecovery.c | 12 ++++----- src/backend/commands/publicationcmds.c | 4 +-- src/backend/commands/vacuum.c | 2 +- src/backend/commands/variable.c | 8 +++--- src/backend/libpq/be-secure-openssl.c | 4 +-- src/backend/libpq/hba.c | 2 +- src/backend/libpq/pqcomm.c | 2 +- src/backend/parser/scan.l | 2 +- src/backend/port/sysv_sema.c | 2 +- src/backend/port/sysv_shmem.c | 8 +++--- 
src/backend/port/win32_shmem.c | 2 +- src/backend/postmaster/bgworker.c | 2 +- src/backend/postmaster/checkpointer.c | 2 +- src/backend/postmaster/pgarch.c | 10 +++---- src/backend/postmaster/postmaster.c | 10 +++---- src/backend/replication/logical/decode.c | 2 +- src/backend/replication/logical/launcher.c | 4 +-- src/backend/replication/logical/logical.c | 4 +-- src/backend/replication/logical/origin.c | 8 +++--- src/backend/replication/slot.c | 20 +++++++------- src/backend/replication/syncrep.c | 2 +- src/backend/storage/buffer/localbuf.c | 2 +- src/backend/storage/file/fd.c | 8 +++--- src/backend/storage/lmgr/lock.c | 12 ++++----- src/backend/storage/lmgr/predicate.c | 12 ++++----- src/backend/storage/lmgr/proc.c | 2 +- src/backend/tcop/postgres.c | 12 ++++----- src/backend/utils/adt/pg_locale.c | 4 +-- src/backend/utils/adt/varlena.c | 2 +- src/backend/utils/fmgr/dfmgr.c | 4 +-- src/backend/utils/misc/guc.c | 2 +- src/backend/utils/misc/guc_tables.c | 24 ++++++++--------- src/bin/initdb/initdb.c | 4 +-- src/bin/pg_basebackup/streamutil.c | 6 ++--- src/bin/pg_controldata/pg_controldata.c | 2 +- src/bin/pg_dump/pg_backup_archiver.c | 6 ++--- src/bin/pg_dump/pg_dump.c | 4 +-- src/bin/pg_rewind/libpq_source.c | 4 +-- src/bin/pg_rewind/pg_rewind.c | 6 ++--- src/bin/pg_test_fsync/pg_test_fsync.c | 2 +- src/bin/pg_upgrade/check.c | 6 ++--- src/bin/pg_upgrade/t/003_logical_slots.pl | 4 +-- src/bin/pg_upgrade/t/004_subscription.pl | 2 +- src/bin/pgbench/pgbench.c | 2 +- src/fe_utils/archive.c | 2 +- src/interfaces/libpq/fe-auth.c | 2 +- src/interfaces/libpq/fe-connect.c | 4 +-- .../commit_ts/expected/commit_timestamp_1.out | 12 ++++----- .../modules/libpq_pipeline/libpq_pipeline.c | 4 +-- .../ssl_passphrase_func.c | 2 +- .../ssl_passphrase_callback/t/001_testfunc.pl | 2 +- src/test/modules/test_shm_mq/setup.c | 2 +- src/test/modules/test_slru/test_slru.c | 2 +- src/test/recovery/t/024_archive_recovery.pl | 4 +-- .../t/035_standby_logical_decoding.pl | 2 +- 
.../regress/expected/collate.icu.utf8.out | 4 +-- src/test/regress/expected/create_am.out | 2 +- src/test/regress/expected/json.out | 4 +-- src/test/regress/expected/jsonb.out | 4 +-- .../regress/expected/prepared_xacts_1.out | 18 ++++++------- src/test/regress/expected/strings.out | 12 ++++----- src/test/ssl/t/001_ssltests.pl | 4 +-- src/test/subscription/t/001_rep_changes.pl | 4 +-- 79 files changed, 208 insertions(+), 215 deletions(-) diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c index 1c8804dc43..961d3b8e9d 100644 --- a/contrib/pg_prewarm/autoprewarm.c +++ b/contrib/pg_prewarm/autoprewarm.c @@ -831,7 +831,7 @@ apw_start_leader_worker(void) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("could not register background process"), - errhint("You may need to increase max_worker_processes."))); + errhint("You may need to increase \"max_worker_processes\"."))); status = WaitForBackgroundWorkerStartup(handle, &pid); if (status != BGWH_STARTED) @@ -867,7 +867,7 @@ apw_start_database_worker(void) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("registering dynamic bgworker autoprewarm failed"), - errhint("Consider increasing configuration parameter max_worker_processes."))); + errhint("Consider increasing configuration parameter \"max_worker_processes\"."))); /* * Ignore return value; if it fails, postmaster has died, but we have diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 67cec865ba..d4197ae0f7 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -1660,7 +1660,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, if (!pgss || !pgss_hash) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("pg_stat_statements must be loaded via shared_preload_libraries"))); + errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\""))); 
InitMaterializedSRF(fcinfo, 0); @@ -1989,7 +1989,7 @@ pg_stat_statements_info(PG_FUNCTION_ARGS) if (!pgss || !pgss_hash) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("pg_stat_statements must be loaded via shared_preload_libraries"))); + errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\""))); /* Build a tuple descriptor for our result type */ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) @@ -2671,7 +2671,7 @@ entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only) if (!pgss || !pgss_hash) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("pg_stat_statements must be loaded via shared_preload_libraries"))); + errmsg("pg_stat_statements must be loaded via \"shared_preload_libraries\""))); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); num_entries = hash_get_num_entries(pgss_hash); diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c index a6b2a3d9ba..0f206b1093 100644 --- a/contrib/sepgsql/hooks.c +++ b/contrib/sepgsql/hooks.c @@ -406,7 +406,7 @@ _PG_init(void) if (IsUnderPostmaster) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("sepgsql must be loaded via shared_preload_libraries"))); + errmsg("sepgsql must be loaded via \"shared_preload_libraries\""))); /* * Check availability of SELinux on the platform. If disabled, we cannot diff --git a/contrib/test_decoding/expected/slot.out b/contrib/test_decoding/expected/slot.out index 349ab2d380..7de03c79f6 100644 --- a/contrib/test_decoding/expected/slot.out +++ b/contrib/test_decoding/expected/slot.out @@ -220,7 +220,7 @@ ORDER BY o.slot_name, c.slot_name; -- released even when raise error during creating the target slot. SELECT 'copy' FROM pg_copy_logical_replication_slot('orig_slot1', 'failed'); -- error ERROR: all replication slots are in use -HINT: Free one or increase max_replication_slots. +HINT: Free one or increase "max_replication_slots". 
-- temporary slots were dropped automatically SELECT pg_drop_replication_slot('orig_slot1'); pg_drop_replication_slot diff --git a/doc/src/sgml/sources.sgml b/doc/src/sgml/sources.sgml index 0dae4d9158..fa68d4d024 100644 --- a/doc/src/sgml/sources.sgml +++ b/doc/src/sgml/sources.sgml @@ -533,17 +533,10 @@ Hint: The addendum, written as a complete sentence. Use of Quotes - Always use quotes to delimit file names, user-supplied identifiers, and - other variables that might contain words. Do not use them to mark up - variables that will not contain words (for example, operator names). - - - - In messages containing configuration variable names, do not include quotes - when the names are visibly not natural English words, such as when they - have underscores, are all-uppercase or have mixed case. Otherwise, quotes - must be added. Do include quotes in a message where an arbitrary variable - name is to be expanded. + Always use quotes to delimit file names, user-supplied identifiers, + configuration variable names, and other variables that might contain + words. Do not use them to mark up variables that will not contain words + (for example, operator names). 
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c index a522801c2f..7f89cd5e82 100644 --- a/src/backend/access/gin/ginbulk.c +++ b/src/backend/access/gin/ginbulk.c @@ -42,7 +42,7 @@ ginCombineData(RBTNode *existing, const RBTNode *newdata, void *arg) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("posting list is too long"), - errhint("Reduce maintenance_work_mem."))); + errhint("Reduce \"maintenance_work_mem\"."))); accum->allocatedMemory -= GetMemoryChunkSpace(eo->list); eo->maxcount *= 2; diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 84cc983b6e..8145ea8fc3 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -2327,7 +2327,7 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel) vacrel->dbname, vacrel->relnamespace, vacrel->relname, vacrel->num_index_scans), errdetail("The table's relfrozenxid or relminmxid is too far in the past."), - errhint("Consider increasing configuration parameter maintenance_work_mem or autovacuum_work_mem.\n" + errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n" "You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs."))); /* Stop applying cost limits from this point on */ diff --git a/src/backend/access/table/tableamapi.c b/src/backend/access/table/tableamapi.c index ce637a5a5d..e9b598256f 100644 --- a/src/backend/access/table/tableamapi.c +++ b/src/backend/access/table/tableamapi.c @@ -106,14 +106,14 @@ check_default_table_access_method(char **newval, void **extra, GucSource source) { if (**newval == '\0') { - GUC_check_errdetail("%s cannot be empty.", + GUC_check_errdetail("\"%s\" cannot be empty.", "default_table_access_method"); return false; } if (strlen(*newval) >= NAMEDATALEN) { - GUC_check_errdetail("%s is too long (maximum %d characters).", + GUC_check_errdetail("\"%s\" is too long 
(maximum %d characters).", "default_table_access_method", NAMEDATALEN - 1); return false; } diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index f221494687..77e1899d7a 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -384,9 +384,9 @@ error_commit_ts_disabled(void) (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not get commit timestamp data"), RecoveryInProgress() ? - errhint("Make sure the configuration parameter %s is set on the primary server.", + errhint("Make sure the configuration parameter \"%s\" is set on the primary server.", "track_commit_timestamp") : - errhint("Make sure the configuration parameter %s is set.", + errhint("Make sure the configuration parameter \"%s\" is set.", "track_commit_timestamp"))); } diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 380c866d71..54c916e034 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -1151,7 +1151,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) MultiXactState->offsetStopLimit - nextOffset - 1, nmembers, MultiXactState->offsetStopLimit - nextOffset - 1), - errhint("Execute a database-wide VACUUM in database with OID %u with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.", + errhint("Execute a database-wide VACUUM in database with OID %u with reduced \"vacuum_multixact_freeze_min_age\" and \"vacuum_multixact_freeze_table_age\" settings.", MultiXactState->oldestMultiXactDB))); } @@ -1187,7 +1187,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) MultiXactState->offsetStopLimit - nextOffset + nmembers, MultiXactState->oldestMultiXactDB, MultiXactState->offsetStopLimit - nextOffset + nmembers), - errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age 
settings."))); + errhint("Execute a database-wide VACUUM in that database with reduced \"vacuum_multixact_freeze_min_age\" and \"vacuum_multixact_freeze_table_age\" settings."))); ExtendMultiXactMember(nextOffset, nmembers); diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c index 3e2f1d4a23..1b7499726e 100644 --- a/src/backend/access/transam/rmgr.c +++ b/src/backend/access/transam/rmgr.c @@ -91,7 +91,7 @@ void RmgrNotFound(RmgrId rmid) { ereport(ERROR, (errmsg("resource manager with ID %d not registered", rmid), - errhint("Include the extension module that implements this resource manager in shared_preload_libraries."))); + errhint("Include the extension module that implements this resource manager in \"shared_preload_libraries\"."))); } /* @@ -118,7 +118,7 @@ RegisterCustomRmgr(RmgrId rmid, const RmgrData *rmgr) if (!process_shared_preload_libraries_in_progress) ereport(ERROR, (errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid), - errdetail("Custom resource manager must be registered while initializing modules in shared_preload_libraries."))); + errdetail("Custom resource manager must be registered while initializing modules in \"shared_preload_libraries\"."))); if (RmgrTable[rmid].rm_name != NULL) ereport(ERROR, diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 8090ac9fc1..bf451d42ff 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -373,7 +373,7 @@ MarkAsPreparing(TransactionId xid, const char *gid, ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("prepared transactions are disabled"), - errhint("Set max_prepared_transactions to a nonzero value."))); + errhint("Set \"max_prepared_transactions\" to a nonzero value."))); /* on first call, register the exit hook */ if (!twophaseExitRegistered) @@ -402,7 +402,7 @@ MarkAsPreparing(TransactionId xid, const char *gid, 
ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("maximum number of prepared transactions reached"), - errhint("Increase max_prepared_transactions (currently %d).", + errhint("Increase \"max_prepared_transactions\" (currently %d).", max_prepared_xacts))); gxact = TwoPhaseState->freeGXacts; TwoPhaseState->freeGXacts = gxact->next; @@ -2539,7 +2539,7 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("maximum number of prepared transactions reached"), - errhint("Increase max_prepared_transactions (currently %d).", + errhint("Increase \"max_prepared_transactions\" (currently %d).", max_prepared_xacts))); gxact = TwoPhaseState->freeGXacts; TwoPhaseState->freeGXacts = gxact->next; diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index c3fd9c1eae..330e058c5f 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -4501,11 +4501,11 @@ ReadControlFile(void) /* check and update variables dependent on wal_segment_size */ if (ConvertToXSegs(min_wal_size_mb, wal_segment_size) < 2) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("min_wal_size must be at least twice wal_segment_size"))); + errmsg("\"min_wal_size\" must be at least twice \"wal_segment_size\""))); if (ConvertToXSegs(max_wal_size_mb, wal_segment_size) < 2) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("max_wal_size must be at least twice wal_segment_size"))); + errmsg("\"max_wal_size\" must be at least twice \"wal_segment_size\""))); UsableBytesInSegment = (wal_segment_size / XLOG_BLCKSZ * UsableBytesInPage) - @@ -5351,9 +5351,9 @@ CheckRequiredParameterValues(void) { ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("WAL was generated with wal_level=minimal, cannot continue recovering"), - errdetail("This happens if you temporarily set wal_level=minimal on the server."), - errhint("Use a backup taken after setting 
wal_level to higher than minimal."))); + errmsg("WAL was generated with \"wal_level=minimal\", cannot continue recovering"), + errdetail("This happens if you temporarily set \"wal_level=minimal\" on the server."), + errhint("Use a backup taken after setting \"wal_level\" to higher than \"minimal\"."))); } /* @@ -8549,7 +8549,7 @@ get_sync_bit(int method) #endif default: /* can't happen (unless we are out of sync with option array) */ - elog(ERROR, "unrecognized wal_sync_method: %d", method); + elog(ERROR, "unrecognized \"wal_sync_method\": %d", method); return 0; /* silence warning */ } } @@ -8647,7 +8647,7 @@ issue_xlog_fsync(int fd, XLogSegNo segno, TimeLineID tli) default: ereport(PANIC, errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg_internal("unrecognized wal_sync_method: %d", wal_sync_method)); + errmsg_internal("unrecognized \"wal_sync_method\": %d", wal_sync_method)); break; } @@ -8725,7 +8725,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("WAL level not sufficient for making an online backup"), - errhint("wal_level must be set to \"replica\" or \"logical\" at server start."))); + errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start."))); if (strlen(backupidstr) > MAXPGPATH) ereport(ERROR, @@ -8851,11 +8851,11 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, if (!checkpointfpw || state->startpoint <= recptr) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("WAL generated with full_page_writes=off was replayed " + errmsg("WAL generated with \"full_page_writes=off\" was replayed " "since last restartpoint"), errhint("This means that the backup being taken on the standby " "is corrupt and should not be used. 
" - "Enable full_page_writes and run CHECKPOINT on the primary, " + "Enable \"full_page_writes\" and run CHECKPOINT on the primary, " "and then try an online backup again."))); /* @@ -9147,11 +9147,11 @@ do_pg_backup_stop(BackupState *state, bool waitforarchive) if (state->startpoint <= recptr) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("WAL generated with full_page_writes=off was replayed " + errmsg("WAL generated with \"full_page_writes=off\" was replayed " "during online backup"), errhint("This means that the backup being taken on the standby " "is corrupt and should not be used. " - "Enable full_page_writes and run CHECKPOINT on the primary, " + "Enable \"full_page_writes\" and run CHECKPOINT on the primary, " "and then try an online backup again."))); @@ -9279,7 +9279,7 @@ do_pg_backup_stop(BackupState *state, bool waitforarchive) ereport(WARNING, (errmsg("still waiting for all required WAL segments to be archived (%d seconds elapsed)", waits), - errhint("Check that your archive_command is executing properly. " + errhint("Check that your \"archive_command\" is executing properly. 
" "You can safely cancel this backup, " "but the database backup will not be usable without all the WAL segments."))); } diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c index caa1f03d93..81999b4820 100644 --- a/src/backend/access/transam/xlogarchive.c +++ b/src/backend/access/transam/xlogarchive.c @@ -233,7 +233,7 @@ RestoreArchivedFile(char *path, const char *xlogfname, ereport(elevel, (errcode_for_file_access(), errmsg("could not stat file \"%s\": %m", xlogpath), - errdetail("restore_command returned a zero exit status, but stat() failed."))); + errdetail("\"restore_command\" returned a zero exit status, but stat() failed."))); } } diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c index 92bdb17ed5..4e46baaebd 100644 --- a/src/backend/access/transam/xlogfuncs.c +++ b/src/backend/access/transam/xlogfuncs.c @@ -212,7 +212,7 @@ pg_log_standby_snapshot(PG_FUNCTION_ARGS) if (!XLogStandbyInfoActive()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("pg_log_standby_snapshot() can only be used if wal_level >= replica"))); + errmsg("pg_log_standby_snapshot() can only be used if \"wal_level\" >= \"replica\""))); recptr = LogStandbySnapshot(); @@ -245,7 +245,7 @@ pg_create_restore_point(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("WAL level not sufficient for creating a restore point"), - errhint("wal_level must be set to \"replica\" or \"logical\" at server start."))); + errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start."))); restore_name_str = text_to_cstring(restore_name); diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c index fc80c37e55..84023d61ba 100644 --- a/src/backend/access/transam/xlogprefetcher.c +++ b/src/backend/access/transam/xlogprefetcher.c @@ -1085,7 +1085,7 @@ check_recovery_prefetch(int *new_value, void 
**extra, GucSource source) #ifndef USE_PREFETCH if (*new_value == RECOVERY_PREFETCH_ON) { - GUC_check_errdetail("recovery_prefetch is not supported on platforms that lack posix_fadvise()."); + GUC_check_errdetail("\"recovery_prefetch\" is not supported on platforms that lack posix_fadvise()."); return false; } #endif diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c index 29c5bec084..b45b833172 100644 --- a/src/backend/access/transam/xlogrecovery.c +++ b/src/backend/access/transam/xlogrecovery.c @@ -1119,7 +1119,7 @@ validateRecoveryParameters(void) if ((PrimaryConnInfo == NULL || strcmp(PrimaryConnInfo, "") == 0) && (recoveryRestoreCommand == NULL || strcmp(recoveryRestoreCommand, "") == 0)) ereport(WARNING, - (errmsg("specified neither primary_conninfo nor restore_command"), + (errmsg("specified neither \"primary_conninfo\" nor \"restore_command\""), errhint("The database server will regularly poll the pg_wal subdirectory to check for files placed there."))); } else @@ -1128,7 +1128,7 @@ validateRecoveryParameters(void) strcmp(recoveryRestoreCommand, "") == 0) ereport(FATAL, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("must specify restore_command when standby mode is not enabled"))); + errmsg("must specify \"restore_command\" when standby mode is not enabled"))); } /* @@ -2162,7 +2162,7 @@ CheckTablespaceDirectory(void) errmsg("unexpected directory entry \"%s\" found in %s", de->d_name, "pg_tblspc/"), errdetail("All directory entries in pg_tblspc/ should be symbolic links."), - errhint("Remove those directories, or set allow_in_place_tablespaces to ON transiently to let recovery complete."))); + errhint("Remove those directories, or set \"allow_in_place_tablespaces\" to ON transiently to let recovery complete."))); } } @@ -4771,7 +4771,7 @@ error_multiple_recovery_targets(void) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("multiple recovery targets specified"), - errdetail("At most one 
of recovery_target, recovery_target_lsn, recovery_target_name, recovery_target_time, recovery_target_xid may be set."))); + errdetail("At most one of \"recovery_target\", \"recovery_target_lsn\", \"recovery_target_name\", \"recovery_target_time\", \"recovery_target_xid\" may be set."))); } /* @@ -4855,7 +4855,7 @@ check_recovery_target_name(char **newval, void **extra, GucSource source) /* Use the value of newval directly */ if (strlen(*newval) >= MAXFNAMELEN) { - GUC_check_errdetail("%s is too long (maximum %d characters).", + GUC_check_errdetail("\"%s\" is too long (maximum %d characters).", "recovery_target_name", MAXFNAMELEN - 1); return false; } @@ -4979,7 +4979,7 @@ check_recovery_target_timeline(char **newval, void **extra, GucSource source) strtoul(*newval, NULL, 0); if (errno == EINVAL || errno == ERANGE) { - GUC_check_errdetail("recovery_target_timeline is not a valid number."); + GUC_check_errdetail("\"recovery_target_timeline\" is not a valid number."); return false; } } diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index 9bcc22fdd7..6ea709988e 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -858,8 +858,8 @@ CreatePublication(ParseState *pstate, CreatePublicationStmt *stmt) if (wal_level != WAL_LEVEL_LOGICAL) ereport(WARNING, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("wal_level is insufficient to publish logical changes"), - errhint("Set wal_level to \"logical\" before creating subscriptions."))); + errmsg("\"wal_level\" is insufficient to publish logical changes"), + errhint("Set \"wal_level\" to \"logical\" before creating subscriptions."))); return myself; } diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 521ee74586..48f8eab202 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -131,7 +131,7 @@ check_vacuum_buffer_usage_limit(int *newval, void **extra, return true; /* 
Value does not fall within any allowable range */ - GUC_check_errdetail("vacuum_buffer_usage_limit must be 0 or between %d kB and %d kB", + GUC_check_errdetail("\"vacuum_buffer_usage_limit\" must be 0 or between %d kB and %d kB", MIN_BAS_VAC_RING_SIZE_KB, MAX_BAS_VAC_RING_SIZE_KB); return false; diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index 01151ca2b5..9345131711 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -717,7 +717,7 @@ check_client_encoding(char **newval, void **extra, GucSource source) else { /* Provide a useful complaint */ - GUC_check_errdetail("Cannot change client_encoding now."); + GUC_check_errdetail("Cannot change \"client_encoding\" now."); } return false; } @@ -778,7 +778,7 @@ assign_client_encoding(const char *newval, void *extra) */ ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("cannot change client_encoding during a parallel operation"))); + errmsg("cannot change \"client_encoding\" during a parallel operation"))); } /* We do not expect an error if PrepareClientEncoding succeeded */ @@ -1202,7 +1202,7 @@ check_effective_io_concurrency(int *newval, void **extra, GucSource source) #ifndef USE_PREFETCH if (*newval != 0) { - GUC_check_errdetail("effective_io_concurrency must be set to 0 on platforms that lack posix_fadvise()."); + GUC_check_errdetail("\"effective_io_concurrency\" must be set to 0 on platforms that lack posix_fadvise()."); return false; } #endif /* USE_PREFETCH */ @@ -1215,7 +1215,7 @@ check_maintenance_io_concurrency(int *newval, void **extra, GucSource source) #ifndef USE_PREFETCH if (*newval != 0) { - GUC_check_errdetail("maintenance_io_concurrency must be set to 0 on platforms that lack posix_fadvise()."); + GUC_check_errdetail("\"maintenance_io_concurrency\" must be set to 0 on platforms that lack posix_fadvise()."); return false; } #endif /* USE_PREFETCH */ diff --git a/src/backend/libpq/be-secure-openssl.c 
b/src/backend/libpq/be-secure-openssl.c index 60cf68aac4..0caad6bed3 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -201,7 +201,7 @@ be_tls_init(bool isServerStart) { ereport(isServerStart ? FATAL : LOG, /*- translator: first %s is a GUC option name, second %s is its value */ - (errmsg("%s setting \"%s\" not supported by this build", + (errmsg("\"%s\" setting \"%s\" not supported by this build", "ssl_min_protocol_version", GetConfigOption("ssl_min_protocol_version", false, false)))); @@ -251,7 +251,7 @@ be_tls_init(bool isServerStart) { ereport(isServerStart ? FATAL : LOG, (errmsg("could not set SSL protocol version range"), - errdetail("%s cannot be higher than %s", + errdetail("\"%s\" cannot be higher than \"%s\"", "ssl_min_protocol_version", "ssl_max_protocol_version"))); goto error; diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index d506c3c0b7..18271def2e 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -1378,7 +1378,7 @@ parse_hba_line(TokenizedAuthLine *tok_line, int elevel) ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("hostssl record cannot match because SSL is disabled"), - errhint("Set ssl = on in postgresql.conf."), + errhint("Set \"ssl = on\" in postgresql.conf."), errcontext("line %d of configuration file \"%s\"", line_num, file_name))); *err_msg = "hostssl record cannot match because SSL is disabled"; diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index 2cee49a208..daa0696146 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -731,7 +731,7 @@ Setup_AF_UNIX(const char *sock_path) if (Unix_socket_group[0] != '\0') { #ifdef WIN32 - elog(WARNING, "configuration item unix_socket_group is not supported on this platform"); + elog(WARNING, "configuration item \"unix_socket_group\" is not supported on this platform"); #else char *endptr; unsigned long val; diff --git a/src/backend/parser/scan.l 
b/src/backend/parser/scan.l index b499975e9c..9b33fb8d72 100644 --- a/src/backend/parser/scan.l +++ b/src/backend/parser/scan.l @@ -565,7 +565,7 @@ other . ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsafe use of string constant with Unicode escapes"), - errdetail("String constants with Unicode escapes cannot be used when standard_conforming_strings is off."), + errdetail("String constants with Unicode escapes cannot be used when \"standard_conforming_strings\" is off."), lexer_errposition())); BEGIN(xus); startlit(); diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c index 647045e8c5..1454f96b5f 100644 --- a/src/backend/port/sysv_sema.c +++ b/src/backend/port/sysv_sema.c @@ -127,7 +127,7 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems) "semaphore sets (SEMMNI), or the system wide maximum number of " "semaphores (SEMMNS), would be exceeded. You need to raise the " "respective kernel parameter. Alternatively, reduce PostgreSQL's " - "consumption of semaphores by reducing its max_connections parameter.\n" + "consumption of semaphores by reducing its \"max_connections\" parameter.\n" "The PostgreSQL documentation contains more information about " "configuring your system for PostgreSQL.") : 0)); } diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c index 1a6d8fa0fb..362a37d3b3 100644 --- a/src/backend/port/sysv_shmem.c +++ b/src/backend/port/sysv_shmem.c @@ -581,7 +581,7 @@ check_huge_page_size(int *newval, void **extra, GucSource source) /* Recent enough Linux only, for now. See GetHugePageSize(). */ if (*newval != 0) { - GUC_check_errdetail("huge_page_size must be 0 on this platform."); + GUC_check_errdetail("\"huge_page_size\" must be 0 on this platform."); return false; } #endif @@ -658,8 +658,8 @@ CreateAnonymousSegment(Size *size) "for a shared memory segment exceeded available memory, " "swap space, or huge pages. 
To reduce the request size " "(currently %zu bytes), reduce PostgreSQL's shared " - "memory usage, perhaps by reducing shared_buffers or " - "max_connections.", + "memory usage, perhaps by reducing \"shared_buffers\" or " + "\"max_connections\".", allocsize) : 0)); } @@ -729,7 +729,7 @@ PGSharedMemoryCreate(Size size, if (huge_pages == HUGE_PAGES_ON && shared_memory_type != SHMEM_TYPE_MMAP) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("huge pages not supported with the current shared_memory_type setting"))); + errmsg("huge pages not supported with the current \"shared_memory_type\" setting"))); /* Room for a header? */ Assert(size > MAXALIGN(sizeof(PGShmemHeader))); diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c index 90bed0146d..3bcce9d3b6 100644 --- a/src/backend/port/win32_shmem.c +++ b/src/backend/port/win32_shmem.c @@ -643,7 +643,7 @@ check_huge_page_size(int *newval, void **extra, GucSource source) { if (*newval != 0) { - GUC_check_errdetail("huge_page_size must be 0 on this platform."); + GUC_check_errdetail("\"huge_page_size\" must be 0 on this platform."); return false; } return true; diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c index cf64a4beb2..97f9f28424 100644 --- a/src/backend/postmaster/bgworker.c +++ b/src/backend/postmaster/bgworker.c @@ -885,7 +885,7 @@ RegisterBackgroundWorker(BackgroundWorker *worker) return; ereport(LOG, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("background worker \"%s\": must be registered in shared_preload_libraries", + errmsg("background worker \"%s\": must be registered in \"shared_preload_libraries\"", worker->bgw_name))); return; } diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index 8ef600ae72..3c68a9904d 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -442,7 +442,7 @@ CheckpointerMain(char *startup_data, size_t startup_data_len) 
"checkpoints are occurring too frequently (%d seconds apart)", elapsed_secs, elapsed_secs), - errhint("Consider increasing the configuration parameter max_wal_size."))); + errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size"))); /* * Initialize checkpointer-private variables used during diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c index d82bcc2cfd..3fc8fe7d10 100644 --- a/src/backend/postmaster/pgarch.c +++ b/src/backend/postmaster/pgarch.c @@ -425,7 +425,7 @@ pgarch_ArchiverCopyLoop(void) !ArchiveCallbacks->check_configured_cb(archive_module_state)) { ereport(WARNING, - (errmsg("archive_mode enabled, yet archiving is not configured"), + (errmsg("\"archive_mode\" enabled, yet archiving is not configured"), arch_module_check_errdetail_string ? errdetail_internal("%s", arch_module_check_errdetail_string) : 0)); return; @@ -876,8 +876,8 @@ HandlePgArchInterrupts(void) if (XLogArchiveLibrary[0] != '\0' && XLogArchiveCommand[0] != '\0') ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("both archive_command and archive_library set"), - errdetail("Only one of archive_command, archive_library may be set."))); + errmsg("both \"archive_command\" and \"archive_library\" set"), + errdetail("Only one of \"archive_command\", \"archive_library\" may be set."))); archiveLibChanged = strcmp(XLogArchiveLibrary, archiveLib) != 0; pfree(archiveLib); @@ -915,8 +915,8 @@ LoadArchiveLibrary(void) if (XLogArchiveLibrary[0] != '\0' && XLogArchiveCommand[0] != '\0') ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("both archive_command and archive_library set"), - errdetail("Only one of archive_command, archive_library may be set."))); + errmsg("both \"archive_command\" and \"archive_library\" set"), + errdetail("Only one of \"archive_command\", \"archive_library\" may be set."))); /* * If shell archiving is enabled, use our special initialization function. 
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 7f3170a8f0..bf0241aed0 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -822,7 +822,7 @@ PostmasterMain(int argc, char *argv[]) */ if (SuperuserReservedConnections + ReservedConnections >= MaxConnections) { - write_stderr("%s: superuser_reserved_connections (%d) plus reserved_connections (%d) must be less than max_connections (%d)\n", + write_stderr("%s: \"superuser_reserved_connections\" (%d) plus \"reserved_connections\" (%d) must be less than \"max_connections\" (%d)\n", progname, SuperuserReservedConnections, ReservedConnections, MaxConnections); @@ -830,13 +830,13 @@ PostmasterMain(int argc, char *argv[]) } if (XLogArchiveMode > ARCHIVE_MODE_OFF && wal_level == WAL_LEVEL_MINIMAL) ereport(ERROR, - (errmsg("WAL archival cannot be enabled when wal_level is \"minimal\""))); + (errmsg("WAL archival cannot be enabled when \"wal_level\" is \"minimal\""))); if (max_wal_senders > 0 && wal_level == WAL_LEVEL_MINIMAL) ereport(ERROR, - (errmsg("WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or \"logical\""))); + (errmsg("WAL streaming (\"max_wal_senders\" > 0) requires \"wal_level\" to be \"replica\" or \"logical\""))); if (summarize_wal && wal_level == WAL_LEVEL_MINIMAL) ereport(ERROR, - (errmsg("WAL cannot be summarized when wal_level is \"minimal\""))); + (errmsg("WAL cannot be summarized when \"wal_level\" is \"minimal\""))); /* * Other one-time internal sanity checks can go here, if they are fast. 
@@ -3359,7 +3359,7 @@ PostmasterStateMachine(void) if (!restart_after_crash) { ereport(LOG, - (errmsg("shutting down because restart_after_crash is off"))); + (errmsg("shutting down because \"restart_after_crash\" is off"))); ExitPostmaster(1); } } diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index 7a86f8481d..8ec5adfd90 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -174,7 +174,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) Assert(RecoveryInProgress()); ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("logical decoding on standby requires wal_level >= logical on the primary"))); + errmsg("logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary"))); } break; } diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index 66070e9131..27c3a91fb7 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -425,7 +425,7 @@ retry: ereport(WARNING, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), errmsg("out of logical replication worker slots"), - errhint("You might need to increase %s.", "max_logical_replication_workers"))); + errhint("You might need to increase \"%s\".", "max_logical_replication_workers"))); return false; } @@ -511,7 +511,7 @@ retry: ereport(WARNING, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), errmsg("out of background worker slots"), - errhint("You might need to increase %s.", "max_worker_processes"))); + errhint("You might need to increase \"%s\".", "max_worker_processes"))); return false; } diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index 97a4d99c4e..99f31849bb 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -118,7 +118,7 @@ CheckLogicalDecodingRequirements(void) 
if (wal_level < WAL_LEVEL_LOGICAL) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("logical decoding requires wal_level >= logical"))); + errmsg("logical decoding requires \"wal_level\" >= \"logical\""))); if (MyDatabaseId == InvalidOid) ereport(ERROR, @@ -138,7 +138,7 @@ CheckLogicalDecodingRequirements(void) if (GetActiveWalLevelOnStandby() < WAL_LEVEL_LOGICAL) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("logical decoding on standby requires wal_level >= logical on the primary"))); + errmsg("logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary"))); } } diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index a529da983a..419e4814f0 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -187,7 +187,7 @@ replorigin_check_prerequisites(bool check_slots, bool recoveryOK) if (check_slots && max_replication_slots == 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot query or manipulate replication origin when max_replication_slots = 0"))); + errmsg("cannot query or manipulate replication origin when \"max_replication_slots\" is 0"))); if (!recoveryOK && RecoveryInProgress()) ereport(ERROR, @@ -795,7 +795,7 @@ StartupReplicationOrigin(void) if (last_state == max_replication_slots) ereport(PANIC, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), - errmsg("could not find free replication state, increase max_replication_slots"))); + errmsg("could not find free replication state, increase \"max_replication_slots\""))); /* copy data to shared memory */ replication_states[last_state].roident = disk_state.roident; @@ -954,7 +954,7 @@ replorigin_advance(RepOriginId node, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), errmsg("could not find free replication state slot for replication origin with ID %d", node), - errhint("Increase max_replication_slots and try 
again."))); + errhint("Increase \"max_replication_slots\" and try again."))); if (replication_state == NULL) { @@ -1155,7 +1155,7 @@ replorigin_session_setup(RepOriginId node, int acquired_by) (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), errmsg("could not find free replication state slot for replication origin with ID %d", node), - errhint("Increase max_replication_slots and try again."))); + errhint("Increase \"max_replication_slots\" and try again."))); else if (session_replication_state == NULL) { /* initialize new slot */ diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index aa4ea387da..0e54ea5bb9 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -378,7 +378,7 @@ ReplicationSlotCreate(const char *name, bool db_specific, ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), errmsg("all replication slots are in use"), - errhint("Free one or increase max_replication_slots."))); + errhint("Free one or increase \"max_replication_slots\"."))); /* * Since this slot is not in use, nobody should be looking at any part of @@ -1369,12 +1369,12 @@ CheckSlotRequirements(void) if (max_replication_slots == 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("replication slots can only be used if max_replication_slots > 0"))); + errmsg("replication slots can only be used if \"max_replication_slots\" > 0"))); if (wal_level < WAL_LEVEL_REPLICA) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("replication slots can only be used if wal_level >= replica"))); + errmsg("replication slots can only be used if \"wal_level\" >= \"replica\""))); } /* @@ -1508,7 +1508,7 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause, break; case RS_INVAL_WAL_LEVEL: - appendStringInfoString(&err_detail, _("Logical decoding on standby requires wal_level >= logical on the primary server.")); + appendStringInfoString(&err_detail, _("Logical decoding on standby 
requires \"wal_level\" >= \"logical\" on the primary server.")); break; case RS_INVAL_NONE: pg_unreachable(); @@ -1521,7 +1521,7 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause, errmsg("invalidating obsolete replication slot \"%s\"", NameStr(slotname)), errdetail_internal("%s", err_detail.data), - hint ? errhint("You might need to increase %s.", "max_slot_wal_keep_size") : 0); + hint ? errhint("You might need to increase \"%s\".", "max_slot_wal_keep_size") : 0); pfree(err_detail.data); } @@ -2332,15 +2332,15 @@ RestoreSlotFromDisk(const char *name) if (cp.slotdata.database != InvalidOid && wal_level < WAL_LEVEL_LOGICAL) ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("logical replication slot \"%s\" exists, but wal_level < logical", + errmsg("logical replication slot \"%s\" exists, but \"wal_level\" < \"logical\"", NameStr(cp.slotdata.name)), - errhint("Change wal_level to be logical or higher."))); + errhint("Change \"wal_level\" to be \"logical\" or higher."))); else if (wal_level < WAL_LEVEL_REPLICA) ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("physical replication slot \"%s\" exists, but wal_level < replica", + errmsg("physical replication slot \"%s\" exists, but \"wal_level\" < \"replica\"", NameStr(cp.slotdata.name)), - errhint("Change wal_level to be replica or higher."))); + errhint("Change \"wal_level\" to be \"replica\" or higher."))); /* nothing can be active yet, don't lock anything */ for (i = 0; i < max_replication_slots; i++) @@ -2383,7 +2383,7 @@ RestoreSlotFromDisk(const char *name) if (!restored) ereport(FATAL, (errmsg("too many replication slots active before shutdown"), - errhint("Increase max_replication_slots and try again."))); + errhint("Increase \"max_replication_slots\" and try again."))); } /* diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index 77917b848a..fa5988c824 100644 --- a/src/backend/replication/syncrep.c +++ 
b/src/backend/replication/syncrep.c @@ -1010,7 +1010,7 @@ check_synchronous_standby_names(char **newval, void **extra, GucSource source) if (syncrep_parse_error_msg) GUC_check_errdetail("%s", syncrep_parse_error_msg); else - GUC_check_errdetail("synchronous_standby_names parser failed"); + GUC_check_errdetail("\"synchronous_standby_names\" parser failed"); return false; } diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 985a2c7049..8da7dd6c98 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -709,7 +709,7 @@ check_temp_buffers(int *newval, void **extra, GucSource source) */ if (source != PGC_S_TEST && NLocBuffer && NLocBuffer != *newval) { - GUC_check_errdetail("temp_buffers cannot be changed after any temporary tables have been accessed in the session."); + GUC_check_errdetail("\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session."); return false; } return true; diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c index 8c8e81f899..a7c05b0a6f 100644 --- a/src/backend/storage/file/fd.c +++ b/src/backend/storage/file/fd.c @@ -3947,7 +3947,7 @@ check_debug_io_direct(char **newval, void **extra, GucSource source) #if PG_O_DIRECT == 0 if (strcmp(*newval, "") != 0) { - GUC_check_errdetail("debug_io_direct is not supported on this platform."); + GUC_check_errdetail("\"debug_io_direct\" is not supported on this platform."); result = false; } flags = 0; @@ -3961,7 +3961,7 @@ check_debug_io_direct(char **newval, void **extra, GucSource source) if (!SplitGUCList(rawstring, ',', &elemlist)) { - GUC_check_errdetail("Invalid list syntax in parameter %s", + GUC_check_errdetail("Invalid list syntax in parameter \"%s\"", "debug_io_direct"); pfree(rawstring); list_free(elemlist); @@ -3994,14 +3994,14 @@ check_debug_io_direct(char **newval, void **extra, GucSource source) #if XLOG_BLCKSZ < PG_IO_ALIGN_SIZE if (result && 
(flags & (IO_DIRECT_WAL | IO_DIRECT_WAL_INIT))) { - GUC_check_errdetail("debug_io_direct is not supported for WAL because XLOG_BLCKSZ is too small"); + GUC_check_errdetail("\"debug_io_direct\" is not supported for WAL because XLOG_BLCKSZ is too small"); result = false; } #endif #if BLCKSZ < PG_IO_ALIGN_SIZE if (result && (flags & IO_DIRECT_DATA)) { - GUC_check_errdetail("debug_io_direct is not supported for data because BLCKSZ is too small"); + GUC_check_errdetail("\"debug_io_direct\" is not supported for data because BLCKSZ is too small"); result = false; } #endif diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 5154353c84..9e4ddf7225 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -960,7 +960,7 @@ LockAcquireExtended(const LOCKTAG *locktag, ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), - errhint("You might need to increase %s.", "max_locks_per_transaction"))); + errhint("You might need to increase \"%s\".", "max_locks_per_transaction"))); else return LOCKACQUIRE_NOT_AVAIL; } @@ -998,7 +998,7 @@ LockAcquireExtended(const LOCKTAG *locktag, ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), - errhint("You might need to increase %s.", "max_locks_per_transaction"))); + errhint("You might need to increase \"%s\".", "max_locks_per_transaction"))); else return LOCKACQUIRE_NOT_AVAIL; } @@ -2801,7 +2801,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), - errhint("You might need to increase %s.", "max_locks_per_transaction"))); + errhint("You might need to increase \"%s\".", "max_locks_per_transaction"))); } GrantLock(proclock->tag.myLock, proclock, lockmode); FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode); @@ -4186,7 +4186,7 @@ lock_twophase_recover(TransactionId xid, uint16 info, ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared 
memory"), - errhint("You might need to increase %s.", "max_locks_per_transaction"))); + errhint("You might need to increase \"%s\".", "max_locks_per_transaction"))); } /* @@ -4251,7 +4251,7 @@ lock_twophase_recover(TransactionId xid, uint16 info, ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), - errhint("You might need to increase %s.", "max_locks_per_transaction"))); + errhint("You might need to increase \"%s\".", "max_locks_per_transaction"))); } /* @@ -4601,7 +4601,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), - errhint("You might need to increase %s.", "max_locks_per_transaction"))); + errhint("You might need to increase \"%s\".", "max_locks_per_transaction"))); } GrantLock(proclock->tag.myLock, proclock, ExclusiveLock); diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index d5bbfbd4c6..93841654db 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -651,7 +651,7 @@ SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("not enough elements in RWConflictPool to record a read/write conflict"), - errhint("You might need to run fewer transactions at a time or increase max_connections."))); + errhint("You might need to run fewer transactions at a time or increase \"max_connections\"."))); conflict = dlist_head_element(RWConflictData, outLink, &RWConflictPool->availableList); dlist_delete(&conflict->outLink); @@ -676,7 +676,7 @@ SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact, ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"), - errhint("You might need to run fewer transactions at a time or increase max_connections."))); + errhint("You might need to run fewer transactions at a time or increase 
\"max_connections\"."))); conflict = dlist_head_element(RWConflictData, outLink, &RWConflictPool->availableList); dlist_delete(&conflict->outLink); @@ -1678,7 +1678,7 @@ GetSerializableTransactionSnapshot(Snapshot snapshot) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot use serializable mode in a hot standby"), - errdetail("default_transaction_isolation is set to \"serializable\"."), + errdetail("\"default_transaction_isolation\" is set to \"serializable\"."), errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default."))); /* @@ -2461,7 +2461,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag, ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), - errhint("You might need to increase %s.", "max_pred_locks_per_transaction"))); + errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction"))); if (!found) dlist_init(&target->predicateLocks); @@ -2476,7 +2476,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag, ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), - errhint("You might need to increase %s.", "max_pred_locks_per_transaction"))); + errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction"))); if (!found) { @@ -3873,7 +3873,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), - errhint("You might need to increase %s.", "max_pred_locks_per_transaction"))); + errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction"))); if (found) { Assert(predlock->commitSeqNo != 0); diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index e4f256c63c..a2900b6014 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -345,7 +345,7 @@ InitProcess(void) if (AmWalSenderProcess()) ereport(FATAL, 
(errcode(ERRCODE_TOO_MANY_CONNECTIONS), - errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)", + errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)", max_wal_senders))); ereport(FATAL, (errcode(ERRCODE_TOO_MANY_CONNECTIONS), diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index 2dff28afce..45a3794b8e 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -3535,7 +3535,7 @@ check_stack_depth(void) ereport(ERROR, (errcode(ERRCODE_STATEMENT_TOO_COMPLEX), errmsg("stack depth limit exceeded"), - errhint("Increase the configuration parameter max_stack_depth (currently %dkB), " + errhint("Increase the configuration parameter \"max_stack_depth\" (currently %dkB), " "after ensuring the platform's stack depth limit is adequate.", max_stack_depth))); } @@ -3582,7 +3582,7 @@ check_max_stack_depth(int *newval, void **extra, GucSource source) if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP) { - GUC_check_errdetail("max_stack_depth must not exceed %ldkB.", + GUC_check_errdetail("\"max_stack_depth\" must not exceed %ldkB.", (stack_rlimit - STACK_DEPTH_SLOP) / 1024L); GUC_check_errhint("Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent."); return false; @@ -3607,7 +3607,7 @@ check_client_connection_check_interval(int *newval, void **extra, GucSource sour { if (!WaitEventSetCanReportClosed() && *newval != 0) { - GUC_check_errdetail("client_connection_check_interval must be set to 0 on this platform."); + GUC_check_errdetail("\"client_connection_check_interval\" must be set to 0 on this platform."); return false; } return true; @@ -3643,9 +3643,9 @@ check_log_stats(bool *newval, void **extra, GucSource source) if (*newval && (log_parser_stats || log_planner_stats || log_executor_stats)) { - GUC_check_errdetail("Cannot enable log_statement_stats when " - "log_parser_stats, log_planner_stats, " - "or 
log_executor_stats is true."); + GUC_check_errdetail("Cannot enable \"log_statement_stats\" when " + "\"log_parser_stats\", \"log_planner_stats\", " + "or \"log_executor_stats\" is true."); return false; } return true; diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index 8d95b5d42a..7e5bb2b703 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -3000,7 +3000,7 @@ icu_validate_locale(const char *loc_str) ereport(elevel, (errmsg("could not get language from ICU locale \"%s\": %s", loc_str, u_errorName(status)), - errhint("To disable ICU locale validation, set the parameter %s to \"%s\".", + errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".", "icu_validation_level", "disabled"))); return; } @@ -3029,7 +3029,7 @@ icu_validate_locale(const char *loc_str) ereport(elevel, (errmsg("ICU locale \"%s\" has unknown language \"%s\"", loc_str, lang), - errhint("To disable ICU locale validation, set the parameter %s to \"%s\".", + errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".", "icu_validation_level", "disabled"))); /* check that it can be opened */ diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index dccd130c91..d2e2e9bbba 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -456,7 +456,7 @@ byteaout(PG_FUNCTION_ARGS) } else { - elog(ERROR, "unrecognized bytea_output setting: %d", + elog(ERROR, "unrecognized \"bytea_output\" setting: %d", bytea_output); rp = result = NULL; /* keep compiler quiet */ } diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c index eafa0128ef..092004dcf3 100644 --- a/src/backend/utils/fmgr/dfmgr.c +++ b/src/backend/utils/fmgr/dfmgr.c @@ -538,7 +538,7 @@ find_in_dynamic_libpath(const char *basename) if (piece == p) ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), - errmsg("zero-length component in parameter dynamic_library_path"))); + 
errmsg("zero-length component in parameter \"dynamic_library_path\""))); if (piece == NULL) len = strlen(p); @@ -557,7 +557,7 @@ find_in_dynamic_libpath(const char *basename) if (!is_absolute_path(mangled)) ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), - errmsg("component in parameter dynamic_library_path is not an absolute path"))); + errmsg("component in parameter \"dynamic_library_path\" is not an absolute path"))); full = palloc(strlen(mangled) + 1 + baselen + 1); sprintf(full, "%s/%s", mangled, basename); diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 3fb6803998..547cecde24 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -1879,7 +1879,7 @@ SelectConfigFiles(const char *userDoption, const char *progname) else { write_stderr("%s does not know where to find the database system data.\n" - "This can be specified as data_directory in \"%s\", " + "This can be specified as \"data_directory\" in \"%s\", " "or by the -D invocation option, or by the " "PGDATA environment variable.\n", progname, ConfigFileName); diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c index ea2b0577bc..85c8d54d4f 100644 --- a/src/backend/utils/misc/guc_tables.c +++ b/src/backend/utils/misc/guc_tables.c @@ -1066,7 +1066,7 @@ struct config_bool ConfigureNamesBool[] = }, { {"ssl_passphrase_command_supports_reload", PGC_SIGHUP, CONN_AUTH_SSL, - gettext_noop("Controls whether ssl_passphrase_command is called during server reload."), + gettext_noop("Controls whether \"ssl_passphrase_command\" is called during server reload."), NULL }, &ssl_passphrase_command_supports_reload, @@ -1114,7 +1114,7 @@ struct config_bool ConfigureNamesBool[] = gettext_noop("Continues processing past damaged page headers."), gettext_noop("Detection of a damaged page header normally causes PostgreSQL to " "report an error, aborting the current transaction. 
Setting " - "zero_damaged_pages to true causes the system to instead report a " + "\"zero_damaged_pages\" to true causes the system to instead report a " "warning, zero out the damaged page, and continue processing. This " "behavior will destroy data, namely all the rows on the damaged page."), GUC_NOT_IN_SAMPLE @@ -1129,7 +1129,7 @@ struct config_bool ConfigureNamesBool[] = gettext_noop("Detection of WAL records having references to " "invalid pages during recovery causes PostgreSQL to " "raise a PANIC-level error, aborting the recovery. " - "Setting ignore_invalid_pages to true causes " + "Setting \"ignore_invalid_pages\" to true causes " "the system to ignore invalid page references " "in WAL records (but still report a warning), " "and continue recovery. This behavior may cause " @@ -2713,7 +2713,7 @@ struct config_int ConfigureNamesInt[] = {"max_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT, gettext_noop("Sets the maximum number of locks per transaction."), gettext_noop("The shared lock table is sized on the assumption that at most " - "max_locks_per_transaction objects per server process or prepared " + "\"max_locks_per_transaction\" objects per server process or prepared " "transaction will need to be locked at any one time.") }, &max_locks_per_xact, @@ -2725,7 +2725,7 @@ struct config_int ConfigureNamesInt[] = {"max_pred_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT, gettext_noop("Sets the maximum number of predicate locks per transaction."), gettext_noop("The shared predicate lock table is sized on the assumption that " - "at most max_pred_locks_per_transaction objects per server process " + "at most \"max_pred_locks_per_transaction\" objects per server process " "or prepared transaction will need to be locked at any one time.") }, &max_predicate_locks_per_xact, @@ -2976,7 +2976,7 @@ struct config_int ConfigureNamesInt[] = { {"commit_siblings", PGC_USERSET, WAL_SETTINGS, gettext_noop("Sets the minimum number of concurrent open
transactions " - "required before performing commit_delay."), + "required before performing \"commit_delay\"."), NULL }, &CommitSiblings, @@ -3108,7 +3108,7 @@ struct config_int ConfigureNamesInt[] = {"maintenance_io_concurrency", PGC_USERSET, RESOURCES_ASYNCHRONOUS, - gettext_noop("A variant of effective_io_concurrency that is used for maintenance work."), + gettext_noop("A variant of \"effective_io_concurrency\" that is used for maintenance work."), NULL, GUC_EXPLAIN }, @@ -3815,7 +3815,7 @@ struct config_real ConfigureNamesReal[] = { {"hash_mem_multiplier", PGC_USERSET, RESOURCES_MEM, - gettext_noop("Multiple of work_mem to use for hash tables."), + gettext_noop("Multiple of \"work_mem\" to use for hash tables."), NULL, GUC_EXPLAIN }, @@ -3909,7 +3909,7 @@ struct config_real ConfigureNamesReal[] = { {"log_statement_sample_rate", PGC_SUSET, LOGGING_WHEN, - gettext_noop("Fraction of statements exceeding log_min_duration_sample to be logged."), + gettext_noop("Fraction of statements exceeding \"log_min_duration_sample\" to be logged."), gettext_noop("Use a value between 0.0 (never log) and 1.0 (always log).") }, &log_statement_sample_rate, @@ -3940,7 +3940,7 @@ struct config_string ConfigureNamesString[] = { {"archive_command", PGC_SIGHUP, WAL_ARCHIVING, gettext_noop("Sets the shell command that will be called to archive a WAL file."), - gettext_noop("This is used only if archive_library is not set.") + gettext_noop("This is used only if \"archive_library\" is not set.") }, &XLogArchiveCommand, "", @@ -3950,7 +3950,7 @@ struct config_string ConfigureNamesString[] = { {"archive_library", PGC_SIGHUP, WAL_ARCHIVING, gettext_noop("Sets the library that will be called to archive a WAL file."), - gettext_noop("An empty string indicates that archive_command should be used.") + gettext_noop("An empty string indicates that \"archive_command\" should be used.") }, &XLogArchiveLibrary, "", @@ -4895,7 +4895,7 @@ struct config_enum ConfigureNamesEnum[] = { {"archive_mode", 
PGC_POSTMASTER, WAL_ARCHIVING, - gettext_noop("Allows archiving of WAL files using archive_command."), + gettext_noop("Allows archiving of WAL files using \"archive_command\"."), NULL }, &XLogArchiveMode, diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index 5e89b3c8e8..12ae194067 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -1092,7 +1092,7 @@ test_config_settings(void) * Probe for max_connections before shared_buffers, since it is subject to * more constraints than shared_buffers. */ - printf(_("selecting default max_connections ... ")); + printf(_("selecting default \"max_connections\" ... ")); fflush(stdout); for (i = 0; i < connslen; i++) @@ -1112,7 +1112,7 @@ test_config_settings(void) printf("%d\n", n_connections); - printf(_("selecting default shared_buffers ... ")); + printf(_("selecting default \"shared_buffers\" ... ")); fflush(stdout); for (i = 0; i < bufslen; i++) diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c index d0efd8600c..feee451d59 100644 --- a/src/bin/pg_basebackup/streamutil.c +++ b/src/bin/pg_basebackup/streamutil.c @@ -227,7 +227,7 @@ GetConnection(void) res = PQexec(tmpconn, ALWAYS_SECURE_SEARCH_PATH_SQL); if (PQresultStatus(res) != PGRES_TUPLES_OK) { - pg_log_error("could not clear search_path: %s", + pg_log_error("could not clear \"search_path\": %s", PQerrorMessage(tmpconn)); PQclear(res); PQfinish(tmpconn); @@ -243,14 +243,14 @@ GetConnection(void) tmpparam = PQparameterStatus(tmpconn, "integer_datetimes"); if (!tmpparam) { - pg_log_error("could not determine server setting for integer_datetimes"); + pg_log_error("could not determine server setting for \"integer_datetimes\""); PQfinish(tmpconn); exit(1); } if (strcmp(tmpparam, "on") != 0) { - pg_log_error("integer_datetimes compile flag does not match server"); + pg_log_error("\"integer_datetimes\" compile flag does not match server"); PQfinish(tmpconn); exit(1); } diff --git 
a/src/bin/pg_controldata/pg_controldata.c b/src/bin/pg_controldata/pg_controldata.c index 93e0837947..93a05d80ca 100644 --- a/src/bin/pg_controldata/pg_controldata.c +++ b/src/bin/pg_controldata/pg_controldata.c @@ -81,7 +81,7 @@ wal_level_str(WalLevel wal_level) case WAL_LEVEL_LOGICAL: return "logical"; } - return _("unrecognized wal_level"); + return _("unrecognized \"wal_level\""); } diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index 56e0688154..68e321212d 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -3454,7 +3454,7 @@ _selectOutputSchema(ArchiveHandle *AH, const char *schemaName) if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) warn_or_exit_horribly(AH, - "could not set search_path to \"%s\": %s", + "could not set \"search_path\" to \"%s\": %s", schemaName, PQerrorMessage(AH->connection)); PQclear(res); @@ -3515,7 +3515,7 @@ _selectTablespace(ArchiveHandle *AH, const char *tablespace) if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) warn_or_exit_horribly(AH, - "could not set default_tablespace to %s: %s", + "could not set \"default_tablespace\" to %s: %s", fmtId(want), PQerrorMessage(AH->connection)); PQclear(res); @@ -3564,7 +3564,7 @@ _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam) if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) warn_or_exit_horribly(AH, - "could not set default_table_access_method: %s", + "could not set \"default_table_access_method\": %s", PQerrorMessage(AH->connection)); PQclear(res); diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index cb14fcafea..e324070828 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -3534,7 +3534,7 @@ dumpStdStrings(Archive *AH) const char *stdstrings = AH->std_strings ? 
"on" : "off"; PQExpBuffer qry = createPQExpBuffer(); - pg_log_info("saving standard_conforming_strings = %s", + pg_log_info("saving \"standard_conforming_strings\" = %s", stdstrings); appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n", @@ -3592,7 +3592,7 @@ dumpSearchPath(Archive *AH) appendStringLiteralAH(qry, path->data, AH); appendPQExpBufferStr(qry, ", false);\n"); - pg_log_info("saving search_path = %s", path->data); + pg_log_info("saving \"search_path\" = %s", path->data); ArchiveEntry(AH, nilCatalogId, createDumpId(), ARCHIVE_OPTS(.tag = "SEARCHPATH", diff --git a/src/bin/pg_rewind/libpq_source.c b/src/bin/pg_rewind/libpq_source.c index 7d898c3b50..9378266d28 100644 --- a/src/bin/pg_rewind/libpq_source.c +++ b/src/bin/pg_rewind/libpq_source.c @@ -128,7 +128,7 @@ init_libpq_conn(PGconn *conn) /* secure search_path */ res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL); if (PQresultStatus(res) != PGRES_TUPLES_OK) - pg_fatal("could not clear search_path: %s", + pg_fatal("could not clear \"search_path\": %s", PQresultErrorMessage(res)); PQclear(res); @@ -139,7 +139,7 @@ init_libpq_conn(PGconn *conn) */ str = run_simple_query(conn, "SHOW full_page_writes"); if (strcmp(str, "on") != 0) - pg_fatal("full_page_writes must be enabled in the source server"); + pg_fatal("\"full_page_writes\" must be enabled in the source server"); pg_free(str); /* Prepare a statement we'll use to fetch files */ diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c index 8449ae78ef..8dfea05846 100644 --- a/src/bin/pg_rewind/pg_rewind.c +++ b/src/bin/pg_rewind/pg_rewind.c @@ -94,7 +94,7 @@ usage(const char *progname) printf(_("%s resynchronizes a PostgreSQL cluster with another copy of the cluster.\n\n"), progname); printf(_("Usage:\n %s [OPTION]...\n\n"), progname); printf(_("Options:\n")); - printf(_(" -c, --restore-target-wal use restore_command in target configuration to\n" + printf(_(" -c, --restore-target-wal use \"restore_command\" in target 
configuration to\n" " retrieve WAL files from archives\n")); printf(_(" -D, --target-pgdata=DIRECTORY existing data directory to modify\n")); printf(_(" --source-pgdata=DIRECTORY source data directory to synchronize with\n")); @@ -1111,9 +1111,9 @@ getRestoreCommand(const char *argv0) (void) pg_strip_crlf(restore_command); if (strcmp(restore_command, "") == 0) - pg_fatal("restore_command is not set in the target cluster"); + pg_fatal("\"restore_command\" is not set in the target cluster"); - pg_log_debug("using for rewind restore_command = \'%s\'", + pg_log_debug("using for rewind \"restore_command\" = \'%s\'", restore_command); destroyPQExpBuffer(postgres_cmd); diff --git a/src/bin/pg_test_fsync/pg_test_fsync.c b/src/bin/pg_test_fsync/pg_test_fsync.c index 5c0da425fb..cbf587116e 100644 --- a/src/bin/pg_test_fsync/pg_test_fsync.c +++ b/src/bin/pg_test_fsync/pg_test_fsync.c @@ -298,7 +298,7 @@ test_sync(int writes_per_op) printf(_("\nCompare file sync methods using one %dkB write:\n"), XLOG_BLCKSZ_K); else printf(_("\nCompare file sync methods using two %dkB writes:\n"), XLOG_BLCKSZ_K); - printf(_("(in wal_sync_method preference order, except fdatasync is Linux's default)\n")); + printf(_("(in \"wal_sync_method\" preference order, except fdatasync is Linux's default)\n")); /* * Test open_datasync if available diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index 259b1109b8..27924159d6 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -1769,13 +1769,13 @@ check_new_cluster_logical_replication_slots(void) wal_level = PQgetvalue(res, 0, 0); if (strcmp(wal_level, "logical") != 0) - pg_fatal("wal_level must be \"logical\", but is set to \"%s\"", + pg_fatal("\"wal_level\" must be \"logical\", but is set to \"%s\"", wal_level); max_replication_slots = atoi(PQgetvalue(res, 1, 0)); if (nslots_on_old > max_replication_slots) - pg_fatal("max_replication_slots (%d) must be greater than or equal to the number of " + 
pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of " "logical replication slots (%d) on the old cluster", max_replication_slots, nslots_on_old); @@ -1822,7 +1822,7 @@ check_new_cluster_subscription_configuration(void) max_replication_slots = atoi(PQgetvalue(res, 0, 0)); if (nsubs_on_old > max_replication_slots) - pg_fatal("max_replication_slots (%d) must be greater than or equal to the number of " + pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of " "subscriptions (%d) on the old cluster", max_replication_slots, nsubs_on_old); diff --git a/src/bin/pg_upgrade/t/003_logical_slots.pl b/src/bin/pg_upgrade/t/003_logical_slots.pl index f9394f97b1..87c471a6ea 100644 --- a/src/bin/pg_upgrade/t/003_logical_slots.pl +++ b/src/bin/pg_upgrade/t/003_logical_slots.pl @@ -77,10 +77,10 @@ command_checks_all( [@pg_upgrade_cmd], 1, [ - qr/max_replication_slots \(1\) must be greater than or equal to the number of logical replication slots \(2\) on the old cluster/ + qr/"max_replication_slots" \(1\) must be greater than or equal to the number of logical replication slots \(2\) on the old cluster/ ], [qr//], - 'run of pg_upgrade where the new cluster has insufficient max_replication_slots' + 'run of pg_upgrade where the new cluster has insufficient "max_replication_slots"' ); ok(-d $newpub->data_dir . 
"/pg_upgrade_output.d", "pg_upgrade_output.d/ not removed after pg_upgrade failure"); diff --git a/src/bin/pg_upgrade/t/004_subscription.pl b/src/bin/pg_upgrade/t/004_subscription.pl index ba782c3bd9..c59b83af9c 100644 --- a/src/bin/pg_upgrade/t/004_subscription.pl +++ b/src/bin/pg_upgrade/t/004_subscription.pl @@ -66,7 +66,7 @@ command_checks_all( ], 1, [ - qr/max_replication_slots \(0\) must be greater than or equal to the number of subscriptions \(1\) on the old cluster/ + qr/"max_replication_slots" \(0\) must be greater than or equal to the number of subscriptions \(1\) on the old cluster/ ], [qr//], 'run of pg_upgrade where the new cluster has insufficient max_replication_slots' diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index af776b31d8..86ffb3c868 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -5376,7 +5376,7 @@ GetTableInfo(PGconn *con, bool scale_given) * This case is unlikely as pgbench already found "pgbench_branches" * above to compute the scale. */ - pg_log_error("no pgbench_accounts table found in search_path"); + pg_log_error("no pgbench_accounts table found in \"search_path\""); pg_log_error_hint("Perhaps you need to do initialization (\"pgbench -i\") in database \"%s\".", PQdb(con)); exit(1); } diff --git a/src/fe_utils/archive.c b/src/fe_utils/archive.c index 490a508136..f194809d53 100644 --- a/src/fe_utils/archive.c +++ b/src/fe_utils/archive.c @@ -95,7 +95,7 @@ RestoreArchivedFile(const char *path, const char *xlogfname, * fatal too. 
*/ if (wait_result_is_any_signal(rc, true)) - pg_fatal("restore_command failed: %s", + pg_fatal("\"restore_command\" failed: %s", wait_result_to_str(rc)); /* diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c index 81ec08485d..256f596e6b 100644 --- a/src/interfaces/libpq/fe-auth.c +++ b/src/interfaces/libpq/fe-auth.c @@ -1313,7 +1313,7 @@ PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user, if (strlen(val) > MAX_ALGORITHM_NAME_LEN) { PQclear(res); - libpq_append_conn_error(conn, "password_encryption value too long"); + libpq_append_conn_error(conn, "\"password_encryption\" value too long"); return NULL; } strcpy(algobuf, val); diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index a6b75ad6ac..548ad118fb 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -1657,7 +1657,7 @@ pqConnectOptions2(PGconn *conn) if (!sslVerifyProtocolVersion(conn->ssl_min_protocol_version)) { conn->status = CONNECTION_BAD; - libpq_append_conn_error(conn, "invalid %s value: \"%s\"", + libpq_append_conn_error(conn, "invalid \"%s\" value: \"%s\"", "ssl_min_protocol_version", conn->ssl_min_protocol_version); return false; @@ -1665,7 +1665,7 @@ pqConnectOptions2(PGconn *conn) if (!sslVerifyProtocolVersion(conn->ssl_max_protocol_version)) { conn->status = CONNECTION_BAD; - libpq_append_conn_error(conn, "invalid %s value: \"%s\"", + libpq_append_conn_error(conn, "invalid \"%s\" value: \"%s\"", "ssl_max_protocol_version", conn->ssl_max_protocol_version); return false; diff --git a/src/test/modules/commit_ts/expected/commit_timestamp_1.out b/src/test/modules/commit_ts/expected/commit_timestamp_1.out index 4c62bc95f9..f37e701f37 100644 --- a/src/test/modules/commit_ts/expected/commit_timestamp_1.out +++ b/src/test/modules/commit_ts/expected/commit_timestamp_1.out @@ -18,7 +18,7 @@ SELECT id, FROM committs_test ORDER BY id; ERROR: could not get commit timestamp data -HINT: Make sure 
the configuration parameter track_commit_timestamp is set. +HINT: Make sure the configuration parameter "track_commit_timestamp" is set. DROP TABLE committs_test; SELECT pg_xact_commit_timestamp('0'::xid); ERROR: cannot retrieve commit timestamp for transaction 0 @@ -40,7 +40,7 @@ SELECT x.xid::text::bigint > 0 as xid_valid, roident != 0 AS valid_roident FROM pg_last_committed_xact() x; ERROR: could not get commit timestamp data -HINT: Make sure the configuration parameter track_commit_timestamp is set. +HINT: Make sure the configuration parameter "track_commit_timestamp" is set. -- Test non-normal transaction ids. SELECT * FROM pg_xact_commit_timestamp_origin(NULL); -- ok, NULL timestamp | roident @@ -69,13 +69,13 @@ SELECT x.timestamp > '-infinity'::timestamptz AS ts_low, roident != 0 AS valid_roident FROM pg_last_committed_xact() x; ERROR: could not get commit timestamp data -HINT: Make sure the configuration parameter track_commit_timestamp is set. +HINT: Make sure the configuration parameter "track_commit_timestamp" is set. SELECT x.timestamp > '-infinity'::timestamptz AS ts_low, x.timestamp <= now() AS ts_high, roident != 0 AS valid_roident FROM pg_xact_commit_timestamp_origin(:'txid_no_origin') x; ERROR: could not get commit timestamp data -HINT: Make sure the configuration parameter track_commit_timestamp is set. +HINT: Make sure the configuration parameter "track_commit_timestamp" is set. -- Test transaction with replication origin SELECT pg_replication_origin_create('regress_commit_ts: get_origin') != 0 AS valid_roident; @@ -97,14 +97,14 @@ SELECT x.timestamp > '-infinity'::timestamptz AS ts_low, FROM pg_last_committed_xact() x, pg_replication_origin r WHERE r.roident = x.roident; ERROR: could not get commit timestamp data -HINT: Make sure the configuration parameter track_commit_timestamp is set. +HINT: Make sure the configuration parameter "track_commit_timestamp" is set. 
SELECT x.timestamp > '-infinity'::timestamptz AS ts_low, x.timestamp <= now() AS ts_high, r.roname FROM pg_xact_commit_timestamp_origin(:'txid_with_origin') x, pg_replication_origin r WHERE r.roident = x.roident; ERROR: could not get commit timestamp data -HINT: Make sure the configuration parameter track_commit_timestamp is set. +HINT: Make sure the configuration parameter "track_commit_timestamp" is set. SELECT pg_replication_origin_session_reset(); pg_replication_origin_session_reset ------------------------------------- diff --git a/src/test/modules/libpq_pipeline/libpq_pipeline.c b/src/test/modules/libpq_pipeline/libpq_pipeline.c index 928ef6b170..ac4d26302c 100644 --- a/src/test/modules/libpq_pipeline/libpq_pipeline.c +++ b/src/test/modules/libpq_pipeline/libpq_pipeline.c @@ -2227,10 +2227,10 @@ main(int argc, char **argv) res = PQexec(conn, "SET lc_messages TO \"C\""); if (PQresultStatus(res) != PGRES_COMMAND_OK) - pg_fatal("failed to set lc_messages: %s", PQerrorMessage(conn)); + pg_fatal("failed to set \"lc_messages\": %s", PQerrorMessage(conn)); res = PQexec(conn, "SET debug_parallel_query = off"); if (PQresultStatus(res) != PGRES_COMMAND_OK) - pg_fatal("failed to set debug_parallel_query: %s", PQerrorMessage(conn)); + pg_fatal("failed to set \"debug_parallel_query\": %s", PQerrorMessage(conn)); /* Set the trace file, if requested */ if (tracefile != NULL) diff --git a/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c b/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c index 948706af85..d599214982 100644 --- a/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c +++ b/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c @@ -58,7 +58,7 @@ set_rot13(SSL_CTX *context, bool isServerStart) /* warn if the user has set ssl_passphrase_command */ if (ssl_passphrase_command[0]) ereport(WARNING, - (errmsg("ssl_passphrase_command setting ignored by ssl_passphrase_func module"))); + (errmsg("\"ssl_passphrase_command\" 
setting ignored by ssl_passphrase_func module"))); SSL_CTX_set_default_passwd_cb(context, rot13_passphrase); } diff --git a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl index a2bfb64576..7a63539f39 100644 --- a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl +++ b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl @@ -56,7 +56,7 @@ my $log_contents = slurp_file($log); like( $log_contents, - qr/WARNING.*ssl_passphrase_command setting ignored by ssl_passphrase_func module/, + qr/WARNING.*"ssl_passphrase_command" setting ignored by ssl_passphrase_func module/, "ssl_passphrase_command set warning"); # set the wrong passphrase diff --git a/src/test/modules/test_shm_mq/setup.c b/src/test/modules/test_shm_mq/setup.c index 3de5d01e30..b3dac44d97 100644 --- a/src/test/modules/test_shm_mq/setup.c +++ b/src/test/modules/test_shm_mq/setup.c @@ -233,7 +233,7 @@ setup_background_workers(int nworkers, dsm_segment *seg) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("could not register background process"), - errhint("You may need to increase max_worker_processes."))); + errhint("You may need to increase \"max_worker_processes\"."))); ++wstate->nworkers; } diff --git a/src/test/modules/test_slru/test_slru.c b/src/test/modules/test_slru/test_slru.c index 068a21f125..d227b06703 100644 --- a/src/test/modules/test_slru/test_slru.c +++ b/src/test/modules/test_slru/test_slru.c @@ -251,7 +251,7 @@ _PG_init(void) if (!process_shared_preload_libraries_in_progress) ereport(ERROR, (errmsg("cannot load \"%s\" after startup", "test_slru"), - errdetail("\"%s\" must be loaded with shared_preload_libraries.", + errdetail("\"%s\" must be loaded with \"shared_preload_libraries\".", "test_slru"))); prev_shmem_request_hook = shmem_request_hook; diff --git a/src/test/recovery/t/024_archive_recovery.pl b/src/test/recovery/t/024_archive_recovery.pl index c7318d92e8..c6480bbdcd 100644 --- 
a/src/test/recovery/t/024_archive_recovery.pl +++ b/src/test/recovery/t/024_archive_recovery.pl @@ -91,8 +91,8 @@ sub test_recovery_wal_level_minimal # Confirm that the archive recovery fails with an expected error my $logfile = slurp_file($recovery_node->logfile()); ok( $logfile =~ - qr/FATAL: .* WAL was generated with wal_level=minimal, cannot continue recovering/, - "$node_text ends with an error because it finds WAL generated with wal_level=minimal" + qr/FATAL: .* WAL was generated with "wal_level=minimal", cannot continue recovering/, + "$node_text ends with an error because it finds WAL generated with \"wal_level=minimal\"" ); } diff --git a/src/test/recovery/t/035_standby_logical_decoding.pl b/src/test/recovery/t/035_standby_logical_decoding.pl index 07ff5231d3..4628f9fb80 100644 --- a/src/test/recovery/t/035_standby_logical_decoding.pl +++ b/src/test/recovery/t/035_standby_logical_decoding.pl @@ -794,7 +794,7 @@ $handle = make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr); # We are not able to read from the slot as it requires wal_level >= logical on the primary server check_pg_recvlogical_stderr($handle, - "logical decoding on standby requires wal_level >= logical on the primary" + "logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary" ); # Restore primary wal_level diff --git a/src/test/regress/expected/collate.icu.utf8.out b/src/test/regress/expected/collate.icu.utf8.out index 4b8c8f143f..7d59fb4431 100644 --- a/src/test/regress/expected/collate.icu.utf8.out +++ b/src/test/regress/expected/collate.icu.utf8.out @@ -1042,7 +1042,7 @@ ERROR: parameter "locale" must be specified SET icu_validation_level = ERROR; CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); -- fails ERROR: ICU locale "nonsense-nowhere" has unknown language "nonsense" -HINT: To disable ICU locale validation, set the parameter icu_validation_level to "disabled". 
+HINT: To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled". CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense=yes'); -- fails ERROR: could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR RESET icu_validation_level; @@ -1050,7 +1050,7 @@ CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense= WARNING: could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); DROP COLLATION testx; WARNING: ICU locale "nonsense-nowhere" has unknown language "nonsense" -HINT: To disable ICU locale validation, set the parameter icu_validation_level to "disabled". +HINT: To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled". CREATE COLLATION test4 FROM nonsense; ERROR: collation "nonsense" for encoding "UTF8" does not exist CREATE COLLATION test5 FROM test0; diff --git a/src/test/regress/expected/create_am.out b/src/test/regress/expected/create_am.out index 9762c332ce..35d4cf1d46 100644 --- a/src/test/regress/expected/create_am.out +++ b/src/test/regress/expected/create_am.out @@ -113,7 +113,7 @@ COMMIT; -- prevent empty values SET default_table_access_method = ''; ERROR: invalid value for parameter "default_table_access_method": "" -DETAIL: default_table_access_method cannot be empty. +DETAIL: "default_table_access_method" cannot be empty. -- prevent nonexistent values SET default_table_access_method = 'I do not exist AM'; ERROR: invalid value for parameter "default_table_access_method": "I do not exist AM" diff --git a/src/test/regress/expected/json.out b/src/test/regress/expected/json.out index 7cb28f106d..aa29bc597b 100644 --- a/src/test/regress/expected/json.out +++ b/src/test/regress/expected/json.out @@ -219,10 +219,10 @@ CONTEXT: JSON data, line 1: {"abc":1,3... 
SET max_stack_depth = '100kB'; SELECT repeat('[', 10000)::json; ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate. +HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. SELECT repeat('{"a":', 10000)::json; ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate. +HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. RESET max_stack_depth; -- Miscellaneous stuff. SELECT 'true'::json; -- OK diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out index 66bee5162b..e66d760189 100644 --- a/src/test/regress/expected/jsonb.out +++ b/src/test/regress/expected/jsonb.out @@ -213,10 +213,10 @@ CONTEXT: JSON data, line 1: {"abc":1,3... SET max_stack_depth = '100kB'; SELECT repeat('[', 10000)::jsonb; ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate. +HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. SELECT repeat('{"a":', 10000)::jsonb; ERROR: stack depth limit exceeded -HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate. +HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. RESET max_stack_depth; -- Miscellaneous stuff. 
SELECT 'true'::jsonb; -- OK diff --git a/src/test/regress/expected/prepared_xacts_1.out b/src/test/regress/expected/prepared_xacts_1.out index 7168f86bf9..6ad3d11898 100644 --- a/src/test/regress/expected/prepared_xacts_1.out +++ b/src/test/regress/expected/prepared_xacts_1.out @@ -19,7 +19,7 @@ SELECT * FROM pxtest1; PREPARE TRANSACTION 'regress_foo1'; ERROR: prepared transactions are disabled -HINT: Set max_prepared_transactions to a nonzero value. +HINT: Set "max_prepared_transactions" to a nonzero value. SELECT * FROM pxtest1; foobar -------- @@ -58,7 +58,7 @@ SELECT * FROM pxtest1; PREPARE TRANSACTION 'regress_foo2'; ERROR: prepared transactions are disabled -HINT: Set max_prepared_transactions to a nonzero value. +HINT: Set "max_prepared_transactions" to a nonzero value. SELECT * FROM pxtest1; foobar -------- @@ -84,7 +84,7 @@ SELECT * FROM pxtest1; PREPARE TRANSACTION 'regress_foo3'; ERROR: prepared transactions are disabled -HINT: Set max_prepared_transactions to a nonzero value. +HINT: Set "max_prepared_transactions" to a nonzero value. SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid; gid ----- @@ -95,7 +95,7 @@ INSERT INTO pxtest1 VALUES ('fff'); -- This should fail, because the gid foo3 is already in use PREPARE TRANSACTION 'regress_foo3'; ERROR: prepared transactions are disabled -HINT: Set max_prepared_transactions to a nonzero value. +HINT: Set "max_prepared_transactions" to a nonzero value. SELECT * FROM pxtest1; foobar -------- @@ -121,7 +121,7 @@ SELECT * FROM pxtest1; PREPARE TRANSACTION 'regress_foo4'; ERROR: prepared transactions are disabled -HINT: Set max_prepared_transactions to a nonzero value. +HINT: Set "max_prepared_transactions" to a nonzero value. 
SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid; gid ----- @@ -138,7 +138,7 @@ SELECT * FROM pxtest1; INSERT INTO pxtest1 VALUES ('fff'); PREPARE TRANSACTION 'regress_foo5'; ERROR: prepared transactions are disabled -HINT: Set max_prepared_transactions to a nonzero value. +HINT: Set "max_prepared_transactions" to a nonzero value. SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid; gid ----- @@ -169,7 +169,7 @@ SELECT pg_advisory_xact_lock_shared(1); PREPARE TRANSACTION 'regress_foo6'; -- fails ERROR: prepared transactions are disabled -HINT: Set max_prepared_transactions to a nonzero value. +HINT: Set "max_prepared_transactions" to a nonzero value. -- Test subtransactions BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; CREATE TABLE pxtest2 (a int); @@ -181,7 +181,7 @@ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; INSERT INTO pxtest2 VALUES (3); PREPARE TRANSACTION 'regress_sub1'; ERROR: prepared transactions are disabled -HINT: Set max_prepared_transactions to a nonzero value. +HINT: Set "max_prepared_transactions" to a nonzero value. CREATE TABLE pxtest3(fff int); -- Test shared invalidation BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; @@ -199,7 +199,7 @@ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; PREPARE TRANSACTION 'regress_sub2'; ERROR: prepared transactions are disabled -HINT: Set max_prepared_transactions to a nonzero value. +HINT: Set "max_prepared_transactions" to a nonzero value. 
-- No such cursor FETCH 1 FROM foo; ERROR: cursor "foo" does not exist diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out index b7500d9c0e..52b69a107f 100644 --- a/src/test/regress/expected/strings.out +++ b/src/test/regress/expected/strings.out @@ -147,17 +147,17 @@ SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061"; ERROR: unsafe use of string constant with Unicode escapes LINE 1: SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061"; ^ -DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off. SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*'; ERROR: unsafe use of string constant with Unicode escapes LINE 1: SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061... ^ -DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off. SELECT U&' \' UESCAPE '!' AS "tricky"; ERROR: unsafe use of string constant with Unicode escapes LINE 1: SELECT U&' \' UESCAPE '!' AS "tricky"; ^ -DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off. SELECT 'tricky' AS U&"\" UESCAPE '!'; \ -------- @@ -168,17 +168,17 @@ SELECT U&'wrong: \061'; ERROR: unsafe use of string constant with Unicode escapes LINE 1: SELECT U&'wrong: \061'; ^ -DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off. 
SELECT U&'wrong: \+0061'; ERROR: unsafe use of string constant with Unicode escapes LINE 1: SELECT U&'wrong: \+0061'; ^ -DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off. SELECT U&'wrong: +0061' UESCAPE '+'; ERROR: unsafe use of string constant with Unicode escapes LINE 1: SELECT U&'wrong: +0061' UESCAPE '+'; ^ -DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +DETAIL: String constants with Unicode escapes cannot be used when "standard_conforming_strings" is off. RESET standard_conforming_strings; -- bytea SET bytea_output TO hex; diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl index 68c1b6b41f..b877327023 100644 --- a/src/test/ssl/t/001_ssltests.pl +++ b/src/test/ssl/t/001_ssltests.pl @@ -554,11 +554,11 @@ $node->connect_fails( $node->connect_fails( "$common_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require ssl_min_protocol_version=incorrect_tls", "connection failure with an incorrect SSL protocol minimum bound", - expected_stderr => qr/invalid ssl_min_protocol_version value/); + expected_stderr => qr/invalid "ssl_min_protocol_version" value/); $node->connect_fails( "$common_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require ssl_max_protocol_version=incorrect_tls", "connection failure with an incorrect SSL protocol maximum bound", - expected_stderr => qr/invalid ssl_max_protocol_version value/); + expected_stderr => qr/invalid "ssl_max_protocol_version" value/); ### Server-side tests. 
### diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl index 9ccebd890a..471e981962 100644 --- a/src/test/subscription/t/001_rep_changes.pl +++ b/src/test/subscription/t/001_rep_changes.pl @@ -573,7 +573,7 @@ CREATE PUBLICATION tap_pub2 FOR TABLE skip_wal; ROLLBACK; }); ok( $reterr =~ - m/WARNING: wal_level is insufficient to publish logical changes/, - 'CREATE PUBLICATION while wal_level=minimal'); + m/WARNING: "wal_level" is insufficient to publish logical changes/, + 'CREATE PUBLICATION while "wal_level=minimal"'); done_testing();