Compare commits
13 Commits
3e36e48d8e ... ae2ccf66a2
Author | SHA1 | Date |
---|---|---|
Alexander Korotkov | ae2ccf66a2 | |
Daniel Gustafsson | b3efa270b5 | |
Daniel Gustafsson | c3f4a84481 | |
Daniel Gustafsson | 17935e1fdf | |
Alexander Korotkov | b589f211e0 | |
John Naylor | 095d109ccd | |
Peter Eisentraut | 489ca33081 | |
Masahiko Sawada | e255b646a1 | |
Michael Paquier | a243569bf6 | |
Michael Paquier | 8d9978a717 | |
Masahiko Sawada | 334f512f45 | |
Peter Eisentraut | a1827568d2 | |
Peter Eisentraut | 7e5f517799 | |
@@ -2574,7 +2574,6 @@ createNewConnection(const char *name, remoteConn *rconn)
}

hentry->rconn = rconn;
-strlcpy(hentry->name, name, sizeof(hentry->name));
}

static void
@@ -877,7 +877,7 @@ apw_start_database_worker(void)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("registering dynamic bgworker autoprewarm failed"),
-errhint("Consider increasing configuration parameter \"max_worker_processes\".")));
+errhint("Consider increasing configuration parameter max_worker_processes.")));

/*
* Ignore return value; if it fails, postmaster has died, but we have
@@ -2717,10 +2717,10 @@ ALTER FOREIGN TABLE ft4 OPTIONS (ADD use_remote_estimate 'true');
-- regress_view_owner_another, the view owner, though it fails as expected
-- due to the lack of a user mapping for that user.
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM v4;
-ERROR: user mapping not found for "regress_view_owner_another"
+ERROR: user mapping not found for user "regress_view_owner_another", server "loopback"
-- Likewise, but with the query under an UNION ALL
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM (SELECT * FROM v4 UNION ALL SELECT * FROM v4);
-ERROR: user mapping not found for "regress_view_owner_another"
+ERROR: user mapping not found for user "regress_view_owner_another", server "loopback"
-- Should not get that error once a user mapping is created
CREATE USER MAPPING FOR regress_view_owner_another SERVER loopback OPTIONS (password_required 'false');
EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM v4;
@@ -314,6 +314,16 @@ make check-world PG_TEST_EXTRA='kerberos ldap ssl load_balance'
</para>
</listitem>
</varlistentry>

+<varlistentry>
+<term><literal>xid_wraparound</literal></term>
+<listitem>
+<para>
+Runs the test suite under <filename>src/test/module/xid_wrapround</filename>.
+Not enabled by default because it is resource intensive.
+</para>
+</listitem>
+</varlistentry>
</variablelist>

Tests for features that are not supported by the current build
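The suite is opt-in: it only runs when xid_wraparound appears in PG_TEST_EXTRA. A minimal sketch of enabling it (the check-world form mirrors the example in the hunk header above; the per-module invocation assumes the Makefile added later in this compare):

```sh
# Sketch only: enable the opt-in suite via PG_TEST_EXTRA.
make check-world PG_TEST_EXTRA='xid_wraparound'

# Or run just the new module's TAP tests from its directory.
make -C src/test/modules/xid_wraparound check PG_TEST_EXTRA='xid_wraparound'
```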
@@ -538,6 +538,14 @@ Hint: The addendum, written as a complete sentence.
variables that will not contain words (for example, operator names).
</para>

+<para>
+In messages containing configuration variable names, do not include quotes
+when the names are visibly not natural English words, such as when they
+have underscores, are all-uppercase or have mixed case. Otherwise, quotes
+must be added. Do include quotes in a message where an arbitrary variable
+name is to be expanded.
+</para>

<para>
There are functions in the backend that will double-quote their own output
as needed (for example, <function>format_type_be()</function>). Do not put
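A minimal sketch of the updated convention, using hypothetical messages rather than lines taken from this patch: a parameter name that is visibly not natural English (it has underscores) is left unquoted, while quotes are kept when an arbitrary, user-supplied name is expanded.

```c
/* Sketch only: hypothetical ereport() calls illustrating the quoting rule. */
ereport(ERROR,
        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
         /* "work_mem" is clearly not natural English, so no quotes */
         errmsg("work_mem must be at least 64kB")));

ereport(ERROR,
        (errcode(ERRCODE_UNDEFINED_OBJECT),
         /* an arbitrary name supplied at run time keeps its quotes */
         errmsg("unrecognized configuration parameter \"%s\"", name)));
```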
@@ -1063,7 +1063,7 @@ pyopt = get_option('plpython')
python3_dep = not_found_dep
if not pyopt.disabled()
pm = import('python')
-python3_inst = pm.find_installation(required: pyopt)
+python3_inst = pm.find_installation(python.path(), required: pyopt)
if python3_inst.found()
python3_dep = python3_inst.dependency(embed: true, required: pyopt)
# Remove this check after we depend on Meson >= 1.1.0
@@ -2658,7 +2658,7 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel)
vacrel->dbname, vacrel->relnamespace, vacrel->relname,
vacrel->num_index_scans),
errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
-errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
+errhint("Consider increasing configuration parameter maintenance_work_mem or autovacuum_work_mem.\n"
"You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));

/* Stop applying cost limits from this point on */
@@ -385,9 +385,9 @@ error_commit_ts_disabled(void)
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
RecoveryInProgress() ?
-errhint("Make sure the configuration parameter \"%s\" is set on the primary server.",
+errhint("Make sure the configuration parameter %s is set on the primary server.",
"track_commit_timestamp") :
-errhint("Make sure the configuration parameter \"%s\" is set.",
+errhint("Make sure the configuration parameter %s is set.",
"track_commit_timestamp")));
}
@@ -60,7 +60,7 @@
#include "storage/fd.h"
#include "storage/shmem.h"

-static int inline
+static inline int
SlruFileName(SlruCtl ctl, char *path, int64 segno)
{
if (ctl->long_segment_names)
@@ -965,7 +965,7 @@ AdjustToFullTransactionId(TransactionId xid)
epoch = EpochFromFullTransactionId(nextFullXid);
if (unlikely(xid > nextXid))
{
-/* Wraparound occured, must be from a prev epoch. */
+/* Wraparound occurred, must be from a prev epoch. */
Assert(epoch > 0);
epoch--;
}
@@ -4258,11 +4258,11 @@ ReadControlFile(void)
/* check and update variables dependent on wal_segment_size */
if (ConvertToXSegs(min_wal_size_mb, wal_segment_size) < 2)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-errmsg("\"min_wal_size\" must be at least twice \"wal_segment_size\"")));
+errmsg("min_wal_size must be at least twice wal_segment_size")));

if (ConvertToXSegs(max_wal_size_mb, wal_segment_size) < 2)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-errmsg("\"max_wal_size\" must be at least twice \"wal_segment_size\"")));
+errmsg("max_wal_size must be at least twice wal_segment_size")));

UsableBytesInSegment =
(wal_segment_size / XLOG_BLCKSZ * UsableBytesInPage) -
@@ -2305,15 +2305,13 @@ AddEventToPendingNotifies(Notification *n)
foreach(l, pendingNotifies->events)
{
Notification *oldn = (Notification *) lfirst(l);
-NotificationHash *hentry;
bool found;

-hentry = (NotificationHash *) hash_search(pendingNotifies->hashtab,
-&oldn,
-HASH_ENTER,
-&found);
+(void) hash_search(pendingNotifies->hashtab,
+&oldn,
+HASH_ENTER,
+&found);
Assert(!found);
-hentry->event = oldn;
}
}
@@ -2323,15 +2321,13 @@ AddEventToPendingNotifies(Notification *n)
/* Add event to the hash table if needed */
if (pendingNotifies->hashtab != NULL)
{
-NotificationHash *hentry;
bool found;

-hentry = (NotificationHash *) hash_search(pendingNotifies->hashtab,
-&n,
-HASH_ENTER,
-&found);
+(void) hash_search(pendingNotifies->hashtab,
+&n,
+HASH_ENTER,
+&found);
Assert(!found);
-hentry->event = n;
}
}
@@ -2104,10 +2104,7 @@ ExecuteTruncateGuts(List *explicit_rels,
/* Find or create cached entry for the foreign table */
ft_info = hash_search(ft_htab, &serverid, HASH_ENTER, &found);
if (!found)
-{
-ft_info->serverid = serverid;
ft_info->rels = NIL;
-}

/*
* Save the foreign table in the entry of the server that the
@@ -134,7 +134,7 @@ check_vacuum_buffer_usage_limit(int *newval, void **extra,
return true;

/* Value does not fall within any allowable range */
-GUC_check_errdetail("\"vacuum_buffer_usage_limit\" must be 0 or between %d kB and %d kB",
+GUC_check_errdetail("vacuum_buffer_usage_limit must be 0 or between %d kB and %d kB",
MIN_BAS_VAC_RING_SIZE_KB, MAX_BAS_VAC_RING_SIZE_KB);

return false;
@@ -717,7 +717,7 @@ check_client_encoding(char **newval, void **extra, GucSource source)
else
{
/* Provide a useful complaint */
-GUC_check_errdetail("Cannot change \"client_encoding\" now.");
+GUC_check_errdetail("Cannot change client_encoding now.");
}
return false;
}
@@ -217,10 +217,14 @@ GetUserMapping(Oid userid, Oid serverid)
}

if (!HeapTupleIsValid(tp))
+{
+ForeignServer *server = GetForeignServer(serverid);
+
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
-errmsg("user mapping not found for \"%s\"",
-MappingUserName(userid))));
+errmsg("user mapping not found for user \"%s\", server \"%s\"",
+MappingUserName(userid), server->servername)));
+}

um = (UserMapping *) palloc(sizeof(UserMapping));
um->umid = ((Form_pg_user_mapping) GETSTRUCT(tp))->oid;
@@ -195,7 +195,7 @@ be_tls_init(bool isServerStart)
{
ereport(isServerStart ? FATAL : LOG,
/*- translator: first %s is a GUC option name, second %s is its value */
-(errmsg("\"%s\" setting \"%s\" not supported by this build",
+(errmsg("%s setting \"%s\" not supported by this build",
"ssl_min_protocol_version",
GetConfigOption("ssl_min_protocol_version",
false, false))));
@@ -218,7 +218,7 @@ be_tls_init(bool isServerStart)
{
ereport(isServerStart ? FATAL : LOG,
/*- translator: first %s is a GUC option name, second %s is its value */
-(errmsg("\"%s\" setting \"%s\" not supported by this build",
+(errmsg("%s setting \"%s\" not supported by this build",
"ssl_max_protocol_version",
GetConfigOption("ssl_max_protocol_version",
false, false))));
@@ -245,7 +245,7 @@ be_tls_init(bool isServerStart)
{
ereport(isServerStart ? FATAL : LOG,
(errmsg("could not set SSL protocol version range"),
-errdetail("\"%s\" cannot be higher than \"%s\"",
+errdetail("%s cannot be higher than %s",
"ssl_min_protocol_version",
"ssl_max_protocol_version")));
goto error;
@@ -944,7 +944,7 @@ RegisterBackgroundWorker(BackgroundWorker *worker)
"Up to %d background workers can be registered with the current settings.",
max_worker_processes,
max_worker_processes),
-errhint("Consider increasing the configuration parameter \"max_worker_processes\".")));
+errhint("Consider increasing the configuration parameter max_worker_processes.")));
return;
}
@@ -423,7 +423,7 @@ CheckpointerMain(void)
"checkpoints are occurring too frequently (%d seconds apart)",
elapsed_secs,
elapsed_secs),
-errhint("Consider increasing the configuration parameter \"max_wal_size\".")));
+errhint("Consider increasing the configuration parameter max_wal_size.")));

/*
* Initialize checkpointer-private variables used during
@@ -807,7 +807,7 @@ HandlePgArchInterrupts(void)
*/
ereport(LOG,
(errmsg("restarting archiver process because value of "
-"\"archive_library\" was changed")));
+"archive_library was changed")));

proc_exit(0);
}
@@ -509,7 +509,6 @@ pa_allocate_worker(TransactionId xid)
winfo->in_use = true;
winfo->serialize_changes = false;
entry->winfo = winfo;
-entry->xid = xid;
}

/*
@@ -657,7 +657,6 @@ logicalrep_partition_open(LogicalRepRelMapEntry *root,
int i;

/* Remote relation is copied as-is from the root entry. */
entry = &part_entry->relmapentry;
entry->remoterel.remoteid = remoterel->remoteid;
entry->remoterel.nspname = pstrdup(remoterel->nspname);
entry->remoterel.relname = pstrdup(remoterel->relname);
@@ -705,7 +705,7 @@ check_temp_buffers(int *newval, void **extra, GucSource source)
*/
if (source != PGC_S_TEST && NLocBuffer && NLocBuffer != *newval)
{
-GUC_check_errdetail("\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session.");
+GUC_check_errdetail("temp_buffers cannot be changed after any temporary tables have been accessed in the session.");
return false;
}
return true;
@@ -3931,7 +3931,7 @@ check_debug_io_direct(char **newval, void **extra, GucSource source)

if (!SplitGUCList(rawstring, ',', &elemlist))
{
-GUC_check_errdetail("invalid list syntax in parameter \"%s\"",
+GUC_check_errdetail("invalid list syntax in parameter %s",
"debug_io_direct");
pfree(rawstring);
list_free(elemlist);
@@ -1644,7 +1644,7 @@ GetSerializableTransactionSnapshot(Snapshot snapshot)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use serializable mode in a hot standby"),
-errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
+errdetail("default_transaction_isolation is set to \"serializable\"."),
errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));

/*
@@ -3524,7 +3524,7 @@ check_stack_depth(void)
ereport(ERROR,
(errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
errmsg("stack depth limit exceeded"),
-errhint("Increase the configuration parameter \"max_stack_depth\" (currently %dkB), "
+errhint("Increase the configuration parameter max_stack_depth (currently %dkB), "
"after ensuring the platform's stack depth limit is adequate.",
max_stack_depth)));
}
@@ -3571,7 +3571,7 @@ check_max_stack_depth(int *newval, void **extra, GucSource source)

if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP)
{
-GUC_check_errdetail("\"max_stack_depth\" must not exceed %ldkB.",
+GUC_check_errdetail("max_stack_depth must not exceed %ldkB.",
(stack_rlimit - STACK_DEPTH_SLOP) / 1024L);
GUC_check_errhint("Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent.");
return false;
@@ -3632,9 +3632,9 @@ check_log_stats(bool *newval, void **extra, GucSource source)
if (*newval &&
(log_parser_stats || log_planner_stats || log_executor_stats))
{
-GUC_check_errdetail("Cannot enable \"log_statement_stats\" when "
-"\"log_parser_stats\", \"log_planner_stats\", "
-"or \"log_executor_stats\" is true.");
+GUC_check_errdetail("Cannot enable log_statement_stats when "
+"log_parser_stats, log_planner_stats, "
+"or log_executor_stats is true.");
return false;
}
return true;
@@ -2875,7 +2875,7 @@ icu_validate_locale(const char *loc_str)
ereport(elevel,
(errmsg("could not get language from ICU locale \"%s\": %s",
loc_str, u_errorName(status)),
-errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
+errhint("To disable ICU locale validation, set the parameter %s to \"%s\".",
"icu_validation_level", "disabled")));
return;
}
@@ -2904,7 +2904,7 @@ icu_validate_locale(const char *loc_str)
ereport(elevel,
(errmsg("ICU locale \"%s\" has unknown language \"%s\"",
loc_str, lang),
-errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
+errhint("To disable ICU locale validation, set the parameter %s to \"%s\".",
"icu_validation_level", "disabled")));

/* check that it can be opened */
@@ -555,7 +555,7 @@ find_in_dynamic_libpath(const char *basename)
if (piece == p)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
-errmsg("zero-length component in parameter \"dynamic_library_path\"")));
+errmsg("zero-length component in parameter dynamic_library_path")));

if (piece == NULL)
len = strlen(p);
@@ -574,7 +574,7 @@ find_in_dynamic_libpath(const char *basename)
if (!is_absolute_path(mangled))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
-errmsg("component in parameter \"dynamic_library_path\" is not an absolute path")));
+errmsg("component in parameter dynamic_library_path is not an absolute path")));

full = palloc(strlen(mangled) + 1 + baselen + 1);
sprintf(full, "%s/%s", mangled, basename);
@@ -1873,7 +1873,7 @@ SelectConfigFiles(const char *userDoption, const char *progname)
else
{
write_stderr("%s does not know where to find the database system data.\n"
-"This can be specified as \"data_directory\" in \"%s\", "
+"This can be specified as data_directory in \"%s\", "
"or by the -D invocation option, or by the "
"PGDATA environment variable.\n",
progname, ConfigFileName);
@@ -3821,7 +3821,7 @@ struct config_string ConfigureNamesString[] =
{
{"archive_command", PGC_SIGHUP, WAL_ARCHIVING,
gettext_noop("Sets the shell command that will be called to archive a WAL file."),
-gettext_noop("This is used only if \"archive_library\" is not set.")
+gettext_noop("This is used only if archive_library is not set.")
},
&XLogArchiveCommand,
"",
@@ -3831,7 +3831,7 @@ struct config_string ConfigureNamesString[] =
{
{"archive_library", PGC_SIGHUP, WAL_ARCHIVING,
gettext_noop("Sets the library that will be called to archive a WAL file."),
-gettext_noop("An empty string indicates that \"archive_command\" should be used.")
+gettext_noop("An empty string indicates that archive_command should be used.")
},
&XLogArchiveLibrary,
"",
@@ -185,14 +185,14 @@ filter_get_keyword(const char **line, int *size)
*size = 0;

/* Skip initial whitespace */
-while (isspace(*ptr))
+while (isspace((unsigned char) *ptr))
ptr++;

-if (isalpha(*ptr))
+if (isalpha((unsigned char) *ptr))
{
result = ptr++;

-while (isalpha(*ptr) || *ptr == '_')
+while (isalpha((unsigned char) *ptr) || *ptr == '_')
ptr++;

*size = ptr - result;
@@ -301,7 +301,7 @@ read_pattern(FilterStateData *fstate, const char *str, PQExpBuffer pattern)
bool found_space = false;

/* Skip initial whitespace */
-while (isspace(*str))
+while (isspace((unsigned char) *str))
str++;

if (*str == '\0')
@@ -312,7 +312,7 @@ read_pattern(FilterStateData *fstate, const char *str, PQExpBuffer pattern)

while (*str && *str != '#')
{
-while (*str && !isspace(*str) && !strchr("#,.()\"", *str))
+while (*str && !isspace((unsigned char) *str) && !strchr("#,.()\"", *str))
{
/*
* Append space only when it is allowed, and when it was found in
@@ -351,7 +351,7 @@ read_pattern(FilterStateData *fstate, const char *str, PQExpBuffer pattern)
found_space = false;

/* skip ending whitespaces */
-while (isspace(*str))
+while (isspace((unsigned char) *str))
{
found_space = true;
str++;
@@ -400,7 +400,7 @@ filter_read_item(FilterStateData *fstate,
fstate->lineno++;

/* Skip initial white spaces */
-while (isspace(*str))
+while (isspace((unsigned char) *str))
str++;

/*
@@ -20,6 +20,7 @@ GETTEXT_FILES = $(FRONTEND_COMMON_GETTEXT_FILES) \
pg_dumpall.c \
parallel.c \
parallel.h \
+filter.c \
pg_backup_utils.c \
pg_backup_utils.h \
../../common/compression.c \
@@ -1119,7 +1119,7 @@ help(const char *progname)
" including child and partition tables\n"));
printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
printf(_(" --filter=FILENAME include or exclude objects and data from dump\n"
-" based expressions in FILENAME\n"));
+" based on expressions in FILENAME\n"));
printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
printf(_(" --include-foreign-data=PATTERN\n"
" include data of foreign tables on foreign\n"
@@ -18812,7 +18812,7 @@ read_dump_filters(const char *filename, DumpOptions *dopt)
case FILTER_OBJECT_TYPE_TABLE_DATA:
case FILTER_OBJECT_TYPE_TABLE_DATA_AND_CHILDREN:
case FILTER_OBJECT_TYPE_TRIGGER:
-pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed."),
+pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
"include",
filter_object_type_name(objtype));
exit_nicely(1);
@@ -18851,7 +18851,7 @@ read_dump_filters(const char *filename, DumpOptions *dopt)
case FILTER_OBJECT_TYPE_TRIGGER:
case FILTER_OBJECT_TYPE_EXTENSION:
case FILTER_OBJECT_TYPE_FOREIGN_DATA:
-pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed."),
+pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
"exclude",
filter_object_type_name(objtype));
exit_nicely(1);
@@ -1969,7 +1969,7 @@ read_dumpall_filters(const char *filename, SimpleStringList *pattern)
{
if (comtype == FILTER_COMMAND_TYPE_INCLUDE)
{
-pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed."),
+pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
"include",
filter_object_type_name(objtype));
exit_nicely(1);
@@ -1989,7 +1989,7 @@ read_dumpall_filters(const char *filename, SimpleStringList *pattern)
case FILTER_OBJECT_TYPE_SCHEMA:
case FILTER_OBJECT_TYPE_TABLE:
case FILTER_OBJECT_TYPE_TABLE_AND_CHILDREN:
-pg_log_filter_error(&fstate, _("unsupported filter object."));
+pg_log_filter_error(&fstate, _("unsupported filter object"));
exit_nicely(1);
break;
@@ -535,7 +535,7 @@ read_restore_filters(const char *filename, RestoreOptions *opts)
case FILTER_OBJECT_TYPE_DATABASE:
case FILTER_OBJECT_TYPE_EXTENSION:
case FILTER_OBJECT_TYPE_FOREIGN_DATA:
-pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed."),
+pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
"include",
filter_object_type_name(objtype));
exit_nicely(1);
@@ -581,7 +581,7 @@ read_restore_filters(const char *filename, RestoreOptions *opts)
case FILTER_OBJECT_TYPE_TABLE:
case FILTER_OBJECT_TYPE_TABLE_AND_CHILDREN:
case FILTER_OBJECT_TYPE_TRIGGER:
-pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed."),
+pg_log_filter_error(&fstate, _("%s filter for \"%s\" is not allowed"),
"exclude",
filter_object_type_name(objtype));
exit_nicely(1);
@@ -1130,7 +1130,6 @@ static void
ensureCleanShutdown(const char *argv0)
{
int ret;
#define MAXCMDLEN (2 * MAXPGPATH)
char exec_path[MAXPGPATH];
PQExpBuffer postgres_cmd;
@@ -34,7 +34,8 @@ SUBDIRS = \
test_shm_mq \
test_slru \
unsafe_tests \
-worker_spi
+worker_spi \
+xid_wraparound

ifeq ($(with_ssl),openssl)
SUBDIRS += ssl_passphrase_callback
@@ -18,7 +18,7 @@ SELECT id,
FROM committs_test
ORDER BY id;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
+HINT: Make sure the configuration parameter track_commit_timestamp is set.
DROP TABLE committs_test;
SELECT pg_xact_commit_timestamp('0'::xid);
ERROR: cannot retrieve commit timestamp for transaction 0
@@ -40,7 +40,7 @@ SELECT x.xid::text::bigint > 0 as xid_valid,
roident != 0 AS valid_roident
FROM pg_last_committed_xact() x;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
+HINT: Make sure the configuration parameter track_commit_timestamp is set.
-- Test non-normal transaction ids.
SELECT * FROM pg_xact_commit_timestamp_origin(NULL); -- ok, NULL
timestamp | roident
@@ -69,13 +69,13 @@ SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
roident != 0 AS valid_roident
FROM pg_last_committed_xact() x;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
+HINT: Make sure the configuration parameter track_commit_timestamp is set.
SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
x.timestamp <= now() AS ts_high,
roident != 0 AS valid_roident
FROM pg_xact_commit_timestamp_origin(:'txid_no_origin') x;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
+HINT: Make sure the configuration parameter track_commit_timestamp is set.
-- Test transaction with replication origin
SELECT pg_replication_origin_create('regress_commit_ts: get_origin') != 0
AS valid_roident;
@@ -97,14 +97,14 @@ SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
FROM pg_last_committed_xact() x, pg_replication_origin r
WHERE r.roident = x.roident;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
+HINT: Make sure the configuration parameter track_commit_timestamp is set.
SELECT x.timestamp > '-infinity'::timestamptz AS ts_low,
x.timestamp <= now() AS ts_high,
r.roname
FROM pg_xact_commit_timestamp_origin(:'txid_with_origin') x, pg_replication_origin r
WHERE r.roident = x.roident;
ERROR: could not get commit timestamp data
-HINT: Make sure the configuration parameter "track_commit_timestamp" is set.
+HINT: Make sure the configuration parameter track_commit_timestamp is set.
SELECT pg_replication_origin_session_reset();
pg_replication_origin_session_reset
-------------------------------------
@@ -32,3 +32,4 @@ subdir('test_shm_mq')
subdir('test_slru')
subdir('unsafe_tests')
subdir('worker_spi')
+subdir('xid_wraparound')
@@ -0,0 +1,4 @@
# Generated subdirectories
/log/
/results/
/tmp_check/
@@ -0,0 +1,23 @@
# src/test/modules/xid_wraparound/Makefile

MODULE_big = xid_wraparound
OBJS = \
$(WIN32RES) \
xid_wraparound.o
PGFILEDESC = "xid_wraparound - tests for XID wraparound"

EXTENSION = xid_wraparound
DATA = xid_wraparound--1.0.sql

TAP_TESTS = 1

ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
else
subdir = src/test/modules/xid_wraparound
top_builddir = ../../../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
@@ -0,0 +1,3 @@
This module contains tests for XID wraparound. The tests use two
helper functions to quickly consume lots of XIDs, to reach XID
wraparound faster.
@@ -0,0 +1,36 @@
# Copyright (c) 2023, PostgreSQL Global Development Group

xid_wraparound_sources = files(
'xid_wraparound.c',
)

if host_system == 'windows'
xid_wraparound_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
'--NAME', 'xid_wraparound',
'--FILEDESC', 'xid_wraparound - tests for XID wraparound',])
endif

xid_wraparound = shared_module('xid_wraparound',
xid_wraparound_sources,
kwargs: pg_mod_args,
)
testprep_targets += xid_wraparound

install_data(
'xid_wraparound.control',
'xid_wraparound--1.0.sql',
kwargs: contrib_data_args,
)

tests += {
'name': 'xid_wraparound',
'sd': meson.current_source_dir(),
'bd': meson.current_build_dir(),
'tap': {
'tests': [
't/001_emergency_vacuum.pl',
't/002_limits.pl',
't/003_wraparounds.pl',
],
},
}
@@ -0,0 +1,132 @@
# Copyright (c) 2023, PostgreSQL Global Development Group

# Test wraparound emergency autovacuum.
use strict;
use warnings;
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;

if ($ENV{PG_TEST_EXTRA} !~ /\bxid_wraparound\b/)
{
plan skip_all => "test xid_wraparound not enabled in PG_TEST_EXTRA";
}

# Initialize node
my $node = PostgreSQL::Test::Cluster->new('main');

$node->init;
$node->append_conf(
'postgresql.conf', qq[
autovacuum = off # run autovacuum only when to anti wraparound
autovacuum_naptime = 1s
# so it's easier to verify the order of operations
autovacuum_max_workers = 1
log_autovacuum_min_duration = 0
]);
$node->start;
$node->safe_psql('postgres', 'CREATE EXTENSION xid_wraparound');

# Create tables for a few different test scenarios
$node->safe_psql(
'postgres', qq[
CREATE TABLE large(id serial primary key, data text, filler text default repeat(random()::text, 10));
INSERT INTO large(data) SELECT generate_series(1,30000);

CREATE TABLE large_trunc(id serial primary key, data text, filler text default repeat(random()::text, 10));
INSERT INTO large_trunc(data) SELECT generate_series(1,30000);

CREATE TABLE small(id serial primary key, data text, filler text default repeat(random()::text, 10));
INSERT INTO small(data) SELECT generate_series(1,15000);

CREATE TABLE small_trunc(id serial primary key, data text, filler text default repeat(random()::text, 10));
INSERT INTO small_trunc(data) SELECT generate_series(1,15000);

CREATE TABLE autovacuum_disabled(id serial primary key, data text) WITH (autovacuum_enabled=false);
INSERT INTO autovacuum_disabled(data) SELECT generate_series(1,1000);
]);

# Bump the query timeout to avoid false negatives on slow test systems.
my $psql_timeout_secs = 4 * $PostgreSQL::Test::Utils::timeout_default;

# Start a background session, which holds a transaction open, preventing
# autovacuum from advancing relfrozenxid and datfrozenxid.
my $background_psql = $node->background_psql(
'postgres',
on_error_stop => 0,
timeout => $psql_timeout_secs);
$background_psql->set_query_timer_restart();
$background_psql->query_safe(
qq[
BEGIN;
DELETE FROM large WHERE id % 2 = 0;
DELETE FROM large_trunc WHERE id > 10000;
DELETE FROM small WHERE id % 2 = 0;
DELETE FROM small_trunc WHERE id > 1000;
DELETE FROM autovacuum_disabled WHERE id % 2 = 0;
]);

# Consume 2 billion XIDs, to get us very close to wraparound
$node->safe_psql('postgres',
qq[SELECT consume_xids_until('2000000000'::xid8)]);

# Make sure the latest completed XID is advanced
$node->safe_psql('postgres', qq[INSERT INTO small(data) SELECT 1]);

# Check that all databases became old enough to trigger failsafe.
my $ret = $node->safe_psql(
'postgres',
qq[
SELECT datname,
age(datfrozenxid) > current_setting('vacuum_failsafe_age')::int as old
FROM pg_database ORDER BY 1
]);
is( $ret, "postgres|t
template0|t
template1|t", "all tables became old");

my $log_offset = -s $node->logfile;

# Finish the old transaction, to allow vacuum freezing to advance
# relfrozenxid and datfrozenxid again.
$background_psql->query_safe(qq[COMMIT]);
$background_psql->quit;

# Wait until autovacuum processed all tables and advanced the
# system-wide oldest-XID.
$node->poll_query_until(
'postgres', qq[
SELECT NOT EXISTS (
SELECT *
FROM pg_database
WHERE age(datfrozenxid) > current_setting('autovacuum_freeze_max_age')::int)
]) or die "timeout waiting for all databases to be vacuumed";

# Check if these tables are vacuumed.
$ret = $node->safe_psql(
'postgres', qq[
SELECT relname, age(relfrozenxid) > current_setting('autovacuum_freeze_max_age')::int
FROM pg_class
WHERE relname IN ('large', 'large_trunc', 'small', 'small_trunc', 'autovacuum_disabled')
ORDER BY 1
]);

is( $ret, "autovacuum_disabled|f
large|f
large_trunc|f
small|f
small_trunc|f", "all tables are vacuumed");

# Check if vacuum failsafe was triggered for each table.
my $log_contents = slurp_file($node->logfile, $log_offset);
foreach my $tablename ('large', 'large_trunc', 'small', 'small_trunc',
'autovacuum_disabled')
{
like(
$log_contents,
qr/bypassing nonessential maintenance of table "postgres.public.$tablename" as a failsafe after \d+ index scans/,
"failsafe vacuum triggered for $tablename");
}

$node->stop;
done_testing();
@@ -0,0 +1,138 @@
# Copyright (c) 2023, PostgreSQL Global Development Group
#
# Test XID wraparound limits.
#
# When you get close to XID wraparound, you start to get warnings, and
# when you get even closer, the system refuses to assign any more XIDs
# until the oldest databases have been vacuumed and datfrozenxid has
# been advanced.

use strict;
use warnings;
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
use Time::HiRes qw(usleep);

if ($ENV{PG_TEST_EXTRA} !~ /\bxid_wraparound\b/)
{
plan skip_all => "test xid_wraparound not enabled in PG_TEST_EXTRA";
}

my $ret;

# Initialize node
my $node = PostgreSQL::Test::Cluster->new('wraparound');

$node->init;
$node->append_conf(
'postgresql.conf', qq[
autovacuum = off # run autovacuum only to prevent wraparound
autovacuum_naptime = 1s
log_autovacuum_min_duration = 0
]);
$node->start;
$node->safe_psql('postgres', 'CREATE EXTENSION xid_wraparound');

# Create a test table
$node->safe_psql(
'postgres', qq[
CREATE TABLE wraparoundtest(t text);
INSERT INTO wraparoundtest VALUES ('start');
]);

# Bump the query timeout to avoid false negatives on slow test systems.
my $psql_timeout_secs = 4 * $PostgreSQL::Test::Utils::timeout_default;

# Start a background session, which holds a transaction open, preventing
# autovacuum from advancing relfrozenxid and datfrozenxid.
my $background_psql = $node->background_psql(
'postgres',
on_error_stop => 0,
timeout => $psql_timeout_secs);
$background_psql->query_safe(
qq[
BEGIN;
INSERT INTO wraparoundtest VALUES ('oldxact');
]);

# Consume 2 billion transactions, to get close to wraparound
$node->safe_psql('postgres', qq[SELECT consume_xids(1000000000)]);
$node->safe_psql('postgres',
qq[INSERT INTO wraparoundtest VALUES ('after 1 billion')]);

$node->safe_psql('postgres', qq[SELECT consume_xids(1000000000)]);
$node->safe_psql('postgres',
qq[INSERT INTO wraparoundtest VALUES ('after 2 billion')]);

# We are now just under 150 million XIDs away from wraparound.
# Continue consuming XIDs, in batches of 10 million, until we get
# the warning:
#
# WARNING: database "postgres" must be vacuumed within 3000024 transactions
# HINT: To avoid a database shutdown, execute a database-wide VACUUM in that database.
# You might also need to commit or roll back old prepared transactions, or drop stale replication slots.
my $stderr;
my $warn_limit = 0;
for my $i (1 .. 15)
{
$node->psql(
'postgres', qq[SELECT consume_xids(10000000)],
stderr => \$stderr,
on_error_die => 1);

if ($stderr =~
/WARNING: database "postgres" must be vacuumed within [0-9]+ transactions/
)
{
# Reached the warn-limit
$warn_limit = 1;
last;
}
}
ok($warn_limit == 1, "warn-limit reached");

# We can still INSERT, despite the warnings.
$node->safe_psql('postgres',
qq[INSERT INTO wraparoundtest VALUES ('reached warn-limit')]);

# Keep going. We'll hit the hard "stop" limit.
$ret = $node->psql(
'postgres',
qq[SELECT consume_xids(100000000)],
stderr => \$stderr);
like(
$stderr,
qr/ERROR: database is not accepting commands that assign new XIDs to avoid wraparound data loss in database "postgres"/,
"stop-limit");

# Finish the old transaction, to allow vacuum freezing to advance
# relfrozenxid and datfrozenxid again.
$background_psql->query_safe(qq[COMMIT]);
$background_psql->quit;

# VACUUM, to freeze the tables and advance datfrozenxid.
#
# Autovacuum does this for the other databases, and would do it for
# 'postgres' too, but let's test manual VACUUM.
#
$node->safe_psql('postgres', 'VACUUM');

# Wait until autovacuum has processed the other databases and advanced
# the system-wide oldest-XID.
$ret =
$node->poll_query_until('postgres',
qq[INSERT INTO wraparoundtest VALUES ('after VACUUM')],
'INSERT 0 1');

# Check the table contents
$ret = $node->safe_psql('postgres', qq[SELECT * from wraparoundtest]);
is( $ret, "start
oldxact
after 1 billion
after 2 billion
reached warn-limit
after VACUUM");

$node->stop;
done_testing();
@@ -0,0 +1,60 @@
# Copyright (c) 2023, PostgreSQL Global Development Group
#
# Consume a lot of XIDs, wrapping around a few times.
#

use strict;
use warnings;
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
use Time::HiRes qw(usleep);

if ($ENV{PG_TEST_EXTRA} !~ /\bxid_wraparound\b/)
{
plan skip_all => "test xid_wraparound not enabled in PG_TEST_EXTRA";
}

# Initialize node
my $node = PostgreSQL::Test::Cluster->new('wraparound');

$node->init;
$node->append_conf(
'postgresql.conf', qq[
autovacuum = off # run autovacuum only when to anti wraparound
autovacuum_naptime = 1s
# so it's easier to verify the order of operations
autovacuum_max_workers = 1
log_autovacuum_min_duration = 0
]);
$node->start;
$node->safe_psql('postgres', 'CREATE EXTENSION xid_wraparound');

# Create a test table
$node->safe_psql(
'postgres', qq[
CREATE TABLE wraparoundtest(t text);
INSERT INTO wraparoundtest VALUES ('beginning');
]);

# Bump the query timeout to avoid false negatives on slow test systems.
my $psql_timeout_secs = 4 * $PostgreSQL::Test::Utils::timeout_default;

# Burn through 10 billion transactions in total, in batches of 100 million.
my $ret;
for my $i (1 .. 100)
{
$ret = $node->safe_psql(
'postgres',
qq[SELECT consume_xids(100000000)],
timeout => $psql_timeout_secs);
$ret = $node->safe_psql('postgres',
qq[INSERT INTO wraparoundtest VALUES ('after $i batches')]);
}

$ret = $node->safe_psql('postgres', qq[SELECT COUNT(*) FROM wraparoundtest]);
is($ret, "101");

$node->stop;

done_testing();
@@ -0,0 +1,12 @@
/* src/test/modules/xid_wraparound/xid_wraparound--1.0.sql */

-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION xid_wraparound" to load this file. \quit

CREATE FUNCTION consume_xids(nxids bigint)
RETURNS xid8 IMMUTABLE PARALLEL SAFE STRICT
AS 'MODULE_PATHNAME' LANGUAGE C;

CREATE FUNCTION consume_xids_until(targetxid xid8)
RETURNS xid8 IMMUTABLE PARALLEL SAFE STRICT
AS 'MODULE_PATHNAME' LANGUAGE C;
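The two functions declared by this script are what the TAP tests in this compare drive. As a quick manual sketch (assuming the extension has been built and installed in a disposable test cluster):

```sql
-- Sketch only: consume XIDs by hand, the same calls the TAP tests use.
CREATE EXTENSION xid_wraparound;
SELECT consume_xids(100000000);                 -- burn 100 million XIDs
SELECT consume_xids_until('2000000000'::xid8);  -- advance up to a specific FullTransactionId
```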
@@ -0,0 +1,219 @@
/*--------------------------------------------------------------------------
*
* xid_wraparound.c
* Utilities for testing XID wraparound
*
*
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/test/modules/xid_wraparound/xid_wraparound.c
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"

#include "access/xact.h"
#include "miscadmin.h"
#include "storage/proc.h"
#include "utils/xid8.h"

PG_MODULE_MAGIC;

static int64 consume_xids_shortcut(void);
static FullTransactionId consume_xids_common(FullTransactionId untilxid, uint64 nxids);

/*
* Consume the specified number of XIDs.
*/
PG_FUNCTION_INFO_V1(consume_xids);
Datum
consume_xids(PG_FUNCTION_ARGS)
{
int64 nxids = PG_GETARG_INT64(0);
FullTransactionId lastxid;

if (nxids < 0)
elog(ERROR, "invalid nxids argument: %lld", (long long) nxids);

if (nxids == 0)
lastxid = ReadNextFullTransactionId();
else
lastxid = consume_xids_common(InvalidFullTransactionId, (uint64) nxids);

PG_RETURN_FULLTRANSACTIONID(lastxid);
}

/*
* Consume XIDs, up to the given XID.
*/
PG_FUNCTION_INFO_V1(consume_xids_until);
Datum
consume_xids_until(PG_FUNCTION_ARGS)
{
FullTransactionId targetxid = PG_GETARG_FULLTRANSACTIONID(0);
FullTransactionId lastxid;

if (!FullTransactionIdIsNormal(targetxid))
elog(ERROR, "targetxid %llu is not normal",
(unsigned long long) U64FromFullTransactionId(targetxid));

lastxid = consume_xids_common(targetxid, 0);

PG_RETURN_FULLTRANSACTIONID(lastxid);
}

/*
* Common functionality between the two public functions.
*/
static FullTransactionId
consume_xids_common(FullTransactionId untilxid, uint64 nxids)
{
FullTransactionId lastxid;
uint64 last_reported_at = 0;
uint64 consumed = 0;

/* Print a NOTICE every REPORT_INTERVAL xids */
#define REPORT_INTERVAL (10 * 1000000)

/* initialize 'lastxid' with the system's current next XID */
lastxid = ReadNextFullTransactionId();

/*
* We consume XIDs by calling GetNewTransactionId(true), which marks the
* consumed XIDs as subtransactions of the current top-level transaction.
* For that to work, this transaction must have a top-level XID.
*
* GetNewTransactionId registers them in the subxid cache in PGPROC, until
* the cache overflows, but beyond that, we don't keep track of the
* consumed XIDs.
*/
(void) GetTopTransactionId();

for (;;)
{
uint64 xids_left;

CHECK_FOR_INTERRUPTS();

/* How many XIDs do we have left to consume? */
if (nxids > 0)
{
if (consumed >= nxids)
break;
xids_left = nxids - consumed;
}
else
{
if (FullTransactionIdFollowsOrEquals(lastxid, untilxid))
break;
xids_left = U64FromFullTransactionId(untilxid) - U64FromFullTransactionId(lastxid);
}

/*
* If we still have plenty of XIDs to consume, try to take a shortcut
* and bump up the nextXid counter directly.
*/
if (xids_left > 2000 &&
consumed - last_reported_at < REPORT_INTERVAL &&
MyProc->subxidStatus.overflowed)
{
int64 consumed_by_shortcut = consume_xids_shortcut();

if (consumed_by_shortcut > 0)
{
consumed += consumed_by_shortcut;
continue;
}
}

/* Slow path: Call GetNewTransactionId to allocate a new XID. */
lastxid = GetNewTransactionId(true);
consumed++;

/* Report progress */
if (consumed - last_reported_at >= REPORT_INTERVAL)
{
if (nxids > 0)
elog(NOTICE, "consumed %llu / %llu XIDs, latest %u:%u",
(unsigned long long) consumed, (unsigned long long) nxids,
EpochFromFullTransactionId(lastxid),
XidFromFullTransactionId(lastxid));
else
elog(NOTICE, "consumed up to %u:%u / %u:%u",
EpochFromFullTransactionId(lastxid),
XidFromFullTransactionId(lastxid),
EpochFromFullTransactionId(untilxid),
XidFromFullTransactionId(untilxid));
last_reported_at = consumed;
}
}

return lastxid;
}

/*
* These constants copied from .c files, because they're private.
*/
#define COMMIT_TS_XACTS_PER_PAGE (BLCKSZ / 10)
#define SUBTRANS_XACTS_PER_PAGE (BLCKSZ / sizeof(TransactionId))
#define CLOG_XACTS_PER_BYTE 4
#define CLOG_XACTS_PER_PAGE (BLCKSZ * CLOG_XACTS_PER_BYTE)

/*
* All the interesting action in GetNewTransactionId happens when we extend
* the SLRUs, or at the uint32 wraparound. If the nextXid counter is not close
* to any of those interesting values, take a shortcut and bump nextXID
* directly, close to the next "interesting" value.
*/
static inline uint32
XidSkip(FullTransactionId fullxid)
{
uint32 low = XidFromFullTransactionId(fullxid);
uint32 rem;
uint32 distance;

if (low < 5 || low >= UINT32_MAX - 5)
return 0;
distance = UINT32_MAX - 5 - low;

rem = low % COMMIT_TS_XACTS_PER_PAGE;
if (rem == 0)
return 0;
distance = Min(distance, COMMIT_TS_XACTS_PER_PAGE - rem);

rem = low % SUBTRANS_XACTS_PER_PAGE;
if (rem == 0)
return 0;
distance = Min(distance, SUBTRANS_XACTS_PER_PAGE - rem);

rem = low % CLOG_XACTS_PER_PAGE;
if (rem == 0)
return 0;
distance = Min(distance, CLOG_XACTS_PER_PAGE - rem);

return distance;
}

static int64
consume_xids_shortcut(void)
{
FullTransactionId nextXid;
uint32 consumed;

LWLockAcquire(XidGenLock, LW_EXCLUSIVE);
nextXid = ShmemVariableCache->nextXid;

/*
* Go slow near the "interesting values". The interesting zones include 5
* transactions before and after SLRU page switches.
*/
consumed = XidSkip(nextXid);
if (consumed > 0)
ShmemVariableCache->nextXid.value += (uint64) consumed;

LWLockRelease(XidGenLock);

return consumed;
}
@@ -0,0 +1,4 @@
comment = 'Tests for XID wraparound'
default_version = '1.0'
module_pathname = '$libdir/xid_wraparound'
relocatable = true
@@ -68,7 +68,7 @@ use Test::More;

=over

-=item PostgreSQL::Test::BackgroundPsql->new(interactive, @params)
+=item PostgreSQL::Test::BackgroundPsql->new(interactive, @psql_params, timeout)

Builds a new object of class C<PostgreSQL::Test::BackgroundPsql> for either
an interactive or background session and starts it. If C<interactive> is
@@ -81,7 +81,7 @@ string. For C<interactive> sessions, IO::Pty is required.
sub new
{
my $class = shift;
-my ($interactive, $psql_params) = @_;
+my ($interactive, $psql_params, $timeout) = @_;
my $psql = {
'stdin' => '',
'stdout' => '',
@@ -96,8 +96,10 @@ sub new
"Forbidden caller of constructor: package: $package, file: $file:$line"
unless $package->isa('PostgreSQL::Test::Cluster');

-$psql->{timeout} =
-IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default);
+$psql->{timeout} = IPC::Run::timeout(
+defined($timeout)
+? $timeout
+: $PostgreSQL::Test::Utils::timeout_default);

if ($interactive)
{
@@ -2028,8 +2028,6 @@ sub psql

Invoke B<psql> on B<$dbname> and return a BackgroundPsql object.

-A timeout of $PostgreSQL::Test::Utils::timeout_default is set up.
-
psql is invoked in tuples-only unaligned mode with reading of B<.psqlrc>
disabled. That may be overridden by passing extra psql parameters.

@@ -2047,6 +2045,11 @@ By default, the B<psql> method invokes the B<psql> program with ON_ERROR_STOP=1
set, so SQL execution is stopped at the first error and exit code 3 is
returned. Set B<on_error_stop> to 0 to ignore errors instead.

+=item timeout => 'interval'
+
+Set a timeout for a background psql session. By default, timeout of
+$PostgreSQL::Test::Utils::timeout_default is set up.
+
=item replication => B<value>

If set, add B<replication=value> to the conninfo string.
@@ -2068,6 +2071,7 @@ sub background_psql
local %ENV = $self->_get_env();

my $replication = $params{replication};
+my $timeout = undef;

my @psql_params = (
$self->installed_command('psql'),
@@ -2079,12 +2083,13 @@ sub background_psql
'-');

$params{on_error_stop} = 1 unless defined $params{on_error_stop};
+$timeout = $params{timeout} if defined $params{timeout};

push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop};
push @psql_params, @{ $params{extra_params} }
if defined $params{extra_params};

-return PostgreSQL::Test::BackgroundPsql->new(0, \@psql_params);
+return PostgreSQL::Test::BackgroundPsql->new(0, \@psql_params, $timeout);
}

=pod
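For reference, a caller passes the new parameter the same way the xid_wraparound TAP tests above do; a minimal sketch:

```perl
# Sketch only: open a background psql session with a longer timeout,
# as the xid_wraparound tests do on slow test systems.
my $psql_timeout_secs = 4 * $PostgreSQL::Test::Utils::timeout_default;
my $background_psql = $node->background_psql(
	'postgres',
	on_error_stop => 0,
	timeout => $psql_timeout_secs);
```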
@@ -1042,7 +1042,7 @@ ERROR: parameter "locale" must be specified
SET icu_validation_level = ERROR;
CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); -- fails
ERROR: ICU locale "nonsense-nowhere" has unknown language "nonsense"
-HINT: To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled".
+HINT: To disable ICU locale validation, set the parameter icu_validation_level to "disabled".
CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense=yes'); -- fails
ERROR: could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR
RESET icu_validation_level;
@@ -1050,7 +1050,7 @@ CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense=
WARNING: could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR
CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); DROP COLLATION testx;
WARNING: ICU locale "nonsense-nowhere" has unknown language "nonsense"
-HINT: To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled".
+HINT: To disable ICU locale validation, set the parameter icu_validation_level to "disabled".
CREATE COLLATION test4 FROM nonsense;
ERROR: collation "nonsense" for encoding "UTF8" does not exist
CREATE COLLATION test5 FROM test0;
@@ -219,10 +219,10 @@ CONTEXT: JSON data, line 1: {"abc":1,3...
SET max_stack_depth = '100kB';
SELECT repeat('[', 10000)::json;
ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
SELECT repeat('{"a":', 10000)::json;
ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
RESET max_stack_depth;
-- Miscellaneous stuff.
SELECT 'true'::json; -- OK
@@ -213,10 +213,10 @@ CONTEXT: JSON data, line 1: {"abc":1,3...
SET max_stack_depth = '100kB';
SELECT repeat('[', 10000)::jsonb;
ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
SELECT repeat('{"a":', 10000)::jsonb;
ERROR: stack depth limit exceeded
-HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+HINT: Increase the configuration parameter max_stack_depth (currently 100kB), after ensuring the platform's stack depth limit is adequate.
RESET max_stack_depth;
-- Miscellaneous stuff.
SELECT 'true'::jsonb; -- OK