/*-------------------------------------------------------------------------
*
* slotfuncs.c
* Support functions for replication slots
*
* Copyright (c) 2012-2021, PostgreSQL Global Development Group
*
* IDENTIFICATION
* src/backend/replication/slotfuncs.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/htup_details.h"
#include "access/xlog_internal.h"
#include "access/xlogutils.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "replication/decode.h"
#include "replication/logical.h"
#include "replication/slot.h"
#include "utils/builtins.h"
#include "utils/inval.h"
#include "utils/pg_lsn.h"
#include "utils/resowner.h"
static void
check_permissions(void)
{
if (!superuser() && !has_rolreplication(GetUserId()))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser or replication role to use replication slots")));
}

/*
 * Helper function for creating a new physical replication slot with
 * given arguments.  Note that this function doesn't release the created
 * slot.
 *
 * If restart_lsn is a valid value, we use it without going through the
 * WAL reservation routine, so the caller must guarantee that WAL is
 * available.
 */
static void
create_physical_replication_slot(char *name, bool immediately_reserve,
                                 bool temporary, XLogRecPtr restart_lsn)
{
    Assert(!MyReplicationSlot);

    /* acquire replication slot, this will check for conflicting names */
    ReplicationSlotCreate(name, false,
                          temporary ? RS_TEMPORARY : RS_PERSISTENT, false);

    if (immediately_reserve)
    {
        /* Reserve WAL as the user asked for it */
        if (XLogRecPtrIsInvalid(restart_lsn))
            ReplicationSlotReserveWal();
        else
            MyReplicationSlot->data.restart_lsn = restart_lsn;

        /* Write this slot to disk */
        ReplicationSlotMarkDirty();
        ReplicationSlotSave();
    }
}

/*
 * SQL function for creating a new physical (streaming replication)
 * replication slot.
 */
Datum
pg_create_physical_replication_slot(PG_FUNCTION_ARGS)
{
    Name        name = PG_GETARG_NAME(0);
    bool        immediately_reserve = PG_GETARG_BOOL(1);
    bool        temporary = PG_GETARG_BOOL(2);
    Datum       values[2];
    bool        nulls[2];
    TupleDesc   tupdesc;
    HeapTuple   tuple;
    Datum       result;

    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    check_permissions();

    CheckSlotRequirements();

    create_physical_replication_slot(NameStr(*name),
                                     immediately_reserve,
                                     temporary,
                                     InvalidXLogRecPtr);

    values[0] = NameGetDatum(&MyReplicationSlot->data.name);
    nulls[0] = false;

    if (immediately_reserve)
    {
        values[1] = LSNGetDatum(MyReplicationSlot->data.restart_lsn);
        nulls[1] = false;
    }
    else
        nulls[1] = true;

    tuple = heap_form_tuple(tupdesc, values, nulls);
    result = HeapTupleGetDatum(tuple);

    ReplicationSlotRelease();

    PG_RETURN_DATUM(result);
}
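
/*
 * Example (SQL level; slot name and returned LSN are illustrative, and the
 * "temporary" argument is left at its default of false):
 *
 *   SELECT * FROM pg_create_physical_replication_slot('standby_slot', true);
 *     slot_name   |    lsn
 *   --------------+-----------
 *    standby_slot | 0/16B2B58
 */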

/*
 * Helper function for creating a new logical replication slot with
 * given arguments.  Note that this function doesn't release the created
 * slot.
 *
 * When find_startpoint is false, the slot's confirmed_flush is not set; it's
 * the caller's responsibility to ensure it's set to something sensible.
 */
static void
create_logical_replication_slot(char *name, char *plugin,
                                bool temporary, bool two_phase,
                                XLogRecPtr restart_lsn,
                                bool find_startpoint)
{
    LogicalDecodingContext *ctx = NULL;

    Assert(!MyReplicationSlot);

    /*
     * Acquire a logical decoding slot, this will check for conflicting names.
     * Initially create persistent slot as ephemeral - that allows us to
     * nicely handle errors during initialization because it'll get dropped if
     * this transaction fails. We'll make it persistent at the end. Temporary
     * slots can be created as temporary from beginning as they get dropped on
     * error as well.
     */
    ReplicationSlotCreate(name, true,
                          temporary ? RS_TEMPORARY : RS_EPHEMERAL, two_phase);

    /*
     * Create logical decoding context to find start point or, if we don't
     * need it, to 1) bump slot's restart_lsn and xmin 2) check plugin sanity.
     *
     * Note: when !find_startpoint this is still important, because it's at
     * this point that the output plugin is validated.
     */
    ctx = CreateInitDecodingContext(plugin, NIL,
                                    false,  /* just catalogs is OK */
                                    restart_lsn,
                                    XL_ROUTINE(.page_read = read_local_xlog_page,
                                               .segment_open = wal_segment_open,
                                               .segment_close = wal_segment_close),
                                    NULL, NULL, NULL);

    /*
     * If caller needs us to determine the decoding start point, do so now.
     * This might take a while.
     */
    if (find_startpoint)
        DecodingContextFindStartpoint(ctx);

    /* don't need the decoding context anymore */
    FreeDecodingContext(ctx);
}

/*
 * SQL function for creating a new logical replication slot.
 */
Datum
pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
{
    Name        name = PG_GETARG_NAME(0);
    Name        plugin = PG_GETARG_NAME(1);
    bool        temporary = PG_GETARG_BOOL(2);
    bool        two_phase = PG_GETARG_BOOL(3);
    Datum       result;
    TupleDesc   tupdesc;
    HeapTuple   tuple;
    Datum       values[2];
    bool        nulls[2];

    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    check_permissions();

    CheckLogicalDecodingRequirements();

    create_logical_replication_slot(NameStr(*name),
                                    NameStr(*plugin),
                                    temporary,
                                    two_phase,
                                    InvalidXLogRecPtr,
                                    true);

    values[0] = NameGetDatum(&MyReplicationSlot->data.name);
    values[1] = LSNGetDatum(MyReplicationSlot->data.confirmed_flush);

    memset(nulls, 0, sizeof(nulls));

    tuple = heap_form_tuple(tupdesc, values, nulls);
    result = HeapTupleGetDatum(tuple);

    /* ok, slot is now fully created, mark it as persistent if needed */
    if (!temporary)
        ReplicationSlotPersist();
    ReplicationSlotRelease();

    PG_RETURN_DATUM(result);
}
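
/*
 * Example (SQL level; slot name and returned LSN are illustrative;
 * test_decoding is the contrib example plugin, and the temporary/two_phase
 * arguments are left at their default of false):
 *
 *   SELECT * FROM pg_create_logical_replication_slot('test_slot', 'test_decoding');
 *    slot_name |    lsn
 *   -----------+-----------
 *    test_slot | 0/16CB9F0
 */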

/*
 * SQL function for dropping a replication slot.
 */
Datum
pg_drop_replication_slot(PG_FUNCTION_ARGS)
{
    Name        name = PG_GETARG_NAME(0);

    check_permissions();

    CheckSlotRequirements();

    ReplicationSlotDrop(NameStr(*name), true);

    PG_RETURN_VOID();
}
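
/*
 * Example (SQL level; the slot name is illustrative).  The function raises
 * an error if the slot is currently active:
 *
 *   SELECT pg_drop_replication_slot('standby_slot');
 */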

/*
 * pg_get_replication_slots - SQL SRF showing active replication slots.
 */
Datum
pg_get_replication_slots(PG_FUNCTION_ARGS)
{
#define PG_GET_REPLICATION_SLOTS_COLS 14
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    TupleDesc   tupdesc;
    Tuplestorestate *tupstore;
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;
    XLogRecPtr  currlsn;
    int         slotno;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not allowed in this context")));

    /* Build a tuple descriptor for our result type */
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    /*
     * We don't require any special permission to see this function's data
     * because nothing should be sensitive.  The most critical piece of
     * information is the slot name, which shouldn't contain anything
     * particularly sensitive.
     */

    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    tupstore = tuplestore_begin_heap(true, false, work_mem);
    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;

    MemoryContextSwitchTo(oldcontext);

    currlsn = GetXLogWriteRecPtr();

    LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
    for (slotno = 0; slotno < max_replication_slots; slotno++)
    {
        ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[slotno];
        ReplicationSlot slot_contents;
        Datum       values[PG_GET_REPLICATION_SLOTS_COLS];
        bool        nulls[PG_GET_REPLICATION_SLOTS_COLS];
        WALAvailability walstate;
        int         i;

        if (!slot->in_use)
            continue;

        /* Copy slot contents while holding spinlock, then examine at leisure */
        SpinLockAcquire(&slot->mutex);
        slot_contents = *slot;
        SpinLockRelease(&slot->mutex);

        memset(values, 0, sizeof(values));
        memset(nulls, 0, sizeof(nulls));

        i = 0;
        values[i++] = NameGetDatum(&slot_contents.data.name);

        if (slot_contents.data.database == InvalidOid)
            nulls[i++] = true;
        else
            values[i++] = NameGetDatum(&slot_contents.data.plugin);

        if (slot_contents.data.database == InvalidOid)
            values[i++] = CStringGetTextDatum("physical");
        else
            values[i++] = CStringGetTextDatum("logical");

        if (slot_contents.data.database == InvalidOid)
            nulls[i++] = true;
        else
            values[i++] = ObjectIdGetDatum(slot_contents.data.database);

        values[i++] = BoolGetDatum(slot_contents.data.persistency == RS_TEMPORARY);
        values[i++] = BoolGetDatum(slot_contents.active_pid != 0);

        if (slot_contents.active_pid != 0)
            values[i++] = Int32GetDatum(slot_contents.active_pid);
        else
            nulls[i++] = true;

        if (slot_contents.data.xmin != InvalidTransactionId)
            values[i++] = TransactionIdGetDatum(slot_contents.data.xmin);
        else
            nulls[i++] = true;

        if (slot_contents.data.catalog_xmin != InvalidTransactionId)
            values[i++] = TransactionIdGetDatum(slot_contents.data.catalog_xmin);
        else
            nulls[i++] = true;

        if (slot_contents.data.restart_lsn != InvalidXLogRecPtr)
            values[i++] = LSNGetDatum(slot_contents.data.restart_lsn);
        else
            nulls[i++] = true;

        if (slot_contents.data.confirmed_flush != InvalidXLogRecPtr)
            values[i++] = LSNGetDatum(slot_contents.data.confirmed_flush);
        else
            nulls[i++] = true;

        /*
         * If invalidated_at is valid and restart_lsn is invalid, we know for
         * certain that the slot has been invalidated.  Otherwise, test
         * availability from restart_lsn.
         */
        if (XLogRecPtrIsInvalid(slot_contents.data.restart_lsn) &&
            !XLogRecPtrIsInvalid(slot_contents.data.invalidated_at))
            walstate = WALAVAIL_REMOVED;
        else
            walstate = GetWALAvailability(slot_contents.data.restart_lsn);

        switch (walstate)
        {
            case WALAVAIL_INVALID_LSN:
                nulls[i++] = true;
                break;

            case WALAVAIL_RESERVED:
                values[i++] = CStringGetTextDatum("reserved");
                break;

            case WALAVAIL_EXTENDED:
                values[i++] = CStringGetTextDatum("extended");
                break;

            case WALAVAIL_UNRESERVED:
                values[i++] = CStringGetTextDatum("unreserved");
                break;

            case WALAVAIL_REMOVED:

                /*
                 * If we read the restart_lsn long enough ago, maybe that file
                 * has been removed by now.  However, the walsender could have
                 * moved forward enough that it jumped to another file after
                 * we looked.  If the checkpointer signalled the process to
                 * terminate, then it's definitely lost; but if a process is
                 * still alive, then "unreserved" seems more appropriate.
                 *
                 * If we do change it, save the state for safe_wal_size below.
                 */
                if (!XLogRecPtrIsInvalid(slot_contents.data.restart_lsn))
                {
                    int         pid;

                    SpinLockAcquire(&slot->mutex);
                    pid = slot->active_pid;
                    slot_contents.data.restart_lsn = slot->data.restart_lsn;
                    SpinLockRelease(&slot->mutex);
                    if (pid != 0)
                    {
                        values[i++] = CStringGetTextDatum("unreserved");
                        walstate = WALAVAIL_UNRESERVED;
                        break;
                    }
                }
                values[i++] = CStringGetTextDatum("lost");
                break;
        }

        /*
         * safe_wal_size is only computed for slots that have not been lost,
         * and only if there's a configured maximum size.
         */
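        /*
         * A worked example of the arithmetic below, with assumed settings:
         * with wal_segment_size = 16MB, max_slot_wal_keep_size = 1GB (64
         * segments) and wal_keep_size = 0, a slot whose restart_lsn lies in
         * segment N keeps WAL until the write position enters segment
         * N + 64 + 1.  safe_wal_size is the number of bytes between the
         * current write LSN and the start of that segment, so it shrinks as
         * WAL is written and can go negative once the limit is overrun.
         */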
        if (walstate == WALAVAIL_REMOVED || max_slot_wal_keep_size_mb < 0)
            nulls[i++] = true;
        else
        {
            XLogSegNo   targetSeg;
            uint64      slotKeepSegs;
            uint64      keepSegs;
            XLogSegNo   failSeg;
            XLogRecPtr  failLSN;

            XLByteToSeg(slot_contents.data.restart_lsn, targetSeg, wal_segment_size);

            /* determine how many segments can be kept by slots */
            slotKeepSegs = XLogMBVarToSegs(max_slot_wal_keep_size_mb, wal_segment_size);
            /* ditto for wal_keep_size */
            keepSegs = XLogMBVarToSegs(wal_keep_size_mb, wal_segment_size);

            /* if currpos reaches failLSN, we lose our segment */
            failSeg = targetSeg + Max(slotKeepSegs, keepSegs) + 1;
            XLogSegNoOffsetToRecPtr(failSeg, 0, wal_segment_size, failLSN);

            values[i++] = Int64GetDatum(failLSN - currlsn);
        }

        values[i++] = BoolGetDatum(slot_contents.data.two_phase);

        Assert(i == PG_GET_REPLICATION_SLOTS_COLS);

        tuplestore_putvalues(tupstore, tupdesc, values, nulls);
    }

    LWLockRelease(ReplicationSlotControlLock);

    tuplestore_donestoring(tupstore);

    return (Datum) 0;
}
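
/*
 * Example (SQL level): this function backs the pg_replication_slots system
 * view, which is the usual way to reach it; row values shown are
 * illustrative.  wal_status and safe_wal_size come from the walstate and
 * failLSN computations above (safe_wal_size is NULL here because
 * max_slot_wal_keep_size is assumed unset):
 *
 *   SELECT slot_name, slot_type, active, wal_status, safe_wal_size
 *     FROM pg_replication_slots;
 *     slot_name   | slot_type | active | wal_status | safe_wal_size
 *   --------------+-----------+--------+------------+---------------
 *    standby_slot | physical  | t      | reserved   |
 */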

/*
 * Helper function for advancing our physical replication slot forward.
 *
 * The LSN position to move to is compared simply to the slot's restart_lsn,
 * knowing that any position older than that would be removed by successive
 * checkpoints.
 */
static XLogRecPtr
pg_physical_replication_slot_advance(XLogRecPtr moveto)
{
    XLogRecPtr  startlsn = MyReplicationSlot->data.restart_lsn;
    XLogRecPtr  retlsn = startlsn;

    Assert(moveto != InvalidXLogRecPtr);

    if (startlsn < moveto)
    {
        SpinLockAcquire(&MyReplicationSlot->mutex);
        MyReplicationSlot->data.restart_lsn = moveto;
        SpinLockRelease(&MyReplicationSlot->mutex);
        retlsn = moveto;

        /*
         * Dirty the slot so that it is written out at the next checkpoint.
         * Note that the advanced LSN position may still be lost in the event
         * of a crash, but this makes the data consistent after a clean
         * shutdown.
         */
        ReplicationSlotMarkDirty();
    }

    return retlsn;
}
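
/*
 * Usage note: this helper is not SQL-callable itself; it is reached through
 * the SQL-level pg_replication_slot_advance() function (defined later in
 * this file).  For example, assuming a physical slot named 'standby_slot'
 * and an illustrative target LSN:
 *
 *   SELECT * FROM pg_replication_slot_advance('standby_slot', '0/5000000');
 */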

/*
 * Helper function for advancing our logical replication slot forward.
 *
 * The slot's restart_lsn is used as start point for reading records, while
 * confirmed_flush is used as base point for the decoding context.
 *
 * We cannot just do LogicalConfirmReceivedLocation to update confirmed_flush,
 * because we need to digest WAL to advance restart_lsn, which allows WAL to
 * be recycled and old catalog tuples to be removed.  As decoding is done in
 * fast_forward mode, no changes are generated anyway.
 */
static XLogRecPtr
pg_logical_replication_slot_advance(XLogRecPtr moveto)
{
LogicalDecodingContext *ctx;
ResourceOwner old_resowner = CurrentResourceOwner;
XLogRecPtr retlsn;
Assert(moveto != InvalidXLogRecPtr);
PG_TRY();
{
/*
* Create our decoding context in fast_forward mode, passing start_lsn
* as InvalidXLogRecPtr, so that we start processing from the slot's
* confirmed_flush.
*/
ctx = CreateDecodingContext(InvalidXLogRecPtr,
NIL,
true, /* fast_forward */
XL_ROUTINE(.page_read = read_local_xlog_page,
.segment_open = wal_segment_open,
.segment_close = wal_segment_close),
NULL, NULL, NULL);
/*
* Start reading at the slot's restart_lsn, which we know to point to
* a valid record.
*/
XLogBeginRead(ctx->reader, MyReplicationSlot->data.restart_lsn);
/* invalidate non-timetravel entries */
InvalidateSystemCaches();
/* Decode records until we reach the requested target or run out of WAL */
while (ctx->reader->EndRecPtr < moveto)
{
char *errm = NULL;
XLogRecord *record;
/*
* Read records. No changes are generated in fast_forward mode,
* but snapbuilder/slot statuses are updated properly.
*/
record = XLogReadRecord(ctx->reader, &errm);
if (errm)
elog(ERROR, "%s", errm);
/*
* Process the record. Storage-level changes are ignored in
* fast_forward mode, but other modules (such as snapbuilder)
* might still have critical updates to do.
*/
if (record)
LogicalDecodingProcessRecord(ctx, ctx->reader);
/* Stop once the requested target has been reached */
if (moveto <= ctx->reader->EndRecPtr)
break;
CHECK_FOR_INTERRUPTS();
}
/*
* Logical decoding could have clobbered CurrentResourceOwner during
* transaction management, so restore the executor's value. (This is
* a kluge, but it's not worth cleaning up right now.)
*/
CurrentResourceOwner = old_resowner;
if (ctx->reader->EndRecPtr != InvalidXLogRecPtr)
{
LogicalConfirmReceivedLocation(moveto);
/*
* If only the confirmed_flush LSN has changed, the slot won't get
* marked as dirty by the above. Callers on the walsender
* interface are expected to keep track of their own progress and
* don't need it written out. But SQL-interface users cannot
* specify their own start positions and it's harder for them to
* keep track of their progress, so we should make more of an
* effort to save it for them.
*
* Dirty the slot so it is written out at the next checkpoint. The
* LSN position advanced to may still be lost on a crash but this
* makes the data consistent after a clean shutdown.
*/
ReplicationSlotMarkDirty();
}
retlsn = MyReplicationSlot->data.confirmed_flush;
/* free context, call shutdown callback */
FreeDecodingContext(ctx);
InvalidateSystemCaches();
}
PG_CATCH();
{
/* clear all timetravel entries */
InvalidateSystemCaches();
PG_RE_THROW();
}
PG_END_TRY();
return retlsn;
}
/*
* SQL function for moving the position in a replication slot.
*/
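/*
 * Illustrative SQL usage (the slot name and LSN below are made up):
 *
 *   SELECT * FROM pg_replication_slot_advance('myslot', '0/5000000');
 *
 * The returned end_lsn is the position the slot actually reached, which
 * can be lower than the requested target if less WAL is available.
 */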
Datum
pg_replication_slot_advance(PG_FUNCTION_ARGS)
{
Name slotname = PG_GETARG_NAME(0);
XLogRecPtr moveto = PG_GETARG_LSN(1);
XLogRecPtr endlsn;
XLogRecPtr minlsn;
TupleDesc tupdesc;
Datum values[2];
bool nulls[2];
HeapTuple tuple;
Datum result;
Assert(!MyReplicationSlot);
check_permissions();
if (XLogRecPtrIsInvalid(moveto))
ereport(ERROR,
(errmsg("invalid target WAL LSN")));
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
/*
* We can't move the slot past what's been flushed/replayed, so clamp the
* target position accordingly.
*/
if (!RecoveryInProgress())
moveto = Min(moveto, GetFlushRecPtr());
else
moveto = Min(moveto, GetXLogReplayRecPtr(&ThisTimeLineID));
/* Acquire the slot so we "own" it */
ReplicationSlotAcquire(NameStr(*slotname), true);
/* A slot whose restart_lsn has never been reserved cannot be advanced */
if (XLogRecPtrIsInvalid(MyReplicationSlot->data.restart_lsn))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("replication slot \"%s\" cannot be advanced",
NameStr(*slotname)),
errdetail("This slot has never previously reserved WAL, or it has been invalidated.")));
/*
 * Check that the slot is not moving backwards. Physical slots rely simply
 * on restart_lsn as a minimum point, while logical slots have confirmed
 * consumption up to confirmed_flush, meaning that in both cases data
 * older than that is not available anymore.
 */
if (OidIsValid(MyReplicationSlot->data.database))
minlsn = MyReplicationSlot->data.confirmed_flush;
else
minlsn = MyReplicationSlot->data.restart_lsn;
if (moveto < minlsn)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot advance replication slot to %X/%X, minimum is %X/%X",
LSN_FORMAT_ARGS(moveto), LSN_FORMAT_ARGS(minlsn))));
/* Do the actual slot update, depending on the slot type */
if (OidIsValid(MyReplicationSlot->data.database))
endlsn = pg_logical_replication_slot_advance(moveto);
else
endlsn = pg_physical_replication_slot_advance(moveto);
values[0] = NameGetDatum(&MyReplicationSlot->data.name);
nulls[0] = false;
/*
* Recompute the minimum LSN and xmin across all slots to account for any
* advancing just done.
*/
ReplicationSlotsComputeRequiredXmin(false);
ReplicationSlotsComputeRequiredLSN();
ReplicationSlotRelease();
/* Return the reached position. */
values[1] = LSNGetDatum(endlsn);
nulls[1] = false;
tuple = heap_form_tuple(tupdesc, values, nulls);
result = HeapTupleGetDatum(tuple);
PG_RETURN_DATUM(result);
}
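/*
 * As the reached position (never 0/0) is returned even when the slot could
 * not be advanced further, repeated calls can be compared directly; a
 * hypothetical monitoring query:
 *
 *   SELECT pg_wal_lsn_diff(end_lsn, '0/5000000')
 *     FROM pg_replication_slot_advance('myslot', '0/6000000');
 */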
/*
* Helper function for copying a replication slot.
*/
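/*
 * Reached from SQL via the wrapper functions at the bottom of this file,
 * e.g. (slot and plugin names illustrative only):
 *
 *   SELECT * FROM pg_copy_physical_replication_slot('src', 'dst');
 *   SELECT * FROM pg_copy_logical_replication_slot('src', 'dst', false,
 *                                                  'test_decoding');
 */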
static Datum
copy_replication_slot(FunctionCallInfo fcinfo, bool logical_slot)
{
Name src_name = PG_GETARG_NAME(0);
Name dst_name = PG_GETARG_NAME(1);
ReplicationSlot *src = NULL;
ReplicationSlot first_slot_contents;
ReplicationSlot second_slot_contents;
XLogRecPtr src_restart_lsn;
bool src_islogical;
bool temporary;
char *plugin;
Datum values[2];
bool nulls[2];
Datum result;
TupleDesc tupdesc;
HeapTuple tuple;
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
check_permissions();
if (logical_slot)
CheckLogicalDecodingRequirements();
else
CheckSlotRequirements();
LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
/*
* We need to prevent the source slot's reserved WAL from being removed,
* but we don't want to lock that slot for very long, and it can advance
* in the meantime. So obtain the source slot's data, and create a new
* slot using its restart_lsn. Afterwards we lock the source slot again
* and verify that the data we copied (name, type) has not changed
* incompatibly. No inconvenient WAL removal can occur once the new slot
* is created -- but since WAL removal could have occurred before we
* managed to create the new slot, we advance the new slot's restart_lsn
* to the source slot's updated restart_lsn the second time we lock it.
*/
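/*
 * Rough timeline of the scheme described above (illustrative):
 *
 *   t0: read source slot under its spinlock    -> src_restart_lsn = X
 *   t1: create destination slot reserving X    -> WAL at or after X is kept
 *   t2: re-read source slot under its spinlock -> restart_lsn = X' (>= X)
 *   t3: install X' and related fields into the destination, or error out
 *       if the source changed incompatibly in between
 */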
for (int i = 0; i < max_replication_slots; i++)
{
ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
if (s->in_use && strcmp(NameStr(s->data.name), NameStr(*src_name)) == 0)
{
/* Copy the slot contents while holding spinlock */
SpinLockAcquire(&s->mutex);
first_slot_contents = *s;
SpinLockRelease(&s->mutex);
src = s;
break;
}
}
LWLockRelease(ReplicationSlotControlLock);
if (src == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("replication slot \"%s\" does not exist", NameStr(*src_name))));
src_islogical = SlotIsLogical(&first_slot_contents);
src_restart_lsn = first_slot_contents.data.restart_lsn;
temporary = (first_slot_contents.data.persistency == RS_TEMPORARY);
plugin = logical_slot ? NameStr(first_slot_contents.data.plugin) : NULL;
/* Check type of replication slot */
if (src_islogical != logical_slot)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
src_islogical ?
errmsg("cannot copy physical replication slot \"%s\" as a logical replication slot",
NameStr(*src_name)) :
errmsg("cannot copy logical replication slot \"%s\" as a physical replication slot",
NameStr(*src_name))));
/* Copying a non-reserved slot doesn't make sense */
if (XLogRecPtrIsInvalid(src_restart_lsn))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot copy a replication slot that doesn't reserve WAL")));
/* Overwrite params from optional arguments */
if (PG_NARGS() >= 3)
temporary = PG_GETARG_BOOL(2);
if (PG_NARGS() >= 4)
{
Assert(logical_slot);
plugin = NameStr(*(PG_GETARG_NAME(3)));
}
/* Create new slot and acquire it */
if (logical_slot)
{
/*
* We must not try to read WAL, since we haven't reserved it yet --
* hence pass find_startpoint false. confirmed_flush will be set
* below, by copying from the source slot.
*/
create_logical_replication_slot(NameStr(*dst_name),
plugin,
temporary,
false,
src_restart_lsn,
false);
}
else
create_physical_replication_slot(NameStr(*dst_name),
true,
temporary,
src_restart_lsn);
/*
* Update the destination slot to the current values of the source slot;
* recheck that the source slot is still the one we saw previously.
*/
{
TransactionId copy_effective_xmin;
TransactionId copy_effective_catalog_xmin;
TransactionId copy_xmin;
TransactionId copy_catalog_xmin;
XLogRecPtr copy_restart_lsn;
XLogRecPtr copy_confirmed_flush;
bool copy_islogical;
char *copy_name;
/* Copy data of source slot again */
SpinLockAcquire(&src->mutex);
second_slot_contents = *src;
SpinLockRelease(&src->mutex);
copy_effective_xmin = second_slot_contents.effective_xmin;
copy_effective_catalog_xmin = second_slot_contents.effective_catalog_xmin;
copy_xmin = second_slot_contents.data.xmin;
copy_catalog_xmin = second_slot_contents.data.catalog_xmin;
copy_restart_lsn = second_slot_contents.data.restart_lsn;
copy_confirmed_flush = second_slot_contents.data.confirmed_flush;
/* for existence check */
copy_name = NameStr(second_slot_contents.data.name);
copy_islogical = SlotIsLogical(&second_slot_contents);
/*
* Check if the source slot still exists and is valid. We regard it as
* invalid if the type of replication slot or name has been changed,
* or the restart_lsn either is invalid or has gone backward. (The
* restart_lsn could go backwards if the source slot is dropped and
* copied from an older slot during installation.)
*
* Since erroring out will release and drop the destination slot we
* don't need to release it here.
*/
if (copy_restart_lsn < src_restart_lsn ||
src_islogical != copy_islogical ||
strcmp(copy_name, NameStr(*src_name)) != 0)
ereport(ERROR,
(errmsg("could not copy replication slot \"%s\"",
NameStr(*src_name)),
errdetail("The source replication slot was modified incompatibly during the copy operation.")));
/* The source slot must have a consistent snapshot */
if (src_islogical && XLogRecPtrIsInvalid(copy_confirmed_flush))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot copy unfinished logical replication slot \"%s\"",
NameStr(*src_name)),
errhint("Retry when the source replication slot's confirmed_flush_lsn is valid.")));
/* Install copied values again */
SpinLockAcquire(&MyReplicationSlot->mutex);
MyReplicationSlot->effective_xmin = copy_effective_xmin;
MyReplicationSlot->effective_catalog_xmin = copy_effective_catalog_xmin;
MyReplicationSlot->data.xmin = copy_xmin;
MyReplicationSlot->data.catalog_xmin = copy_catalog_xmin;
MyReplicationSlot->data.restart_lsn = copy_restart_lsn;
MyReplicationSlot->data.confirmed_flush = copy_confirmed_flush;
SpinLockRelease(&MyReplicationSlot->mutex);
ReplicationSlotMarkDirty();
ReplicationSlotsComputeRequiredXmin(false);
ReplicationSlotsComputeRequiredLSN();
ReplicationSlotSave();
#ifdef USE_ASSERT_CHECKING
/* Check that the restart_lsn is available */
{
XLogSegNo segno;
XLByteToSeg(copy_restart_lsn, segno, wal_segment_size);
Assert(XLogGetLastRemovedSegno() < segno);
}
#endif
}
/* target slot fully created, mark as persistent if needed */
if (logical_slot && !temporary)
ReplicationSlotPersist();
/* All done. Set up the return values */
values[0] = NameGetDatum(dst_name);
nulls[0] = false;
if (!XLogRecPtrIsInvalid(MyReplicationSlot->data.confirmed_flush))
{
values[1] = LSNGetDatum(MyReplicationSlot->data.confirmed_flush);
nulls[1] = false;
}
else
nulls[1] = true;
tuple = heap_form_tuple(tupdesc, values, nulls);
result = HeapTupleGetDatum(tuple);
ReplicationSlotRelease();
PG_RETURN_DATUM(result);
}
/* The wrappers below are all to appease opr_sanity */
Datum
pg_copy_logical_replication_slot_a(PG_FUNCTION_ARGS)
{
return copy_replication_slot(fcinfo, true);
}
Datum
pg_copy_logical_replication_slot_b(PG_FUNCTION_ARGS)
{
return copy_replication_slot(fcinfo, true);
}
Datum
pg_copy_logical_replication_slot_c(PG_FUNCTION_ARGS)
{
return copy_replication_slot(fcinfo, true);
}
Datum
pg_copy_physical_replication_slot_a(PG_FUNCTION_ARGS)
{
return copy_replication_slot(fcinfo, false);
}
Datum
pg_copy_physical_replication_slot_b(PG_FUNCTION_ARGS)
{
return copy_replication_slot(fcinfo, false);
}