tableam: Rename wrapper functions to match callback names.

Some of the wrapper functions didn't match the callback names, many of
them because they stayed "consistent" with the historic naming of the
wrapped functionality. We decided that for most cases it's more
important for tableam to be consistent going forward than with the past.

The one exception is beginscan/endscan/...  because it'd have looked
odd to have systable_beginscan/endscan/... with a different naming
scheme, and changing the systable_* APIs would have caused way too
much churn (including breaking a lot of external users).

Author: Ashwin Agrawal, with some small additions by Andres Freund
Reviewed-By: Andres Freund
Discussion: https://postgr.es/m/CALfoeiugyrXZfX7n0ORCa4L-m834dzmaE8eFdbNR6PMpetU4Ww@mail.gmail.com
This commit is contained in:
Andres Freund 2019-05-23 16:25:48 -07:00
parent 54487d1560
commit 73b8c3bd28
14 changed files with 178 additions and 169 deletions

View File

@ -1866,12 +1866,12 @@ ReleaseBulkInsertStatePin(BulkInsertState bistate)
* The new tuple is stamped with current transaction ID and the specified
* command ID.
*
* See table_insert for comments about most of the input flags, except that
* this routine directly takes a tuple rather than a slot.
* See table_tuple_insert for comments about most of the input flags, except
* that this routine directly takes a tuple rather than a slot.
*
* There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
* options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
* implement table_insert_speculative().
* implement table_tuple_insert_speculative().
*
* On return the header fields of *tup are updated to match the stored tuple;
* in particular tup->t_self receives the actual TID where the tuple was
@ -2444,8 +2444,8 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
/*
* heap_delete - delete a tuple
*
* See table_delete() for an explanation of the parameters, except that this
* routine directly takes a tuple rather than a slot.
* See table_tuple_delete() for an explanation of the parameters, except that
* this routine directly takes a tuple rather than a slot.
*
* In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
* t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
@ -2890,8 +2890,8 @@ simple_heap_delete(Relation relation, ItemPointer tid)
/*
* heap_update - replace a tuple
*
* See table_update() for an explanation of the parameters, except that this
* routine directly takes a tuple rather than a slot.
* See table_tuple_update() for an explanation of the parameters, except that
* this routine directly takes a tuple rather than a slot.
*
* In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
* t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the last
@ -3961,7 +3961,7 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
* *buffer: set to buffer holding tuple (pinned but not locked at exit)
* *tmfd: filled in failure cases (see below)
*
* Function results are the same as the ones for table_lock_tuple().
* Function results are the same as the ones for table_tuple_lock().
*
* In the failure cases other than TM_Invisible, the routine fills
* *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,

View File

@ -221,7 +221,7 @@ table_index_fetch_tuple_check(Relation rel,
*/
void
table_get_latest_tid(TableScanDesc scan, ItemPointer tid)
table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid)
{
Relation rel = scan->rs_rd;
const TableAmRoutine *tableam = rel->rd_tableam;
@ -248,19 +248,19 @@ table_get_latest_tid(TableScanDesc scan, ItemPointer tid)
*/
/*
* simple_table_insert - insert a tuple
* simple_table_tuple_insert - insert a tuple
*
* Currently, this routine differs from table_insert only in supplying a
* Currently, this routine differs from table_tuple_insert only in supplying a
* default command ID and not allowing access to the speedup options.
*/
void
simple_table_insert(Relation rel, TupleTableSlot *slot)
simple_table_tuple_insert(Relation rel, TupleTableSlot *slot)
{
table_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);
}
/*
* simple_table_delete - delete a tuple
* simple_table_tuple_delete - delete a tuple
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
@ -268,16 +268,16 @@ simple_table_insert(Relation rel, TupleTableSlot *slot)
* via ereport().
*/
void
simple_table_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
simple_table_tuple_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
{
TM_Result result;
TM_FailureData tmfd;
result = table_delete(rel, tid,
GetCurrentCommandId(true),
snapshot, InvalidSnapshot,
true /* wait for commit */ ,
&tmfd, false /* changingPart */ );
result = table_tuple_delete(rel, tid,
GetCurrentCommandId(true),
snapshot, InvalidSnapshot,
true /* wait for commit */ ,
&tmfd, false /* changingPart */ );
switch (result)
{
@ -299,13 +299,13 @@ simple_table_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
break;
default:
elog(ERROR, "unrecognized table_delete status: %u", result);
elog(ERROR, "unrecognized table_tuple_delete status: %u", result);
break;
}
}
/*
* simple_table_update - replace a tuple
* simple_table_tuple_update - replace a tuple
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
@ -313,20 +313,20 @@ simple_table_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
* via ereport().
*/
void
simple_table_update(Relation rel, ItemPointer otid,
TupleTableSlot *slot,
Snapshot snapshot,
bool *update_indexes)
simple_table_tuple_update(Relation rel, ItemPointer otid,
TupleTableSlot *slot,
Snapshot snapshot,
bool *update_indexes)
{
TM_Result result;
TM_FailureData tmfd;
LockTupleMode lockmode;
result = table_update(rel, otid, slot,
GetCurrentCommandId(true),
snapshot, InvalidSnapshot,
true /* wait for commit */ ,
&tmfd, &lockmode, update_indexes);
result = table_tuple_update(rel, otid, slot,
GetCurrentCommandId(true),
snapshot, InvalidSnapshot,
true /* wait for commit */ ,
&tmfd, &lockmode, update_indexes);
switch (result)
{
@ -348,7 +348,7 @@ simple_table_update(Relation rel, ItemPointer otid,
break;
default:
elog(ERROR, "unrecognized table_update status: %u", result);
elog(ERROR, "unrecognized table_tuple_update status: %u", result);
break;
}

View File

@ -90,7 +90,7 @@ typedef enum EolType
*/
typedef enum CopyInsertMethod
{
CIM_SINGLE, /* use table_insert or fdw routine */
CIM_SINGLE, /* use table_tuple_insert or fdw routine */
CIM_MULTI, /* always use table_multi_insert */
CIM_MULTI_CONDITIONAL /* use table_multi_insert only if valid */
} CopyInsertMethod;
@ -2664,7 +2664,7 @@ CopyFrom(CopyState cstate)
PartitionTupleRouting *proute = NULL;
ErrorContextCallback errcallback;
CommandId mycid = GetCurrentCommandId(true);
int ti_options = 0; /* start with default table_insert options */
int ti_options = 0; /* start with default options for insert */
BulkInsertState bistate = NULL;
CopyInsertMethod insertMethod;
CopyMultiInsertInfo multiInsertInfo = {0}; /* pacify compiler */
@ -2737,11 +2737,11 @@ CopyFrom(CopyState cstate)
* FSM for free space is a waste of time, even if we must use WAL because
* of archiving. This could possibly be wrong, but it's unlikely.
*
* The comments for table_insert and RelationGetBufferForTuple specify that
* skipping WAL logging is only safe if we ensure that our tuples do not
* go into pages containing tuples from any other transactions --- but this
* must be the case if we have a new table or new relfilenode, so we need
* no additional work to enforce that.
* The comments for table_tuple_insert and RelationGetBufferForTuple
* specify that skipping WAL logging is only safe if we ensure that our
* tuples do not go into pages containing tuples from any other
* transactions --- but this must be the case if we have a new table or
* new relfilenode, so we need no additional work to enforce that.
*
* We currently don't support this optimization if the COPY target is a
* partitioned table as we currently only lazily initialize partition
@ -2888,9 +2888,9 @@ CopyFrom(CopyState cstate)
/*
* It's generally more efficient to prepare a bunch of tuples for
* insertion, and insert them in one table_multi_insert() call, than call
* table_insert() separately for every tuple. However, there are a number
* of reasons why we might not be able to do this. These are explained
* below.
* table_tuple_insert() separately for every tuple. However, there are a
* number of reasons why we might not be able to do this. These are
* explained below.
*/
if (resultRelInfo->ri_TrigDesc != NULL &&
(resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
@ -3286,8 +3286,8 @@ CopyFrom(CopyState cstate)
else
{
/* OK, store the tuple and create index entries for it */
table_insert(resultRelInfo->ri_RelationDesc, myslot,
mycid, ti_options, bistate);
table_tuple_insert(resultRelInfo->ri_RelationDesc,
myslot, mycid, ti_options, bistate);
if (resultRelInfo->ri_NumIndices > 0)
recheckIndexes = ExecInsertIndexTuples(myslot,

View File

@ -60,7 +60,7 @@ typedef struct
Relation rel; /* relation to write to */
ObjectAddress reladdr; /* address of rel, for ExecCreateTableAs */
CommandId output_cid; /* cmin to insert in output tuples */
int ti_options; /* table_insert performance options */
int ti_options; /* table_tuple_insert performance options */
BulkInsertState bistate; /* bulk insert state */
} DR_intorel;
@ -576,18 +576,18 @@ intorel_receive(TupleTableSlot *slot, DestReceiver *self)
/*
* Note that the input slot might not be of the type of the target
* relation. That's supported by table_insert(), but slightly less
* relation. That's supported by table_tuple_insert(), but slightly less
* efficient than inserting with the right slot - but the alternative
* would be to copy into a slot of the right type, which would not be
* cheap either. This also doesn't allow accessing per-AM data (say a
* tuple's xmin), but since we don't do that here...
*/
table_insert(myState->rel,
slot,
myState->output_cid,
myState->ti_options,
myState->bistate);
table_tuple_insert(myState->rel,
slot,
myState->output_cid,
myState->ti_options,
myState->bistate);
/* We know this is a newly created relation, so there are no indexes */

View File

@ -54,7 +54,7 @@ typedef struct
/* These fields are filled by transientrel_startup: */
Relation transientrel; /* relation to write to */
CommandId output_cid; /* cmin to insert in output tuples */
int ti_options; /* table_insert performance options */
int ti_options; /* table_tuple_insert performance options */
BulkInsertState bistate; /* bulk insert state */
} DR_transientrel;
@ -481,18 +481,18 @@ transientrel_receive(TupleTableSlot *slot, DestReceiver *self)
/*
* Note that the input slot might not be of the type of the target
* relation. That's supported by table_insert(), but slightly less
* relation. That's supported by table_tuple_insert(), but slightly less
* efficient than inserting with the right slot - but the alternative
* would be to copy into a slot of the right type, which would not be
* cheap either. This also doesn't allow accessing per-AM data (say a
* tuple's xmin), but since we don't do that here...
*/
table_insert(myState->transientrel,
slot,
myState->output_cid,
myState->ti_options,
myState->bistate);
table_tuple_insert(myState->transientrel,
slot,
myState->output_cid,
myState->ti_options,
myState->bistate);
/* We know this is a newly created relation, so there are no indexes */

View File

@ -4732,9 +4732,9 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
newrel = NULL;
/*
* Prepare a BulkInsertState and options for table_insert. Because we're
* building a new heap, we can skip WAL-logging and fsync it to disk at
* the end instead (unless WAL-logging is required for archiving or
* Prepare a BulkInsertState and options for table_tuple_insert. Because
* we're building a new heap, we can skip WAL-logging and fsync it to disk
* at the end instead (unless WAL-logging is required for archiving or
* streaming replication). The FSM is empty too, so don't bother using it.
*/
if (newrel)
@ -5005,7 +5005,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
/* Write the tuple out to the new relation */
if (newrel)
table_insert(newrel, insertslot, mycid, ti_options, bistate);
table_tuple_insert(newrel, insertslot, mycid,
ti_options, bistate);
ResetExprContext(econtext);

View File

@ -3332,7 +3332,7 @@ GetTupleForTrigger(EState *estate,
*/
if (!IsolationUsesXactSnapshot())
lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
test = table_lock_tuple(relation, tid, estate->es_snapshot, oldslot,
test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
estate->es_output_cid,
lockmode, LockWaitBlock,
lockflags,
@ -3386,7 +3386,7 @@ GetTupleForTrigger(EState *estate,
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
elog(ERROR, "unexpected table_lock_tuple status: %u", test);
elog(ERROR, "unexpected table_tuple_lock status: %u", test);
break;
case TM_Deleted:
@ -3402,7 +3402,7 @@ GetTupleForTrigger(EState *estate,
break;
default:
elog(ERROR, "unrecognized table_lock_tuple status: %u", test);
elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
return false; /* keep compiler quiet */
}
}
@ -3412,7 +3412,8 @@ GetTupleForTrigger(EState *estate,
* We expect the tuple to be present, thus very simple error handling
* suffices.
*/
if (!table_fetch_row_version(relation, tid, SnapshotAny, oldslot))
if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
oldslot))
elog(ERROR, "failed to fetch tuple for trigger");
}
@ -4270,7 +4271,9 @@ AfterTriggerExecute(EState *estate,
{
LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
if (!table_fetch_row_version(rel, &(event->ate_ctid1), SnapshotAny, LocTriggerData.tg_trigslot))
if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid1),
SnapshotAny,
LocTriggerData.tg_trigslot))
elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
LocTriggerData.tg_trigtuple =
ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
@ -4287,7 +4290,9 @@ AfterTriggerExecute(EState *estate,
{
LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
if (!table_fetch_row_version(rel, &(event->ate_ctid2), SnapshotAny, LocTriggerData.tg_newslot))
if (!table_tuple_fetch_row_version(rel, &(event->ate_ctid2),
SnapshotAny,
LocTriggerData.tg_newslot))
elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
LocTriggerData.tg_newtuple =
ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);

View File

@ -2436,7 +2436,7 @@ ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
* quals. For that result to be useful, typically the input tuple has to be
* last row version (otherwise the result isn't particularly useful) and
* locked (otherwise the result might be out of date). That's typically
* achieved by using table_lock_tuple() with the
* achieved by using table_tuple_lock() with the
* TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
*
* Returns a slot containing the new candidate update/delete tuple, or
@ -2654,9 +2654,9 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
else
{
/* ordinary table, fetch the tuple */
if (!table_fetch_row_version(erm->relation,
(ItemPointer) DatumGetPointer(datum),
SnapshotAny, slot))
if (!table_tuple_fetch_row_version(erm->relation,
(ItemPointer) DatumGetPointer(datum),
SnapshotAny, slot))
elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
}
}

View File

@ -173,7 +173,7 @@ retry:
PushActiveSnapshot(GetLatestSnapshot());
res = table_lock_tuple(rel, &(outslot->tts_tid), GetLatestSnapshot(),
res = table_tuple_lock(rel, &(outslot->tts_tid), GetLatestSnapshot(),
outslot,
GetCurrentCommandId(false),
lockmode,
@ -208,7 +208,7 @@ retry:
elog(ERROR, "attempted to lock invisible tuple");
break;
default:
elog(ERROR, "unexpected table_lock_tuple status: %u", res);
elog(ERROR, "unexpected table_tuple_lock status: %u", res);
break;
}
}
@ -337,7 +337,7 @@ retry:
PushActiveSnapshot(GetLatestSnapshot());
res = table_lock_tuple(rel, &(outslot->tts_tid), GetLatestSnapshot(),
res = table_tuple_lock(rel, &(outslot->tts_tid), GetLatestSnapshot(),
outslot,
GetCurrentCommandId(false),
lockmode,
@ -372,7 +372,7 @@ retry:
elog(ERROR, "attempted to lock invisible tuple");
break;
default:
elog(ERROR, "unexpected table_lock_tuple status: %u", res);
elog(ERROR, "unexpected table_tuple_lock status: %u", res);
break;
}
}
@ -425,7 +425,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
ExecPartitionCheck(resultRelInfo, slot, estate, true);
/* OK, store the tuple and create index entries for it */
simple_table_insert(resultRelInfo->ri_RelationDesc, slot);
simple_table_tuple_insert(resultRelInfo->ri_RelationDesc, slot);
if (resultRelInfo->ri_NumIndices > 0)
recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
@ -490,8 +490,8 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
if (resultRelInfo->ri_PartitionCheck)
ExecPartitionCheck(resultRelInfo, slot, estate, true);
simple_table_update(rel, tid, slot, estate->es_snapshot,
&update_indexes);
simple_table_tuple_update(rel, tid, slot, estate->es_snapshot,
&update_indexes);
if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
@ -535,7 +535,7 @@ ExecSimpleRelationDelete(EState *estate, EPQState *epqstate,
if (!skip_tuple)
{
/* OK, delete the tuple */
simple_table_delete(rel, tid, estate->es_snapshot);
simple_table_tuple_delete(rel, tid, estate->es_snapshot);
/* AFTER ROW DELETE Triggers */
ExecARDeleteTriggers(estate, resultRelInfo,

View File

@ -185,7 +185,7 @@ lnext:
if (!IsolationUsesXactSnapshot())
lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
test = table_lock_tuple(erm->relation, &tid, estate->es_snapshot,
test = table_tuple_lock(erm->relation, &tid, estate->es_snapshot,
markSlot, estate->es_output_cid,
lockmode, erm->waitPolicy,
lockflags,
@ -208,7 +208,7 @@ lnext:
* to fetch the updated tuple instead, but doing so would
* require changing heap_update and heap_delete to not
* complain about updating "invisible" tuples, which seems
* pretty scary (table_lock_tuple will not complain, but few
* pretty scary (table_tuple_lock will not complain, but few
* callers expect TM_Invisible, and we're not one of them). So
* for now, treat the tuple as deleted and do not process.
*/
@ -229,7 +229,7 @@ lnext:
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
elog(ERROR, "unexpected table_lock_tuple status: %u",
elog(ERROR, "unexpected table_tuple_lock status: %u",
test);
break;
@ -246,7 +246,7 @@ lnext:
break;
default:
elog(ERROR, "unrecognized table_lock_tuple status: %u",
elog(ERROR, "unrecognized table_tuple_lock status: %u",
test);
}

View File

@ -236,7 +236,7 @@ ExecCheckTIDVisible(EState *estate,
if (!IsolationUsesXactSnapshot())
return;
if (!table_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
ExecCheckTupleVisible(estate, rel, tempSlot);
ExecClearTuple(tempSlot);
@ -544,11 +544,11 @@ ExecInsert(ModifyTableState *mtstate,
specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
/* insert the tuple, with the speculative token */
table_insert_speculative(resultRelationDesc, slot,
estate->es_output_cid,
0,
NULL,
specToken);
table_tuple_insert_speculative(resultRelationDesc, slot,
estate->es_output_cid,
0,
NULL,
specToken);
/* insert index entries for tuple */
recheckIndexes = ExecInsertIndexTuples(slot, estate, true,
@ -556,8 +556,8 @@ ExecInsert(ModifyTableState *mtstate,
arbiterIndexes);
/* adjust the tuple's state accordingly */
table_complete_speculative(resultRelationDesc, slot,
specToken, !specConflict);
table_tuple_complete_speculative(resultRelationDesc, slot,
specToken, !specConflict);
/*
* Wake up anyone waiting for our decision. They will re-check
@ -584,9 +584,9 @@ ExecInsert(ModifyTableState *mtstate,
else
{
/* insert the tuple normally */
table_insert(resultRelationDesc, slot,
estate->es_output_cid,
0, NULL);
table_tuple_insert(resultRelationDesc, slot,
estate->es_output_cid,
0, NULL);
/* insert index entries for tuple */
if (resultRelInfo->ri_NumIndices > 0)
@ -766,13 +766,13 @@ ExecDelete(ModifyTableState *mtstate,
* mode transactions.
*/
ldelete:;
result = table_delete(resultRelationDesc, tupleid,
estate->es_output_cid,
estate->es_snapshot,
estate->es_crosscheck_snapshot,
true /* wait for commit */ ,
&tmfd,
changingPart);
result = table_tuple_delete(resultRelationDesc, tupleid,
estate->es_output_cid,
estate->es_snapshot,
estate->es_crosscheck_snapshot,
true /* wait for commit */ ,
&tmfd,
changingPart);
switch (result)
{
@ -832,7 +832,7 @@ ldelete:;
inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
resultRelInfo->ri_RangeTableIndex);
result = table_lock_tuple(resultRelationDesc, tupleid,
result = table_tuple_lock(resultRelationDesc, tupleid,
estate->es_snapshot,
inputslot, estate->es_output_cid,
LockTupleExclusive, LockWaitBlock,
@ -875,7 +875,7 @@ ldelete:;
* out.
*
* See also TM_SelfModified response to
* table_delete() above.
* table_tuple_delete() above.
*/
if (tmfd.cmax != estate->es_output_cid)
ereport(ERROR,
@ -900,7 +900,7 @@ ldelete:;
* locking the latest version via
* TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
*/
elog(ERROR, "unexpected table_lock_tuple status: %u",
elog(ERROR, "unexpected table_tuple_lock status: %u",
result);
return NULL;
}
@ -918,7 +918,8 @@ ldelete:;
return NULL;
default:
elog(ERROR, "unrecognized table_delete status: %u", result);
elog(ERROR, "unrecognized table_tuple_delete status: %u",
result);
return NULL;
}
@ -990,8 +991,8 @@ ldelete:;
}
else
{
if (!table_fetch_row_version(resultRelationDesc, tupleid,
SnapshotAny, slot))
if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
SnapshotAny, slot))
elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
}
}
@ -1134,7 +1135,7 @@ ExecUpdate(ModifyTableState *mtstate,
* If we generate a new candidate tuple after EvalPlanQual testing, we
* must loop back here and recheck any RLS policies and constraints.
* (We don't need to redo triggers, however. If there are any BEFORE
* triggers then trigger.c will have done table_lock_tuple to lock the
* triggers then trigger.c will have done table_tuple_lock to lock the
* correct tuple, so there's no need to do them again.)
*/
lreplace:;
@ -1309,12 +1310,12 @@ lreplace:;
* needed for referential integrity updates in transaction-snapshot
* mode transactions.
*/
result = table_update(resultRelationDesc, tupleid, slot,
estate->es_output_cid,
estate->es_snapshot,
estate->es_crosscheck_snapshot,
true /* wait for commit */ ,
&tmfd, &lockmode, &update_indexes);
result = table_tuple_update(resultRelationDesc, tupleid, slot,
estate->es_output_cid,
estate->es_snapshot,
estate->es_crosscheck_snapshot,
true /* wait for commit */ ,
&tmfd, &lockmode, &update_indexes);
switch (result)
{
@ -1373,7 +1374,7 @@ lreplace:;
inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
resultRelInfo->ri_RangeTableIndex);
result = table_lock_tuple(resultRelationDesc, tupleid,
result = table_tuple_lock(resultRelationDesc, tupleid,
estate->es_snapshot,
inputslot, estate->es_output_cid,
lockmode, LockWaitBlock,
@ -1412,7 +1413,7 @@ lreplace:;
* otherwise error out.
*
* See also TM_SelfModified response to
* table_update() above.
* table_tuple_update() above.
*/
if (tmfd.cmax != estate->es_output_cid)
ereport(ERROR,
@ -1422,8 +1423,8 @@ lreplace:;
return NULL;
default:
/* see table_lock_tuple call in ExecDelete() */
elog(ERROR, "unexpected table_lock_tuple status: %u",
/* see table_tuple_lock call in ExecDelete() */
elog(ERROR, "unexpected table_tuple_lock status: %u",
result);
return NULL;
}
@ -1440,7 +1441,8 @@ lreplace:;
return NULL;
default:
elog(ERROR, "unrecognized table_update status: %u", result);
elog(ERROR, "unrecognized table_tuple_update status: %u",
result);
return NULL;
}
@ -1521,7 +1523,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
* previous conclusion that the tuple is conclusively committed is not
* true anymore.
*/
test = table_lock_tuple(relation, conflictTid,
test = table_tuple_lock(relation, conflictTid,
estate->es_snapshot,
existing, estate->es_output_cid,
lockmode, LockWaitBlock, 0,
@ -1612,7 +1614,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
return false;
default:
elog(ERROR, "unrecognized table_lock_tuple status: %u", test);
elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
}
/* Success, the tuple is locked. */
@ -1677,7 +1679,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
/*
* Note that it is possible that the target tuple has been modified in
* this session, after the above table_lock_tuple. We choose to not error
* this session, after the above table_tuple_lock. We choose to not error
* out in that case, in line with ExecUpdate's treatment of similar cases.
* This can happen if an UPDATE is triggered from within ExecQual(),
* ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a

View File

@ -381,9 +381,9 @@ TidNext(TidScanState *node)
* current according to our snapshot.
*/
if (node->tss_isCurrentOf)
table_get_latest_tid(scan, &tid);
table_tuple_get_latest_tid(scan, &tid);
if (table_fetch_row_version(heapRelation, &tid, snapshot, slot))
if (table_tuple_fetch_row_version(heapRelation, &tid, snapshot, slot))
return slot;
/* Bad TID or failed snapshot qual; try next */

View File

@ -382,7 +382,7 @@ currtid_byreloid(PG_FUNCTION_ARGS)
snapshot = RegisterSnapshot(GetLatestSnapshot());
scan = table_beginscan(rel, snapshot, 0, NULL);
table_get_latest_tid(scan, result);
table_tuple_get_latest_tid(scan, result);
table_endscan(scan);
UnregisterSnapshot(snapshot);
@ -420,7 +420,7 @@ currtid_byrelname(PG_FUNCTION_ARGS)
snapshot = RegisterSnapshot(GetLatestSnapshot());
scan = table_beginscan(rel, snapshot, 0, NULL);
table_get_latest_tid(scan, result);
table_tuple_get_latest_tid(scan, result);
table_endscan(scan);
UnregisterSnapshot(snapshot);

View File

@ -101,9 +101,9 @@ typedef enum TM_Result
} TM_Result;
/*
* When table_update, table_delete, or table_lock_tuple fail because the target
* tuple is already outdated, they fill in this struct to provide information
* to the caller about what happened.
* When table_tuple_update, table_tuple_delete, or table_tuple_lock fail
* because the target tuple is already outdated, they fill in this struct to
* provide information to the caller about what happened.
*
* ctid is the target's ctid link: it is the same as the target's TID if the
* target was deleted, or the location of the replacement tuple if the target
@ -127,13 +127,13 @@ typedef struct TM_FailureData
bool traversed;
} TM_FailureData;
/* "options" flag bits for table_insert */
/* "options" flag bits for table_tuple_insert */
#define TABLE_INSERT_SKIP_WAL 0x0001
#define TABLE_INSERT_SKIP_FSM 0x0002
#define TABLE_INSERT_FROZEN 0x0004
#define TABLE_INSERT_NO_LOGICAL 0x0008
/* flag bits for table_lock_tuple */
/* flag bits for table_tuple_lock */
/* Follow tuples whose update is in progress if lock modes don't conflict */
#define TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS (1 << 0)
/* Follow update chain and lock latest version of tuple */
@ -352,12 +352,12 @@ typedef struct TableAmRoutine
* ------------------------------------------------------------------------
*/
/* see table_insert() for reference about parameters */
/* see table_tuple_insert() for reference about parameters */
void (*tuple_insert) (Relation rel, TupleTableSlot *slot,
CommandId cid, int options,
struct BulkInsertStateData *bistate);
/* see table_insert_speculative() for reference about parameters */
/* see table_tuple_insert_speculative() for reference about parameters */
void (*tuple_insert_speculative) (Relation rel,
TupleTableSlot *slot,
CommandId cid,
@ -365,7 +365,7 @@ typedef struct TableAmRoutine
struct BulkInsertStateData *bistate,
uint32 specToken);
/* see table_complete_speculative() for reference about parameters */
/* see table_tuple_complete_speculative() for reference about parameters */
void (*tuple_complete_speculative) (Relation rel,
TupleTableSlot *slot,
uint32 specToken,
@ -375,7 +375,7 @@ typedef struct TableAmRoutine
void (*multi_insert) (Relation rel, TupleTableSlot **slots, int nslots,
CommandId cid, int options, struct BulkInsertStateData *bistate);
/* see table_delete() for reference about parameters */
/* see table_tuple_delete() for reference about parameters */
TM_Result (*tuple_delete) (Relation rel,
ItemPointer tid,
CommandId cid,
@ -385,7 +385,7 @@ typedef struct TableAmRoutine
TM_FailureData *tmfd,
bool changingPart);
/* see table_update() for reference about parameters */
/* see table_tuple_update() for reference about parameters */
TM_Result (*tuple_update) (Relation rel,
ItemPointer otid,
TupleTableSlot *slot,
@ -397,7 +397,7 @@ typedef struct TableAmRoutine
LockTupleMode *lockmode,
bool *update_indexes);
/* see table_lock_tuple() for reference about parameters */
/* see table_tuple_lock() for reference about parameters */
TM_Result (*tuple_lock) (Relation rel,
ItemPointer tid,
Snapshot snapshot,
@ -976,7 +976,7 @@ table_index_fetch_end(struct IndexFetchTableData *scan)
* supports storing multiple row versions reachable via a single index entry
* (like heap's HOT). Whereas table_fetch_row_version only evaluates the
* tuple exactly at `tid`. Outside of index entry ->table tuple lookups,
* table_fetch_row_version is what's usually needed.
* table_tuple_fetch_row_version is what's usually needed.
*/
static inline bool
table_index_fetch_tuple(struct IndexFetchTableData *scan,
@ -1019,10 +1019,10 @@ extern bool table_index_fetch_tuple_check(Relation rel,
* index entry->table tuple lookups.
*/
static inline bool
table_fetch_row_version(Relation rel,
ItemPointer tid,
Snapshot snapshot,
TupleTableSlot *slot)
table_tuple_fetch_row_version(Relation rel,
ItemPointer tid,
Snapshot snapshot,
TupleTableSlot *slot)
{
return rel->rd_tableam->tuple_fetch_row_version(rel, tid, snapshot, slot);
}
@ -1045,7 +1045,7 @@ table_tuple_tid_valid(TableScanDesc scan, ItemPointer tid)
* Return the latest version of the tuple at `tid`, by updating `tid` to
* point at the newest version.
*/
extern void table_get_latest_tid(TableScanDesc scan, ItemPointer tid);
extern void table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid);
/*
* Return true iff tuple in slot satisfies the snapshot.
@ -1122,8 +1122,8 @@ table_compute_xid_horizon_for_tuples(Relation rel,
* reflected in the slots contents.
*/
static inline void
table_insert(Relation rel, TupleTableSlot *slot, CommandId cid,
int options, struct BulkInsertStateData *bistate)
table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid,
int options, struct BulkInsertStateData *bistate)
{
rel->rd_tableam->tuple_insert(rel, slot, cid, options,
bistate);
@ -1138,12 +1138,13 @@ table_insert(Relation rel, TupleTableSlot *slot, CommandId cid,
*
* A transaction having performed a speculative insertion has to either abort,
* or finish the speculative insertion with
* table_complete_speculative(succeeded = ...).
* table_tuple_complete_speculative(succeeded = ...).
*/
static inline void
table_insert_speculative(Relation rel, TupleTableSlot *slot, CommandId cid,
int options, struct BulkInsertStateData *bistate,
uint32 specToken)
table_tuple_insert_speculative(Relation rel, TupleTableSlot *slot,
CommandId cid, int options,
struct BulkInsertStateData *bistate,
uint32 specToken)
{
rel->rd_tableam->tuple_insert_speculative(rel, slot, cid, options,
bistate, specToken);
@ -1154,8 +1155,8 @@ table_insert_speculative(Relation rel, TupleTableSlot *slot, CommandId cid,
* succeeded is true, the tuple is fully inserted, if false, it's removed.
*/
static inline void
table_complete_speculative(Relation rel, TupleTableSlot *slot,
uint32 specToken, bool succeeded)
table_tuple_complete_speculative(Relation rel, TupleTableSlot *slot,
uint32 specToken, bool succeeded)
{
rel->rd_tableam->tuple_complete_speculative(rel, slot, specToken,
succeeded);
@ -1170,7 +1171,7 @@ table_complete_speculative(Relation rel, TupleTableSlot *slot,
*
* Except for taking `nslots` tuples as input, as an array of TupleTableSlots
* in `slots`, the parameters for table_multi_insert() are the same as for
* table_insert().
* table_tuple_insert().
*
* Note: this leaks memory into the current memory context. You can create a
* temporary context before calling this, if that's a problem.
@ -1187,7 +1188,7 @@ table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
* Delete a tuple.
*
* NB: do not call this directly unless prepared to deal with
* concurrent-update conditions. Use simple_table_delete instead.
* concurrent-update conditions. Use simple_table_tuple_delete instead.
*
* Input parameters:
* relation - table to be modified (caller must hold suitable lock)
@ -1210,9 +1211,9 @@ table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
* struct TM_FailureData for additional info.
*/
static inline TM_Result
table_delete(Relation rel, ItemPointer tid, CommandId cid,
Snapshot snapshot, Snapshot crosscheck, bool wait,
TM_FailureData *tmfd, bool changingPart)
table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
Snapshot snapshot, Snapshot crosscheck, bool wait,
TM_FailureData *tmfd, bool changingPart)
{
return rel->rd_tableam->tuple_delete(rel, tid, cid,
snapshot, crosscheck,
@ -1223,7 +1224,7 @@ table_delete(Relation rel, ItemPointer tid, CommandId cid,
* Update a tuple.
*
* NB: do not call this directly unless you are prepared to deal with
* concurrent-update conditions. Use simple_table_update instead.
* concurrent-update conditions. Use simple_table_tuple_update instead.
*
* Input parameters:
* relation - table to be modified (caller must hold suitable lock)
@ -1254,10 +1255,10 @@ table_delete(Relation rel, ItemPointer tid, CommandId cid,
* for additional info.
*/
static inline TM_Result
table_update(Relation rel, ItemPointer otid, TupleTableSlot *slot,
CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait,
TM_FailureData *tmfd, LockTupleMode *lockmode,
bool *update_indexes)
table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot,
CommandId cid, Snapshot snapshot, Snapshot crosscheck,
bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode,
bool *update_indexes)
{
return rel->rd_tableam->tuple_update(rel, otid, slot,
cid, snapshot, crosscheck,
@ -1299,7 +1300,7 @@ table_update(Relation rel, ItemPointer otid, TupleTableSlot *slot,
* comments for struct TM_FailureData for additional info.
*/
static inline TM_Result
table_lock_tuple(Relation rel, ItemPointer tid, Snapshot snapshot,
table_tuple_lock(Relation rel, ItemPointer tid, Snapshot snapshot,
TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
LockWaitPolicy wait_policy, uint8 flags,
TM_FailureData *tmfd)
@ -1703,12 +1704,12 @@ table_scan_sample_next_tuple(TableScanDesc scan,
* ----------------------------------------------------------------------------
*/
extern void simple_table_insert(Relation rel, TupleTableSlot *slot);
extern void simple_table_delete(Relation rel, ItemPointer tid,
Snapshot snapshot);
extern void simple_table_update(Relation rel, ItemPointer otid,
TupleTableSlot *slot, Snapshot snapshot,
bool *update_indexes);
extern void simple_table_tuple_insert(Relation rel, TupleTableSlot *slot);
extern void simple_table_tuple_delete(Relation rel, ItemPointer tid,
Snapshot snapshot);
extern void simple_table_tuple_update(Relation rel, ItemPointer otid,
TupleTableSlot *slot, Snapshot snapshot,
bool *update_indexes);
/* ----------------------------------------------------------------------------