1997-08-29 11:05:57 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
1999-02-14 00:22:53 +01:00
|
|
|
* spi.c
|
1997-09-07 07:04:48 +02:00
|
|
|
* Server Programming Interface
|
1997-08-29 11:05:57 +02:00
|
|
|
*
|
2019-01-02 18:44:25 +01:00
|
|
|
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
|
2001-08-02 18:05:23 +02:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/backend/executor/spi.c
|
1998-12-14 06:19:16 +01:00
|
|
|
*
|
1997-08-29 11:05:57 +02:00
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
2001-08-02 18:05:23 +02:00
|
|
|
#include "postgres.h"
|
|
|
|
|
2012-08-30 22:15:44 +02:00
|
|
|
#include "access/htup_details.h"
|
1997-09-04 15:22:39 +02:00
|
|
|
#include "access/printtup.h"
|
2008-05-12 02:00:54 +02:00
|
|
|
#include "access/sysattr.h"
|
2009-01-07 14:44:37 +01:00
|
|
|
#include "access/xact.h"
|
2001-10-23 19:38:25 +02:00
|
|
|
#include "catalog/heap.h"
|
2009-01-07 14:44:37 +01:00
|
|
|
#include "catalog/pg_type.h"
|
2004-09-10 20:40:09 +02:00
|
|
|
#include "commands/trigger.h"
|
2009-01-07 14:44:37 +01:00
|
|
|
#include "executor/executor.h"
|
2001-08-02 18:05:23 +02:00
|
|
|
#include "executor/spi_priv.h"
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 21:02:14 +02:00
|
|
|
#include "miscadmin.h"
|
2009-01-07 14:44:37 +01:00
|
|
|
#include "tcop/pquery.h"
|
|
|
|
#include "tcop/utility.h"
|
|
|
|
#include "utils/builtins.h"
|
|
|
|
#include "utils/datum.h"
|
2002-01-03 21:30:47 +01:00
|
|
|
#include "utils/lsyscache.h"
|
2005-05-06 19:24:55 +02:00
|
|
|
#include "utils/memutils.h"
|
2011-02-23 18:18:09 +01:00
|
|
|
#include "utils/rel.h"
|
2008-03-26 19:48:59 +01:00
|
|
|
#include "utils/snapmgr.h"
|
2009-01-07 14:44:37 +01:00
|
|
|
#include "utils/syscache.h"
|
2004-04-01 23:28:47 +02:00
|
|
|
#include "utils/typcache.h"
|
2001-08-02 18:05:23 +02:00
|
|
|
|
1997-08-29 11:05:57 +02:00
|
|
|
|
2018-09-08 02:09:57 +02:00
|
|
|
/*
 * These global variables are part of the API for various SPI functions
 * (a horrible API choice, but it's too late now).  To reduce the risk of
 * interference between different SPI callers, we save and restore them
 * when entering/exiting a SPI nesting level.
 */
|
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's
portalPos field, FuncCallContext's call_cntr and max_calls fields,
ExecutorRun's count argument, PortalRunFetch's result, and the max number
of rows in a SPITupleTable to uint64, and deals with (I hope) all the
ensuing fallout. Some of these values were declared uint32 before, and
others "long".
I also removed PortalData's posOverflow field, since that logic seems
pretty useless given that portalPos is now always 64 bits.
The user-visible results are that command tags for SELECT etc will
correctly report tuple counts larger than 4G, as will plpgsql's
GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples
than that are still not exactly the norm, but they're becoming more
common.
Most values associated with FETCH/MOVE distances, such as PortalRun's count
argument and the count argument of most SPI functions that have one, remain
declared as "long". It's not clear whether it would be worth promoting
those to int64; but it would definitely be a large dollop of additional
API churn on top of this, and it would only help 32-bit platforms which
seem relatively less likely to see any benefit.
Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
2016-03-12 22:05:10 +01:00
|
|
|
uint64 SPI_processed = 0;
|
2001-02-19 20:49:53 +01:00
|
|
|
SPITupleTable *SPI_tuptable = NULL;
|
2018-09-08 02:09:57 +02:00
|
|
|
int SPI_result = 0;
|
2001-02-19 20:49:53 +01:00
|
|
|
|
1997-08-29 11:05:57 +02:00
|
|
|
/* Stack of SPI connection states, one entry per SPI nesting level. */
static _SPI_connection *_SPI_stack = NULL;

/* Entry of _SPI_stack currently in use (NULL when not connected). */
static _SPI_connection *_SPI_current = NULL;
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:18:54 +02:00
|
|
|
static int	_SPI_stack_depth = 0;	/* allocated size (entries) of _SPI_stack */
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
static int	_SPI_connected = -1;	/* current stack index (-1 = not connected) */
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2008-06-01 19:32:48 +02:00
|
|
|
static Portal SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
|
2009-11-04 23:26:08 +01:00
|
|
|
ParamListInfo paramLI, bool read_only);
|
2008-06-01 19:32:48 +02:00
|
|
|
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
static void _SPI_prepare_plan(const char *src, SPIPlanPtr plan);
|
|
|
|
|
|
|
|
static void _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan);
|
1997-09-06 13:23:05 +02:00
|
|
|
|
2008-04-01 05:09:30 +02:00
|
|
|
static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
|
2005-10-15 04:49:52 +02:00
|
|
|
Snapshot snapshot, Snapshot crosscheck_snapshot,
|
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's
portalPos field, FuncCallContext's call_cntr and max_calls fields,
ExecutorRun's count argument, PortalRunFetch's result, and the max number
of rows in a SPITupleTable to uint64, and deals with (I hope) all the
ensuing fallout. Some of these values were declared uint32 before, and
others "long".
I also removed PortalData's posOverflow field, since that logic seems
pretty useless given that portalPos is now always 64 bits.
The user-visible results are that command tags for SELECT etc will
correctly report tuple counts larger than 4G, as will plpgsql's GET
GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples
than that are still not exactly the norm, but they're becoming more
common.
Most values associated with FETCH/MOVE distances, such as PortalRun's count
argument and the count argument of most SPI functions that have one, remain
declared as "long". It's not clear whether it would be worth promoting
those to int64; but it would definitely be a large dollop of additional
API churn on top of this, and it would only help 32-bit platforms which
seem relatively less likely to see any benefit.
Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
2016-03-12 22:05:10 +01:00
|
|
|
bool read_only, bool fire_triggers, uint64 tcount);
|
2004-09-13 22:10:13 +02:00
|
|
|
|
2008-04-01 05:09:30 +02:00
|
|
|
static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes,
|
2011-09-16 06:42:53 +02:00
|
|
|
Datum *Values, const char *Nulls);
|
2008-04-01 05:09:30 +02:00
|
|
|
|
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's
portalPos field, FuncCallContext's call_cntr and max_calls fields,
ExecutorRun's count argument, PortalRunFetch's result, and the max number
of rows in a SPITupleTable to uint64, and deals with (I hope) all the
ensuing fallout. Some of these values were declared uint32 before, and
others "long".
I also removed PortalData's posOverflow field, since that logic seems
pretty useless given that portalPos is now always 64 bits.
The user-visible results are that command tags for SELECT etc will
correctly report tuple counts larger than 4G, as will plpgsql's
GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples
than that are still not exactly the norm, but they're becoming more
common.
Most values associated with FETCH/MOVE distances, such as PortalRun's count
argument and the count argument of most SPI functions that have one, remain
declared as "long". It's not clear whether it would be worth promoting
those to int64; but it would definitely be a large dollop of additional
API churn on top of this, and it would only help 32-bit platforms which
seem relatively less likely to see any benefit.
Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
2016-03-12 22:05:10 +01:00
|
|
|
static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);
|
1997-09-04 15:22:39 +02:00
|
|
|
|
2004-03-21 23:29:11 +01:00
|
|
|
static void _SPI_error_callback(void *arg);
|
|
|
|
|
2007-04-16 03:14:58 +02:00
|
|
|
static void _SPI_cursor_operation(Portal portal,
|
2007-11-15 22:14:46 +01:00
|
|
|
FetchDirection direction, long count,
|
|
|
|
DestReceiver *dest);
|
2001-05-21 16:22:19 +02:00
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
static SPIPlanPtr _SPI_make_plan_non_temp(SPIPlanPtr plan);
|
2007-03-16 00:12:07 +01:00
|
|
|
static SPIPlanPtr _SPI_save_plan(SPIPlanPtr plan);
|
1997-09-04 15:22:39 +02:00
|
|
|
|
2017-10-07 01:18:58 +02:00
|
|
|
static int _SPI_begin_call(bool use_exec);
|
|
|
|
static int _SPI_end_call(bool use_exec);
|
1997-09-07 07:04:48 +02:00
|
|
|
static MemoryContext _SPI_execmem(void);
|
|
|
|
static MemoryContext _SPI_procmem(void);
|
1997-09-25 14:16:05 +02:00
|
|
|
static bool _SPI_checktuples(void);
|
1997-08-29 11:05:57 +02:00
|
|
|
|
|
|
|
|
1997-09-24 10:28:37 +02:00
|
|
|
/* =================== interface functions =================== */
|
|
|
|
|
1997-08-29 11:05:57 +02:00
|
|
|
/*
 * SPI_connect
 *		Connect a procedure to the SPI manager with default options,
 *		i.e. no flags set (equivalent to SPI_connect_ext(0)).
 */
int
SPI_connect(void)
{
	/* Delegate to the extended variant with no option flags. */
	return SPI_connect_ext(0);
}
|
|
|
|
|
|
|
|
int
|
|
|
|
SPI_connect_ext(int options)
|
1997-08-29 11:05:57 +02:00
|
|
|
{
|
2004-08-29 07:07:03 +02:00
|
|
|
int newdepth;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
/* Enlarge stack if necessary */
|
1997-09-07 07:04:48 +02:00
|
|
|
if (_SPI_stack == NULL)
|
|
|
|
{
|
2004-07-01 02:52:04 +02:00
|
|
|
if (_SPI_connected != -1 || _SPI_stack_depth != 0)
|
2003-07-21 19:05:12 +02:00
|
|
|
elog(ERROR, "SPI stack corrupted");
|
2004-07-01 02:52:04 +02:00
|
|
|
newdepth = 16;
|
|
|
|
_SPI_stack = (_SPI_connection *)
|
Transaction control in PL procedures
In each of the supplied procedural languages (PL/pgSQL, PL/Perl,
PL/Python, PL/Tcl), add language-specific commit and rollback
functions/commands to control transactions in procedures in that
language. Add similar underlying functions to SPI. Some additional
cleanup so that transaction commit or abort doesn't blow away data
structures still used by the procedure call. Add execution context
tracking to CALL and DO statements so that transaction control commands
can only be issued in top-level procedure and block calls, not function
calls or other procedure or block calls.
- SPI
Add a new function SPI_connect_ext() that is like SPI_connect() but
allows passing option flags. The only option flag right now is
SPI_OPT_NONATOMIC. A nonatomic SPI connection can execute transaction
control commands, otherwise it's not allowed. This is meant to be
passed down from CALL and DO statements which themselves know in which
context they are called. A nonatomic SPI connection uses different
memory management. A normal SPI connection allocates its memory in
TopTransactionContext. For nonatomic connections we use PortalContext
instead. As the comment in SPI_connect_ext() (previously SPI_connect())
indicates, one could potentially use PortalContext in all cases, but it
seems safest to leave the existing uses alone, because this stuff is
complicated enough already.
SPI also gets new functions SPI_start_transaction(), SPI_commit(), and
SPI_rollback(), which can be used by PLs to implement their transaction
control logic.
- portalmem.c
Some adjustments were made in the code that cleans up portals at
transaction abort. The portal code could already handle a command
*committing* a transaction and continuing (e.g., VACUUM), but it was not
quite prepared for a command *aborting* a transaction and continuing.
In AtAbort_Portals(), remove the code that marks an active portal as
failed. As the comment there already predicted, this doesn't work if
the running command wants to keep running after transaction abort. And
it's actually not necessary, because pquery.c is careful to run all
portal code in a PG_TRY block and explicitly runs MarkPortalFailed() if
there is an exception. So the code in AtAbort_Portals() is never used
anyway.
In AtAbort_Portals() and AtCleanup_Portals(), we need to be careful not
to clean up active portals too much. This mirrors similar code in
PreCommit_Portals().
- PL/Perl
Gets new functions spi_commit() and spi_rollback()
- PL/pgSQL
Gets new commands COMMIT and ROLLBACK.
Update the PL/SQL porting example in the documentation to reflect that
transactions are now possible in procedures.
- PL/Python
Gets new functions plpy.commit and plpy.rollback.
- PL/Tcl
Gets new commands commit and rollback.
Reviewed-by: Andrew Dunstan <andrew.dunstan@2ndquadrant.com>
2018-01-22 14:30:16 +01:00
|
|
|
MemoryContextAlloc(TopMemoryContext,
|
2004-07-01 02:52:04 +02:00
|
|
|
newdepth * sizeof(_SPI_connection));
|
|
|
|
_SPI_stack_depth = newdepth;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2004-07-01 02:52:04 +02:00
|
|
|
if (_SPI_stack_depth <= 0 || _SPI_stack_depth <= _SPI_connected)
|
2003-07-21 19:05:12 +02:00
|
|
|
elog(ERROR, "SPI stack corrupted");
|
2004-07-01 02:52:04 +02:00
|
|
|
if (_SPI_stack_depth == _SPI_connected + 1)
|
|
|
|
{
|
|
|
|
newdepth = _SPI_stack_depth * 2;
|
|
|
|
_SPI_stack = (_SPI_connection *)
|
|
|
|
repalloc(_SPI_stack,
|
|
|
|
newdepth * sizeof(_SPI_connection));
|
|
|
|
_SPI_stack_depth = newdepth;
|
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
/* Enter new stack level */
|
1997-09-07 07:04:48 +02:00
|
|
|
_SPI_connected++;
|
2004-07-01 02:52:04 +02:00
|
|
|
Assert(_SPI_connected >= 0 && _SPI_connected < _SPI_stack_depth);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
_SPI_current = &(_SPI_stack[_SPI_connected]);
|
|
|
|
_SPI_current->processed = 0;
|
|
|
|
_SPI_current->tuptable = NULL;
|
2017-10-07 01:18:58 +02:00
|
|
|
_SPI_current->execSubid = InvalidSubTransactionId;
|
Prevent leakage of SPI tuple tables during subtransaction abort.
plpgsql often just remembers SPI-result tuple tables in local variables,
and has no mechanism for freeing them if an ereport(ERROR) causes an escape
out of the execution function whose local variable it is. In the original
coding, that wasn't a problem because the tuple table would be cleaned up
when the function's SPI context went away during transaction abort.
However, once plpgsql grew the ability to trap exceptions, repeated
trapping of errors within a function could result in significant
intra-function-call memory leakage, as illustrated in bug #8279 from
Chad Wagner.
We could fix this locally in plpgsql with a bunch of PG_TRY/PG_CATCH
coding, but that would be tedious, probably slow, and prone to bugs of
omission; moreover it would do nothing for similar risks elsewhere.
What seems like a better plan is to make SPI itself responsible for
freeing tuple tables at subtransaction abort. This patch attacks the
problem that way, keeping a list of live tuple tables within each SPI
function context. Currently, such freeing is automatic for tuple tables
made within the failed subtransaction. We might later add a SPI call to
mark a tuple table as not to be freed this way, allowing callers to opt
out; but until someone exhibits a clear use-case for such behavior, it
doesn't seem worth bothering.
A very useful side-effect of this change is that SPI_freetuptable() can
now defend itself against bad calls, such as duplicate free requests;
this should make things more robust in many places. (In particular,
this reduces the risks involved if a third-party extension contains
now-redundant SPI_freetuptable() calls in error cleanup code.)
Even though the leakage problem is of long standing, it seems imprudent
to back-patch this into stable branches, since it does represent an API
semantics change for SPI users. We'll patch this in 9.3, but live with
the leakage in older branches.
2013-07-25 22:45:43 +02:00
|
|
|
slist_init(&_SPI_current->tuptables);
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:18:54 +02:00
|
|
|
_SPI_current->procCxt = NULL; /* in case we fail to create 'em */
|
2004-09-16 22:17:49 +02:00
|
|
|
_SPI_current->execCxt = NULL;
|
2004-09-16 18:58:44 +02:00
|
|
|
_SPI_current->connectSubid = GetCurrentSubTransactionId();
|
2017-04-01 06:17:18 +02:00
|
|
|
_SPI_current->queryEnv = NULL;
|
Transaction control in PL procedures
In each of the supplied procedural languages (PL/pgSQL, PL/Perl,
PL/Python, PL/Tcl), add language-specific commit and rollback
functions/commands to control transactions in procedures in that
language. Add similar underlying functions to SPI. Some additional
cleanup so that transaction commit or abort doesn't blow away data
structures still used by the procedure call. Add execution context
tracking to CALL and DO statements so that transaction control commands
can only be issued in top-level procedure and block calls, not function
calls or other procedure or block calls.
- SPI
Add a new function SPI_connect_ext() that is like SPI_connect() but
allows passing option flags. The only option flag right now is
SPI_OPT_NONATOMIC. A nonatomic SPI connection can execute transaction
control commands, otherwise it's not allowed. This is meant to be
passed down from CALL and DO statements which themselves know in which
context they are called. A nonatomic SPI connection uses different
memory management. A normal SPI connection allocates its memory in
TopTransactionContext. For nonatomic connections we use PortalContext
instead. As the comment in SPI_connect_ext() (previously SPI_connect())
indicates, one could potentially use PortalContext in all cases, but it
seems safest to leave the existing uses alone, because this stuff is
complicated enough already.
SPI also gets new functions SPI_start_transaction(), SPI_commit(), and
SPI_rollback(), which can be used by PLs to implement their transaction
control logic.
- portalmem.c
Some adjustments were made in the code that cleans up portals at
transaction abort. The portal code could already handle a command
*committing* a transaction and continuing (e.g., VACUUM), but it was not
quite prepared for a command *aborting* a transaction and continuing.
In AtAbort_Portals(), remove the code that marks an active portal as
failed. As the comment there already predicted, this doesn't work if
the running command wants to keep running after transaction abort. And
it's actually not necessary, because pquery.c is careful to run all
portal code in a PG_TRY block and explicitly runs MarkPortalFailed() if
there is an exception. So the code in AtAbort_Portals() is never used
anyway.
In AtAbort_Portals() and AtCleanup_Portals(), we need to be careful not
to clean up active portals too much. This mirrors similar code in
PreCommit_Portals().
- PL/Perl
Gets new functions spi_commit() and spi_rollback()
- PL/pgSQL
Gets new commands COMMIT and ROLLBACK.
Update the PL/SQL porting example in the documentation to reflect that
transactions are now possible in procedures.
- PL/Python
Gets new functions plpy.commit and plpy.rollback.
- PL/Tcl
Gets new commands commit and rollback.
Reviewed-by: Andrew Dunstan <andrew.dunstan@2ndquadrant.com>
2018-01-22 14:30:16 +01:00
|
|
|
_SPI_current->atomic = (options & SPI_OPT_NONATOMIC ? false : true);
|
|
|
|
_SPI_current->internal_xact = false;
|
2018-09-08 02:09:57 +02:00
|
|
|
_SPI_current->outer_processed = SPI_processed;
|
|
|
|
_SPI_current->outer_tuptable = SPI_tuptable;
|
|
|
|
_SPI_current->outer_result = SPI_result;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-05-02 22:54:36 +02:00
|
|
|
/*
|
|
|
|
* Create memory contexts for this procedure
|
|
|
|
*
|
Transaction control in PL procedures
In each of the supplied procedural languages (PL/pgSQL, PL/Perl,
PL/Python, PL/Tcl), add language-specific commit and rollback
functions/commands to control transactions in procedures in that
language. Add similar underlying functions to SPI. Some additional
cleanup so that transaction commit or abort doesn't blow away data
structures still used by the procedure call. Add execution context
tracking to CALL and DO statements so that transaction control commands
can only be issued in top-level procedure and block calls, not function
calls or other procedure or block calls.
- SPI
Add a new function SPI_connect_ext() that is like SPI_connect() but
allows passing option flags. The only option flag right now is
SPI_OPT_NONATOMIC. A nonatomic SPI connection can execute transaction
control commands, otherwise it's not allowed. This is meant to be
passed down from CALL and DO statements which themselves know in which
context they are called. A nonatomic SPI connection uses different
memory management. A normal SPI connection allocates its memory in
TopTransactionContext. For nonatomic connections we use PortalContext
instead. As the comment in SPI_connect_ext() (previously SPI_connect())
indicates, one could potentially use PortalContext in all cases, but it
seems safest to leave the existing uses alone, because this stuff is
complicated enough already.
SPI also gets new functions SPI_start_transaction(), SPI_commit(), and
SPI_rollback(), which can be used by PLs to implement their transaction
control logic.
- portalmem.c
Some adjustments were made in the code that cleans up portals at
transaction abort. The portal code could already handle a command
*committing* a transaction and continuing (e.g., VACUUM), but it was not
quite prepared for a command *aborting* a transaction and continuing.
In AtAbort_Portals(), remove the code that marks an active portal as
failed. As the comment there already predicted, this doesn't work if
the running command wants to keep running after transaction abort. And
it's actually not necessary, because pquery.c is careful to run all
portal code in a PG_TRY block and explicitly runs MarkPortalFailed() if
there is an exception. So the code in AtAbort_Portals() is never used
anyway.
In AtAbort_Portals() and AtCleanup_Portals(), we need to be careful not
to clean up active portals too much. This mirrors similar code in
PreCommit_Portals().
- PL/Perl
Gets new functions spi_commit() and spi_rollback()
- PL/pgSQL
Gets new commands COMMIT and ROLLBACK.
Update the PL/SQL porting example in the documentation to reflect that
transactions are now possible in procedures.
- PL/Python
Gets new functions plpy.commit and plpy.rollback.
- PL/Tcl
Gets new commands commit and rollback.
Reviewed-by: Andrew Dunstan <andrew.dunstan@2ndquadrant.com>
2018-01-22 14:30:16 +01:00
|
|
|
* In atomic contexts (the normal case), we use TopTransactionContext,
|
|
|
|
* otherwise PortalContext, so that it lives across transaction
|
|
|
|
* boundaries.
|
|
|
|
*
|
|
|
|
* XXX It could be better to use PortalContext as the parent context in
|
|
|
|
* all cases, but we may not be inside a portal (consider deferred-trigger
|
|
|
|
* execution). Perhaps CurTransactionContext could be an option? For now
|
|
|
|
* it doesn't matter because we clean up explicitly in AtEOSubXact_SPI().
|
2003-05-02 22:54:36 +02:00
|
|
|
*/
|
Transaction control in PL procedures
In each of the supplied procedural languages (PL/pgSQL, PL/Perl,
PL/Python, PL/Tcl), add language-specific commit and rollback
functions/commands to control transactions in procedures in that
language. Add similar underlying functions to SPI. Some additional
cleanup so that transaction commit or abort doesn't blow away data
structures still used by the procedure call. Add execution context
tracking to CALL and DO statements so that transaction control commands
can only be issued in top-level procedure and block calls, not function
calls or other procedure or block calls.
- SPI
Add a new function SPI_connect_ext() that is like SPI_connect() but
allows passing option flags. The only option flag right now is
SPI_OPT_NONATOMIC. A nonatomic SPI connection can execute transaction
control commands, otherwise it's not allowed. This is meant to be
passed down from CALL and DO statements which themselves know in which
context they are called. A nonatomic SPI connection uses different
memory management. A normal SPI connection allocates its memory in
TopTransactionContext. For nonatomic connections we use PortalContext
instead. As the comment in SPI_connect_ext() (previously SPI_connect())
indicates, one could potentially use PortalContext in all cases, but it
seems safest to leave the existing uses alone, because this stuff is
complicated enough already.
SPI also gets new functions SPI_start_transaction(), SPI_commit(), and
SPI_rollback(), which can be used by PLs to implement their transaction
control logic.
- portalmem.c
Some adjustments were made in the code that cleans up portals at
transaction abort. The portal code could already handle a command
*committing* a transaction and continuing (e.g., VACUUM), but it was not
quite prepared for a command *aborting* a transaction and continuing.
In AtAbort_Portals(), remove the code that marks an active portal as
failed. As the comment there already predicted, this doesn't work if
the running command wants to keep running after transaction abort. And
it's actually not necessary, because pquery.c is careful to run all
portal code in a PG_TRY block and explicitly runs MarkPortalFailed() if
there is an exception. So the code in AtAbort_Portals() is never used
anyway.
In AtAbort_Portals() and AtCleanup_Portals(), we need to be careful not
to clean up active portals too much. This mirrors similar code in
PreCommit_Portals().
- PL/Perl
Gets new functions spi_commit() and spi_rollback()
- PL/pgSQL
Gets new commands COMMIT and ROLLBACK.
Update the PL/SQL porting example in the documentation to reflect that
transactions are now possible in procedures.
- PL/Python
Gets new functions plpy.commit and plpy.rollback.
- PL/Tcl
Gets new commands commit and rollback.
Reviewed-by: Andrew Dunstan <andrew.dunstan@2ndquadrant.com>
2018-01-22 14:30:16 +01:00
|
|
|
_SPI_current->procCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : PortalContext,
|
2000-06-28 05:33:33 +02:00
|
|
|
"SPI Proc",
|
Add macros to make AllocSetContextCreate() calls simpler and safer.
I found that half a dozen (nearly 5%) of our AllocSetContextCreate calls
had typos in the context-sizing parameters. While none of these led to
especially significant problems, they did create minor inefficiencies,
and it's now clear that expecting people to copy-and-paste those calls
accurately is not a great idea. Let's reduce the risk of future errors
by introducing single macros that encapsulate the common use-cases.
Three such macros are enough to cover all but two special-purpose contexts;
those two calls can be left as-is, I think.
While this patch doesn't in itself improve matters for third-party
extensions, it doesn't break anything for them either, and they can
gradually adopt the simplified notation over time.
In passing, change TopMemoryContext to use the default allocation
parameters. Formerly it could only be extended 8K at a time. That was
probably reasonable when this code was written; but nowadays we create
many more contexts than we did then, so that it's not unusual to have a
couple hundred K in TopMemoryContext, even without considering various
dubious code that sticks other things there. There seems no good reason
not to let it use growing blocks like most other contexts.
Back-patch to 9.6, mostly because that's still close enough to HEAD that
it's easy to do so, and keeping the branches in sync can be expected to
avoid some future back-patching pain. The bugs fixed by these changes
don't seem to be significant enough to justify fixing them further back.
Discussion: <21072.1472321324@sss.pgh.pa.us>
2016-08-27 23:50:38 +02:00
|
|
|
ALLOCSET_DEFAULT_SIZES);
|
Transaction control in PL procedures
In each of the supplied procedural languages (PL/pgSQL, PL/Perl,
PL/Python, PL/Tcl), add language-specific commit and rollback
functions/commands to control transactions in procedures in that
language. Add similar underlying functions to SPI. Some additional
cleanup so that transaction commit or abort doesn't blow away data
structures still used by the procedure call. Add execution context
tracking to CALL and DO statements so that transaction control commands
can only be issued in top-level procedure and block calls, not function
calls or other procedure or block calls.
- SPI
Add a new function SPI_connect_ext() that is like SPI_connect() but
allows passing option flags. The only option flag right now is
SPI_OPT_NONATOMIC. A nonatomic SPI connection can execute transaction
control commands, otherwise it's not allowed. This is meant to be
passed down from CALL and DO statements which themselves know in which
context they are called. A nonatomic SPI connection uses different
memory management. A normal SPI connection allocates its memory in
TopTransactionContext. For nonatomic connections we use PortalContext
instead. As the comment in SPI_connect_ext() (previously SPI_connect())
indicates, one could potentially use PortalContext in all cases, but it
seems safest to leave the existing uses alone, because this stuff is
complicated enough already.
SPI also gets new functions SPI_start_transaction(), SPI_commit(), and
SPI_rollback(), which can be used by PLs to implement their transaction
control logic.
- portalmem.c
Some adjustments were made in the code that cleans up portals at
transaction abort. The portal code could already handle a command
*committing* a transaction and continuing (e.g., VACUUM), but it was not
quite prepared for a command *aborting* a transaction and continuing.
In AtAbort_Portals(), remove the code that marks an active portal as
failed. As the comment there already predicted, this doesn't work if
the running command wants to keep running after transaction abort. And
it's actually not necessary, because pquery.c is careful to run all
portal code in a PG_TRY block and explicitly runs MarkPortalFailed() if
there is an exception. So the code in AtAbort_Portals() is never used
anyway.
In AtAbort_Portals() and AtCleanup_Portals(), we need to be careful not
to clean up active portals too much. This mirrors similar code in
PreCommit_Portals().
- PL/Perl
Gets new functions spi_commit() and spi_rollback()
- PL/pgSQL
Gets new commands COMMIT and ROLLBACK.
Update the PL/SQL porting example in the documentation to reflect that
transactions are now possible in procedures.
- PL/Python
Gets new functions plpy.commit and plpy.rollback.
- PL/Tcl
Gets new commands commit and rollback.
Reviewed-by: Andrew Dunstan <andrew.dunstan@2ndquadrant.com>
2018-01-22 14:30:16 +01:00
|
|
|
_SPI_current->execCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : _SPI_current->procCxt,
|
2000-06-28 05:33:33 +02:00
|
|
|
"SPI Exec",
|
Add macros to make AllocSetContextCreate() calls simpler and safer.
I found that half a dozen (nearly 5%) of our AllocSetContextCreate calls
had typos in the context-sizing parameters. While none of these led to
especially significant problems, they did create minor inefficiencies,
and it's now clear that expecting people to copy-and-paste those calls
accurately is not a great idea. Let's reduce the risk of future errors
by introducing single macros that encapsulate the common use-cases.
Three such macros are enough to cover all but two special-purpose contexts;
those two calls can be left as-is, I think.
While this patch doesn't in itself improve matters for third-party
extensions, it doesn't break anything for them either, and they can
gradually adopt the simplified notation over time.
In passing, change TopMemoryContext to use the default allocation
parameters. Formerly it could only be extended 8K at a time. That was
probably reasonable when this code was written; but nowadays we create
many more contexts than we did then, so that it's not unusual to have a
couple hundred K in TopMemoryContext, even without considering various
dubious code that sticks other things there. There seems no good reason
not to let it use growing blocks like most other contexts.
Back-patch to 9.6, mostly because that's still close enough to HEAD that
it's easy to do so, and keeping the branches in sync can be expected to
avoid some future back-patching pain. The bugs fixed by these changes
don't seem to be significant enough to justify fixing them further back.
Discussion: <21072.1472321324@sss.pgh.pa.us>
2016-08-27 23:50:38 +02:00
|
|
|
ALLOCSET_DEFAULT_SIZES);
|
2000-06-28 05:33:33 +02:00
|
|
|
/* ... and switch to procedure's context */
|
|
|
|
_SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2018-09-08 02:09:57 +02:00
|
|
|
/*
|
|
|
|
* Reset API global variables so that current caller cannot accidentally
|
|
|
|
* depend on state of an outer caller.
|
|
|
|
*/
|
|
|
|
SPI_processed = 0;
|
|
|
|
SPI_tuptable = NULL;
|
|
|
|
SPI_result = 0;
|
|
|
|
|
1998-09-01 05:29:17 +02:00
|
|
|
return SPI_OK_CONNECT;
|
1997-08-29 11:05:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2001-06-01 21:43:55 +02:00
|
|
|
SPI_finish(void)
|
1997-08-29 11:05:57 +02:00
|
|
|
{
|
1997-09-08 04:41:22 +02:00
|
|
|
int res;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2017-10-07 01:18:58 +02:00
|
|
|
res = _SPI_begin_call(false); /* just check we're connected */
|
1997-09-07 07:04:48 +02:00
|
|
|
if (res < 0)
|
1998-09-01 05:29:17 +02:00
|
|
|
return res;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
/* Restore memory context as it was before procedure call */
|
|
|
|
MemoryContextSwitchTo(_SPI_current->savedcxt);
|
2000-06-28 05:33:33 +02:00
|
|
|
|
Prevent leakage of SPI tuple tables during subtransaction abort.
plpgsql often just remembers SPI-result tuple tables in local variables,
and has no mechanism for freeing them if an ereport(ERROR) causes an escape
out of the execution function whose local variable it is. In the original
coding, that wasn't a problem because the tuple table would be cleaned up
when the function's SPI context went away during transaction abort.
However, once plpgsql grew the ability to trap exceptions, repeated
trapping of errors within a function could result in significant
intra-function-call memory leakage, as illustrated in bug #8279 from
Chad Wagner.
We could fix this locally in plpgsql with a bunch of PG_TRY/PG_CATCH
coding, but that would be tedious, probably slow, and prone to bugs of
omission; moreover it would do nothing for similar risks elsewhere.
What seems like a better plan is to make SPI itself responsible for
freeing tuple tables at subtransaction abort. This patch attacks the
problem that way, keeping a list of live tuple tables within each SPI
function context. Currently, such freeing is automatic for tuple tables
made within the failed subtransaction. We might later add a SPI call to
mark a tuple table as not to be freed this way, allowing callers to opt
out; but until someone exhibits a clear use-case for such behavior, it
doesn't seem worth bothering.
A very useful side-effect of this change is that SPI_freetuptable() can
now defend itself against bad calls, such as duplicate free requests;
this should make things more robust in many places. (In particular,
this reduces the risks involved if a third-party extension contains
now-redundant SPI_freetuptable() calls in error cleanup code.)
Even though the leakage problem is of long standing, it seems imprudent
to back-patch this into stable branches, since it does represent an API
semantics change for SPI users. We'll patch this in 9.3, but live with
the leakage in older branches.
2013-07-25 22:45:43 +02:00
|
|
|
/* Release memory used in procedure call (including tuptables) */
|
2000-06-28 05:33:33 +02:00
|
|
|
MemoryContextDelete(_SPI_current->execCxt);
|
2004-09-16 22:17:49 +02:00
|
|
|
_SPI_current->execCxt = NULL;
|
2000-06-28 05:33:33 +02:00
|
|
|
MemoryContextDelete(_SPI_current->procCxt);
|
2004-09-16 22:17:49 +02:00
|
|
|
_SPI_current->procCxt = NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-01-29 16:24:46 +01:00
|
|
|
/*
|
2018-09-08 02:09:57 +02:00
|
|
|
* Restore outer API variables, especially SPI_tuptable which is probably
|
2003-01-29 16:24:46 +01:00
|
|
|
* pointing at a just-deleted tuptable
|
|
|
|
*/
|
2018-09-08 02:09:57 +02:00
|
|
|
SPI_processed = _SPI_current->outer_processed;
|
|
|
|
SPI_tuptable = _SPI_current->outer_tuptable;
|
|
|
|
SPI_result = _SPI_current->outer_result;
|
2003-01-29 16:24:46 +01:00
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
/* Exit stack level */
|
1997-09-07 07:04:48 +02:00
|
|
|
_SPI_connected--;
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
if (_SPI_connected < 0)
|
2000-06-28 05:33:33 +02:00
|
|
|
_SPI_current = NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
else
|
|
|
|
_SPI_current = &(_SPI_stack[_SPI_connected]);
|
|
|
|
|
1998-09-01 05:29:17 +02:00
|
|
|
return SPI_OK_FINISH;
|
1997-08-29 11:05:57 +02:00
|
|
|
}
|
|
|
|
|
Transaction control in PL procedures
In each of the supplied procedural languages (PL/pgSQL, PL/Perl,
PL/Python, PL/Tcl), add language-specific commit and rollback
functions/commands to control transactions in procedures in that
language. Add similar underlying functions to SPI. Some additional
cleanup so that transaction commit or abort doesn't blow away data
structures still used by the procedure call. Add execution context
tracking to CALL and DO statements so that transaction control commands
can only be issued in top-level procedure and block calls, not function
calls or other procedure or block calls.
- SPI
Add a new function SPI_connect_ext() that is like SPI_connect() but
allows passing option flags. The only option flag right now is
SPI_OPT_NONATOMIC. A nonatomic SPI connection can execute transaction
control commands, otherwise it's not allowed. This is meant to be
passed down from CALL and DO statements which themselves know in which
context they are called. A nonatomic SPI connection uses different
memory management. A normal SPI connection allocates its memory in
TopTransactionContext. For nonatomic connections we use PortalContext
instead. As the comment in SPI_connect_ext() (previously SPI_connect())
indicates, one could potentially use PortalContext in all cases, but it
seems safest to leave the existing uses alone, because this stuff is
complicated enough already.
SPI also gets new functions SPI_start_transaction(), SPI_commit(), and
SPI_rollback(), which can be used by PLs to implement their transaction
control logic.
- portalmem.c
Some adjustments were made in the code that cleans up portals at
transaction abort. The portal code could already handle a command
*committing* a transaction and continuing (e.g., VACUUM), but it was not
quite prepared for a command *aborting* a transaction and continuing.
In AtAbort_Portals(), remove the code that marks an active portal as
failed. As the comment there already predicted, this doesn't work if
the running command wants to keep running after transaction abort. And
it's actually not necessary, because pquery.c is careful to run all
portal code in a PG_TRY block and explicitly runs MarkPortalFailed() if
there is an exception. So the code in AtAbort_Portals() is never used
anyway.
In AtAbort_Portals() and AtCleanup_Portals(), we need to be careful not
to clean up active portals too much. This mirrors similar code in
PreCommit_Portals().
- PL/Perl
Gets new functions spi_commit() and spi_rollback()
- PL/pgSQL
Gets new commands COMMIT and ROLLBACK.
Update the PL/SQL porting example in the documentation to reflect that
transactions are now possible in procedures.
- PL/Python
Gets new functions plpy.commit and plpy.rollback.
- PL/Tcl
Gets new commands commit and rollback.
Reviewed-by: Andrew Dunstan <andrew.dunstan@2ndquadrant.com>
2018-01-22 14:30:16 +01:00
|
|
|
void
|
|
|
|
SPI_start_transaction(void)
|
|
|
|
{
|
|
|
|
MemoryContext oldcontext = CurrentMemoryContext;
|
|
|
|
|
|
|
|
StartTransactionCommand();
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
SPI_commit(void)
|
|
|
|
{
|
|
|
|
MemoryContext oldcontext = CurrentMemoryContext;
|
|
|
|
|
|
|
|
if (_SPI_current->atomic)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
|
|
|
|
errmsg("invalid transaction termination")));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This restriction is required by PLs implemented on top of SPI. They
|
|
|
|
* use subtransactions to establish exception blocks that are supposed to
|
|
|
|
* be rolled back together if there is an error. Terminating the
|
|
|
|
* top-level transaction in such a block violates that idea. A future PL
|
|
|
|
* implementation might have different ideas about this, in which case
|
|
|
|
* this restriction would have to be refined or the check possibly be
|
|
|
|
* moved out of SPI into the PLs.
|
|
|
|
*/
|
|
|
|
if (IsSubTransaction())
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
|
|
|
|
errmsg("cannot commit while a subtransaction is active")));
|
|
|
|
|
|
|
|
_SPI_current->internal_xact = true;
|
|
|
|
|
2018-06-29 13:28:39 +02:00
|
|
|
/*
|
|
|
|
* Before committing, pop all active snapshots to avoid error about
|
|
|
|
* "snapshot %p still active".
|
|
|
|
*/
|
|
|
|
while (ActiveSnapshotSet())
|
Transaction control in PL procedures
In each of the supplied procedural languages (PL/pgSQL, PL/Perl,
PL/Python, PL/Tcl), add language-specific commit and rollback
functions/commands to control transactions in procedures in that
language. Add similar underlying functions to SPI. Some additional
cleanup so that transaction commit or abort doesn't blow away data
structures still used by the procedure call. Add execution context
tracking to CALL and DO statements so that transaction control commands
can only be issued in top-level procedure and block calls, not function
calls or other procedure or block calls.
- SPI
Add a new function SPI_connect_ext() that is like SPI_connect() but
allows passing option flags. The only option flag right now is
SPI_OPT_NONATOMIC. A nonatomic SPI connection can execute transaction
control commands, otherwise it's not allowed. This is meant to be
passed down from CALL and DO statements which themselves know in which
context they are called. A nonatomic SPI connection uses different
memory management. A normal SPI connection allocates its memory in
TopTransactionContext. For nonatomic connections we use PortalContext
instead. As the comment in SPI_connect_ext() (previously SPI_connect())
indicates, one could potentially use PortalContext in all cases, but it
seems safest to leave the existing uses alone, because this stuff is
complicated enough already.
SPI also gets new functions SPI_start_transaction(), SPI_commit(), and
SPI_rollback(), which can be used by PLs to implement their transaction
control logic.
- portalmem.c
Some adjustments were made in the code that cleans up portals at
transaction abort. The portal code could already handle a command
*committing* a transaction and continuing (e.g., VACUUM), but it was not
quite prepared for a command *aborting* a transaction and continuing.
In AtAbort_Portals(), remove the code that marks an active portal as
failed. As the comment there already predicted, this doesn't work if
the running command wants to keep running after transaction abort. And
it's actually not necessary, because pquery.c is careful to run all
portal code in a PG_TRY block and explicitly runs MarkPortalFailed() if
there is an exception. So the code in AtAbort_Portals() is never used
anyway.
In AtAbort_Portals() and AtCleanup_Portals(), we need to be careful not
to clean up active portals too much. This mirrors similar code in
PreCommit_Portals().
- PL/Perl
Gets new functions spi_commit() and spi_rollback()
- PL/pgSQL
Gets new commands COMMIT and ROLLBACK.
Update the PL/SQL porting example in the documentation to reflect that
transactions are now possible in procedures.
- PL/Python
Gets new functions plpy.commit and plpy.rollback.
- PL/Tcl
Gets new commands commit and rollback.
Reviewed-by: Andrew Dunstan <andrew.dunstan@2ndquadrant.com>
2018-01-22 14:30:16 +01:00
|
|
|
PopActiveSnapshot();
|
2018-06-29 13:28:39 +02:00
|
|
|
|
Transaction control in PL procedures
In each of the supplied procedural languages (PL/pgSQL, PL/Perl,
PL/Python, PL/Tcl), add language-specific commit and rollback
functions/commands to control transactions in procedures in that
language. Add similar underlying functions to SPI. Some additional
cleanup so that transaction commit or abort doesn't blow away data
structures still used by the procedure call. Add execution context
tracking to CALL and DO statements so that transaction control commands
can only be issued in top-level procedure and block calls, not function
calls or other procedure or block calls.
- SPI
Add a new function SPI_connect_ext() that is like SPI_connect() but
allows passing option flags. The only option flag right now is
SPI_OPT_NONATOMIC. A nonatomic SPI connection can execute transaction
control commands, otherwise it's not allowed. This is meant to be
passed down from CALL and DO statements which themselves know in which
context they are called. A nonatomic SPI connection uses different
memory management. A normal SPI connection allocates its memory in
TopTransactionContext. For nonatomic connections we use PortalContext
instead. As the comment in SPI_connect_ext() (previously SPI_connect())
indicates, one could potentially use PortalContext in all cases, but it
seems safest to leave the existing uses alone, because this stuff is
complicated enough already.
SPI also gets new functions SPI_start_transaction(), SPI_commit(), and
SPI_rollback(), which can be used by PLs to implement their transaction
control logic.
- portalmem.c
Some adjustments were made in the code that cleans up portals at
transaction abort. The portal code could already handle a command
*committing* a transaction and continuing (e.g., VACUUM), but it was not
quite prepared for a command *aborting* a transaction and continuing.
In AtAbort_Portals(), remove the code that marks an active portal as
failed. As the comment there already predicted, this doesn't work if
the running command wants to keep running after transaction abort. And
it's actually not necessary, because pquery.c is careful to run all
portal code in a PG_TRY block and explicitly runs MarkPortalFailed() if
there is an exception. So the code in AtAbort_Portals() is never used
anyway.
In AtAbort_Portals() and AtCleanup_Portals(), we need to be careful not
to clean up active portals too much. This mirrors similar code in
PreCommit_Portals().
- PL/Perl
Gets new functions spi_commit() and spi_rollback()
- PL/pgSQL
Gets new commands COMMIT and ROLLBACK.
Update the PL/SQL porting example in the documentation to reflect that
transactions are now possible in procedures.
- PL/Python
Gets new functions plpy.commit and plpy.rollback.
- PL/Tcl
Gets new commands commit and rollback.
Reviewed-by: Andrew Dunstan <andrew.dunstan@2ndquadrant.com>
2018-01-22 14:30:16 +01:00
|
|
|
CommitTransactionCommand();
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
|
|
|
|
_SPI_current->internal_xact = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * SPI_rollback -- abort the current transaction on behalf of a procedure
 * running in a nonatomic SPI context.  Only allowed when no subtransaction
 * is active and the connection permits transaction control.
 */
void
SPI_rollback(void)
{
	/* Remember caller's context; the abort will switch contexts under us */
	MemoryContext oldcontext = CurrentMemoryContext;

	/* Transaction control is only possible in nonatomic (procedure) calls */
	if (_SPI_current->atomic)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
				 errmsg("invalid transaction termination")));

	/* see under SPI_commit() */
	if (IsSubTransaction())
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION),
				 errmsg("cannot roll back while a subtransaction is active")));

	/* Flag this as an SPI-internal xact end so AtEOXact_SPI() leaves us be */
	_SPI_current->internal_xact = true;

	AbortCurrentTransaction();
	/* Restore the caller's memory context after the abort */
	MemoryContextSwitchTo(oldcontext);

	_SPI_current->internal_xact = false;
}
|
|
|
|
|
2018-05-02 22:50:03 +02:00
|
|
|
/*
 * Clean up SPI state.  Called on transaction end (of non-SPI-internal
 * transactions) and when returning to the main loop on error.
 */
void
SPICleanup(void)
{
	/* Drop the whole SPI connection stack */
	_SPI_current = NULL;
	_SPI_connected = -1;
	/* Reset API global variables, too */
	SPI_processed = 0;
	SPI_tuptable = NULL;
	SPI_result = 0;
}
|
|
|
|
|
2000-06-28 05:33:33 +02:00
|
|
|
/*
 * Clean up SPI state at transaction commit or abort.
 */
void
AtEOXact_SPI(bool isCommit)
{
	/* Do nothing if the transaction end was initiated by SPI. */
	if (_SPI_current && _SPI_current->internal_xact)
		return;

	/* A non-empty stack at commit implies someone missed an SPI_finish() */
	if (isCommit && _SPI_connected != -1)
		ereport(WARNING,
				(errcode(ERRCODE_WARNING),
				 errmsg("transaction left non-empty SPI stack"),
				 errhint("Check for missing \"SPI_finish\" calls.")));

	SPICleanup();
}
|
|
|
|
|
2004-07-01 02:52:04 +02:00
|
|
|
/*
 * Clean up SPI state at subtransaction commit or abort.
 *
 * During commit, there shouldn't be any unclosed entries remaining from
 * the current subtransaction; we emit a warning if any are found.
 */
void
AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
{
	bool		found = false;

	/* Pop any stack entries that were opened inside the ending subxact */
	while (_SPI_connected >= 0)
	{
		_SPI_connection *connection = &(_SPI_stack[_SPI_connected]);

		if (connection->connectSubid != mySubid)
			break;				/* couldn't be any underneath it either */

		/* Leave connections of SPI-driven transactions alone */
		if (connection->internal_xact)
			break;

		found = true;

		/*
		 * Release procedure memory explicitly (see note in SPI_connect)
		 */
		if (connection->execCxt)
		{
			MemoryContextDelete(connection->execCxt);
			connection->execCxt = NULL;
		}
		if (connection->procCxt)
		{
			MemoryContextDelete(connection->procCxt);
			connection->procCxt = NULL;
		}

		/*
		 * Restore outer global variables and pop the stack entry.  Unlike
		 * SPI_finish(), we don't risk switching to memory contexts that might
		 * be already gone.
		 */
		SPI_processed = connection->outer_processed;
		SPI_tuptable = connection->outer_tuptable;
		SPI_result = connection->outer_result;

		_SPI_connected--;
		if (_SPI_connected < 0)
			_SPI_current = NULL;
		else
			_SPI_current = &(_SPI_stack[_SPI_connected]);
	}

	if (found && isCommit)
		ereport(WARNING,
				(errcode(ERRCODE_WARNING),
				 errmsg("subtransaction left non-empty SPI stack"),
				 errhint("Check for missing \"SPI_finish\" calls.")));

	/*
	 * If we are aborting a subtransaction and there is an open SPI context
	 * surrounding the subxact, clean up to prevent memory leakage.
	 */
	if (_SPI_current && !isCommit)
	{
		slist_mutable_iter siter;

		/*
		 * Throw away executor state if current executor operation was started
		 * within current subxact (essentially, force a _SPI_end_call(true)).
		 */
		if (_SPI_current->execSubid >= mySubid)
		{
			_SPI_current->execSubid = InvalidSubTransactionId;
			MemoryContextResetAndDeleteChildren(_SPI_current->execCxt);
		}

		/* throw away any tuple tables created within current subxact */
		slist_foreach_modify(siter, &_SPI_current->tuptables)
		{
			SPITupleTable *tuptable;

			tuptable = slist_container(SPITupleTable, next, siter.cur);
			if (tuptable->subid >= mySubid)
			{
				/*
				 * If we used SPI_freetuptable() here, its internal search of
				 * the tuptables list would make this operation O(N^2).
				 * Instead, just free the tuptable manually.  This should
				 * match what SPI_freetuptable() does.
				 */
				slist_delete_current(&siter);
				if (tuptable == _SPI_current->tuptable)
					_SPI_current->tuptable = NULL;
				if (tuptable == SPI_tuptable)
					SPI_tuptable = NULL;
				MemoryContextDelete(tuptable->tuptabcxt);
			}
		}
	}
}
|
|
|
|
|
2018-10-08 22:16:36 +02:00
|
|
|
/*
|
|
|
|
* Are we executing inside a procedure (that is, a nonatomic SPI context)?
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
SPI_inside_nonatomic_context(void)
|
|
|
|
{
|
|
|
|
if (_SPI_current == NULL)
|
|
|
|
return false; /* not in any SPI context at all */
|
|
|
|
if (_SPI_current->atomic)
|
|
|
|
return false; /* it's atomic (ie function not procedure) */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2004-07-01 02:52:04 +02:00
|
|
|
|
2005-05-02 02:37:07 +02:00
|
|
|
/* Parse, plan, and execute a query string */
int
SPI_execute(const char *src, bool read_only, long tcount)
{
	_SPI_plan	plan;
	int			res;

	if (src == NULL || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	/* Build a throwaway one-shot plan; no caching overhead is wanted here */
	memset(&plan, 0, sizeof(_SPI_plan));
	plan.magic = _SPI_PLAN_MAGIC;
	plan.cursor_options = CURSOR_OPT_PARALLEL_OK;

	_SPI_prepare_oneshot_plan(src, &plan);

	res = _SPI_execute_plan(&plan, NULL,
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
|
|
|
|
|
2004-09-13 22:10:13 +02:00
|
|
|
/* Obsolete version of SPI_execute */
|
1997-09-07 07:04:48 +02:00
|
|
|
int
|
2005-05-02 02:37:07 +02:00
|
|
|
SPI_exec(const char *src, long tcount)
|
2004-09-13 22:10:13 +02:00
|
|
|
{
|
|
|
|
return SPI_execute(src, false, tcount);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Execute a previously prepared plan */
int
SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
				 bool read_only, long tcount)
{
	int			res;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	/* Caller must supply parameter values if the plan has parameters */
	if (plan->nargs > 0 && Values == NULL)
		return SPI_ERROR_PARAM;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	/* Convert old-style Values/Nulls arrays into a ParamListInfo */
	res = _SPI_execute_plan(plan,
							_SPI_convert_params(plan->nargs, plan->argtypes,
												Values, Nulls),
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
|
|
|
|
|
2004-09-13 22:10:13 +02:00
|
|
|
/* Obsolete version of SPI_execute_plan */
|
|
|
|
int
|
2007-03-16 00:12:07 +01:00
|
|
|
SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
|
2004-09-13 22:10:13 +02:00
|
|
|
{
|
|
|
|
return SPI_execute_plan(plan, Values, Nulls, false, tcount);
|
|
|
|
}
|
|
|
|
|
2009-11-04 23:26:08 +01:00
|
|
|
/* Execute a previously prepared plan */
int
SPI_execute_plan_with_paramlist(SPIPlanPtr plan, ParamListInfo params,
								bool read_only, long tcount)
{
	int			res;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	/* params may be NULL; _SPI_execute_plan treats that as "no parameters" */
	res = _SPI_execute_plan(plan, params,
							InvalidSnapshot, InvalidSnapshot,
							read_only, true, tcount);

	_SPI_end_call(true);
	return res;
}
|
|
|
|
|
2003-09-25 20:58:36 +02:00
|
|
|
/*
 * SPI_execute_snapshot -- identical to SPI_execute_plan, except that we allow
 * the caller to specify exactly which snapshots to use, which will be
 * registered here.  Also, the caller may specify that AFTER triggers should be
 * queued as part of the outer query rather than being fired immediately at the
 * end of the command.
 *
 * This is currently not documented in spi.sgml because it is only intended
 * for use by RI triggers.
 *
 * Passing snapshot == InvalidSnapshot will select the normal behavior of
 * fetching a new snapshot for each query.
 */
int
SPI_execute_snapshot(SPIPlanPtr plan,
					 Datum *Values, const char *Nulls,
					 Snapshot snapshot, Snapshot crosscheck_snapshot,
					 bool read_only, bool fire_triggers, long tcount)
{
	int			res;

	if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
		return SPI_ERROR_ARGUMENT;

	/* Caller must supply parameter values if the plan has parameters */
	if (plan->nargs > 0 && Values == NULL)
		return SPI_ERROR_PARAM;

	res = _SPI_begin_call(true);
	if (res < 0)
		return res;

	/* Convert old-style Values/Nulls arrays into a ParamListInfo */
	res = _SPI_execute_plan(plan,
							_SPI_convert_params(plan->nargs, plan->argtypes,
												Values, Nulls),
							snapshot, crosscheck_snapshot,
							read_only, fire_triggers, tcount);

	_SPI_end_call(true);
	return res;
}
|
|
|
|
|
2008-04-01 05:09:30 +02:00
|
|
|
/*
|
|
|
|
* SPI_execute_with_args -- plan and execute a query with supplied arguments
|
|
|
|
*
|
2011-09-16 06:42:53 +02:00
|
|
|
* This is functionally equivalent to SPI_prepare followed by
|
|
|
|
* SPI_execute_plan.
|
2008-04-01 05:09:30 +02:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
SPI_execute_with_args(const char *src,
|
|
|
|
int nargs, Oid *argtypes,
|
|
|
|
Datum *Values, const char *Nulls,
|
|
|
|
bool read_only, long tcount)
|
|
|
|
{
|
|
|
|
int res;
|
|
|
|
_SPI_plan plan;
|
|
|
|
ParamListInfo paramLI;
|
|
|
|
|
|
|
|
if (src == NULL || nargs < 0 || tcount < 0)
|
|
|
|
return SPI_ERROR_ARGUMENT;
|
|
|
|
|
|
|
|
if (nargs > 0 && (argtypes == NULL || Values == NULL))
|
|
|
|
return SPI_ERROR_PARAM;
|
|
|
|
|
|
|
|
res = _SPI_begin_call(true);
|
|
|
|
if (res < 0)
|
|
|
|
return res;
|
|
|
|
|
|
|
|
memset(&plan, 0, sizeof(_SPI_plan));
|
|
|
|
plan.magic = _SPI_PLAN_MAGIC;
|
Improve access to parallel query from procedural languages.
In SQL, the ability to use parallel query was previous contingent on
fcache->readonly_func, which is only set for non-volatile functions;
but the volatility of a function has no bearing on whether queries
inside it can use parallelism. Remove that condition.
SPI_execute and SPI_execute_with_args always run the plan just once,
though not necessarily to completion. Given the changes in commit
691b8d59281b5177f16fe80858df921f77a8e955, it's sensible to pass
CURSOR_OPT_PARALLEL_OK here, so do that. This improves access to
parallelism for any caller that uses these functions to execute
queries. Such callers include plperl, plpython, pltcl, and plpgsql,
though it's not the case that they all use these functions
exclusively.
In plpgsql, allow parallel query for plain SELECT queries (as
opposed to PERFORM, which already worked) and for plain expressions
(which probably won't go through the executor at all, because they
will likely be simple expressions, but if they do then this helps).
Rafia Sabih and Robert Haas, reviewed by Dilip Kumar and Amit Kapila
Discussion: http://postgr.es/m/CAOGQiiMfJ+4SQwgG=6CVHWoisiU0+7jtXSuiyXBM3y=A=eJzmg@mail.gmail.com
2017-03-24 19:46:33 +01:00
|
|
|
plan.cursor_options = CURSOR_OPT_PARALLEL_OK;
|
2008-04-01 05:09:30 +02:00
|
|
|
plan.nargs = nargs;
|
|
|
|
plan.argtypes = argtypes;
|
2009-11-04 23:26:08 +01:00
|
|
|
plan.parserSetup = NULL;
|
|
|
|
plan.parserSetupArg = NULL;
|
2008-04-01 05:09:30 +02:00
|
|
|
|
|
|
|
paramLI = _SPI_convert_params(nargs, argtypes,
|
2011-09-16 06:42:53 +02:00
|
|
|
Values, Nulls);
|
2008-04-01 05:09:30 +02:00
|
|
|
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
_SPI_prepare_oneshot_plan(src, &plan);
|
2008-04-01 05:09:30 +02:00
|
|
|
|
|
|
|
res = _SPI_execute_plan(&plan, paramLI,
|
|
|
|
InvalidSnapshot, InvalidSnapshot,
|
|
|
|
read_only, true, tcount);
|
|
|
|
|
|
|
|
_SPI_end_call(true);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
SPIPlanPtr
|
2002-12-30 23:10:54 +01:00
|
|
|
SPI_prepare(const char *src, int nargs, Oid *argtypes)
|
2007-04-16 03:14:58 +02:00
|
|
|
{
|
|
|
|
return SPI_prepare_cursor(src, nargs, argtypes, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
SPIPlanPtr
|
|
|
|
SPI_prepare_cursor(const char *src, int nargs, Oid *argtypes,
|
|
|
|
int cursorOptions)
|
1997-09-04 15:22:39 +02:00
|
|
|
{
|
2004-03-21 23:29:11 +01:00
|
|
|
_SPI_plan plan;
|
2007-03-16 00:12:07 +01:00
|
|
|
SPIPlanPtr result;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1997-09-26 15:57:39 +02:00
|
|
|
if (src == NULL || nargs < 0 || (nargs > 0 && argtypes == NULL))
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_ARGUMENT;
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
SPI_result = _SPI_begin_call(true);
|
|
|
|
if (SPI_result < 0)
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
memset(&plan, 0, sizeof(_SPI_plan));
|
|
|
|
plan.magic = _SPI_PLAN_MAGIC;
|
2007-04-16 19:21:24 +02:00
|
|
|
plan.cursor_options = cursorOptions;
|
2004-03-21 23:29:11 +01:00
|
|
|
plan.nargs = nargs;
|
|
|
|
plan.argtypes = argtypes;
|
2009-11-04 23:26:08 +01:00
|
|
|
plan.parserSetup = NULL;
|
|
|
|
plan.parserSetupArg = NULL;
|
|
|
|
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
_SPI_prepare_plan(src, &plan);
|
2009-11-04 23:26:08 +01:00
|
|
|
|
|
|
|
/* copy plan to procedure context */
|
2011-09-16 06:42:53 +02:00
|
|
|
result = _SPI_make_plan_non_temp(&plan);
|
2009-11-04 23:26:08 +01:00
|
|
|
|
|
|
|
_SPI_end_call(true);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
SPIPlanPtr
|
|
|
|
SPI_prepare_params(const char *src,
|
|
|
|
ParserSetupHook parserSetup,
|
|
|
|
void *parserSetupArg,
|
|
|
|
int cursorOptions)
|
|
|
|
{
|
|
|
|
_SPI_plan plan;
|
|
|
|
SPIPlanPtr result;
|
|
|
|
|
|
|
|
if (src == NULL)
|
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_ARGUMENT;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
SPI_result = _SPI_begin_call(true);
|
|
|
|
if (SPI_result < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
memset(&plan, 0, sizeof(_SPI_plan));
|
|
|
|
plan.magic = _SPI_PLAN_MAGIC;
|
|
|
|
plan.cursor_options = cursorOptions;
|
|
|
|
plan.nargs = 0;
|
|
|
|
plan.argtypes = NULL;
|
|
|
|
plan.parserSetup = parserSetup;
|
|
|
|
plan.parserSetupArg = parserSetupArg;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
_SPI_prepare_plan(src, &plan);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2004-09-13 22:10:13 +02:00
|
|
|
/* copy plan to procedure context */
|
2011-09-16 06:42:53 +02:00
|
|
|
result = _SPI_make_plan_non_temp(&plan);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
_SPI_end_call(true);
|
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
return result;
|
1997-09-06 13:23:05 +02:00
|
|
|
}
|
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
int
|
|
|
|
SPI_keepplan(SPIPlanPtr plan)
|
|
|
|
{
|
|
|
|
ListCell *lc;
|
|
|
|
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC ||
|
|
|
|
plan->saved || plan->oneshot)
|
2011-09-16 06:42:53 +02:00
|
|
|
return SPI_ERROR_ARGUMENT;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark it saved, reparent it under CacheMemoryContext, and mark all the
|
|
|
|
* component CachedPlanSources as saved. This sequence cannot fail
|
|
|
|
* partway through, so there's no risk of long-term memory leakage.
|
|
|
|
*/
|
|
|
|
plan->saved = true;
|
|
|
|
MemoryContextSetParent(plan->plancxt, CacheMemoryContext);
|
|
|
|
|
|
|
|
foreach(lc, plan->plancache_list)
|
|
|
|
{
|
|
|
|
CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
|
|
|
|
|
|
|
|
SaveCachedPlan(plansource);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
SPIPlanPtr
|
|
|
|
SPI_saveplan(SPIPlanPtr plan)
|
1997-09-06 13:23:05 +02:00
|
|
|
{
|
2007-03-16 00:12:07 +01:00
|
|
|
SPIPlanPtr newplan;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_ARGUMENT;
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:18:54 +02:00
|
|
|
SPI_result = _SPI_begin_call(false); /* don't change context */
|
1997-09-07 07:04:48 +02:00
|
|
|
if (SPI_result < 0)
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
newplan = _SPI_save_plan(plan);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
SPI_result = _SPI_end_call(false);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
return newplan;
|
1997-09-06 13:23:05 +02:00
|
|
|
}
|
|
|
|
|
2001-05-21 16:22:19 +02:00
|
|
|
int
|
2007-03-16 00:12:07 +01:00
|
|
|
SPI_freeplan(SPIPlanPtr plan)
|
2001-05-21 16:22:19 +02:00
|
|
|
{
|
2011-09-16 06:42:53 +02:00
|
|
|
ListCell *lc;
|
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
|
2001-05-21 16:22:19 +02:00
|
|
|
return SPI_ERROR_ARGUMENT;
|
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
/* Release the plancache entries */
|
|
|
|
foreach(lc, plan->plancache_list)
|
2007-03-16 00:12:07 +01:00
|
|
|
{
|
2011-09-16 06:42:53 +02:00
|
|
|
CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
|
2007-03-16 00:12:07 +01:00
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
DropCachedPlan(plansource);
|
2007-03-16 00:12:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Now get rid of the _SPI_plan and subsidiary data in its plancxt */
|
|
|
|
MemoryContextDelete(plan->plancxt);
|
|
|
|
|
2001-05-21 16:22:19 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
1997-09-12 10:37:52 +02:00
|
|
|
HeapTuple
|
|
|
|
SPI_copytuple(HeapTuple tuple)
|
|
|
|
{
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
MemoryContext oldcxt;
|
1997-09-12 10:37:52 +02:00
|
|
|
HeapTuple ctuple;
|
|
|
|
|
|
|
|
if (tuple == NULL)
|
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_ARGUMENT;
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
1997-09-12 10:37:52 +02:00
|
|
|
}
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
if (_SPI_current == NULL)
|
1997-09-12 10:37:52 +02:00
|
|
|
{
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
SPI_result = SPI_ERROR_UNCONNECTED;
|
|
|
|
return NULL;
|
1997-09-12 10:37:52 +02:00
|
|
|
}
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);
|
|
|
|
|
1997-09-12 10:37:52 +02:00
|
|
|
ctuple = heap_copytuple(tuple);
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
MemoryContextSwitchTo(oldcxt);
|
1997-09-12 10:37:52 +02:00
|
|
|
|
1998-09-01 05:29:17 +02:00
|
|
|
return ctuple;
|
1997-09-12 10:37:52 +02:00
|
|
|
}
|
|
|
|
|
2004-04-01 23:28:47 +02:00
|
|
|
HeapTupleHeader
|
|
|
|
SPI_returntuple(HeapTuple tuple, TupleDesc tupdesc)
|
2001-08-02 20:08:43 +02:00
|
|
|
{
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
MemoryContext oldcxt;
|
2004-08-29 07:07:03 +02:00
|
|
|
HeapTupleHeader dtup;
|
2001-11-05 20:41:56 +01:00
|
|
|
|
|
|
|
if (tuple == NULL || tupdesc == NULL)
|
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_ARGUMENT;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
if (_SPI_current == NULL)
|
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_UNCONNECTED;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2004-04-01 23:28:47 +02:00
|
|
|
/* For RECORD results, make sure a typmod has been assigned */
|
|
|
|
if (tupdesc->tdtypeid == RECORDOID &&
|
|
|
|
tupdesc->tdtypmod < 0)
|
|
|
|
assign_record_type_typmod(tupdesc);
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);
|
2001-11-05 20:41:56 +01:00
|
|
|
|
Fix failure to detoast fields in composite elements of structured types.
If we have an array of records stored on disk, the individual record fields
cannot contain out-of-line TOAST pointers: the tuptoaster.c mechanisms are
only prepared to deal with TOAST pointers appearing in top-level fields of
a stored row. The same applies for ranges over composite types, nested
composites, etc. However, the existing code only took care of expanding
sub-field TOAST pointers for the case of nested composites, not for other
structured types containing composites. For example, given a command such
as
UPDATE tab SET arraycol = ARRAY[ROW(x,42)::mycompositetype] ...
where x is a direct reference to a field of an on-disk tuple, if that field
is long enough to be toasted out-of-line then the TOAST pointer would be
inserted as-is into the array column. If the source record for x is later
deleted, the array field value would become a dangling pointer, leading
to errors along the line of "missing chunk number 0 for toast value ..."
when the value is referenced. A reproducible test case for this was
provided by Jan Pecek, but it seems likely that some of the "missing chunk
number" reports we've heard in the past were caused by similar issues.
Code-wise, the problem is that PG_DETOAST_DATUM() is not adequate to
produce a self-contained Datum value if the Datum is of composite type.
Seen in this light, the problem is not just confined to arrays and ranges,
but could also affect some other places where detoasting is done in that
way, for example form_index_tuple().
I tried teaching the array code to apply toast_flatten_tuple_attribute()
along with PG_DETOAST_DATUM() when the array element type is composite,
but this was messy and imposed extra cache lookup costs whether or not any
TOAST pointers were present, indeed sometimes when the array element type
isn't even composite (since sometimes it takes a typcache lookup to find
that out). The idea of extending that approach to all the places that
currently use PG_DETOAST_DATUM() wasn't attractive at all.
This patch instead solves the problem by decreeing that composite Datum
values must not contain any out-of-line TOAST pointers in the first place;
that is, we expand out-of-line fields at the point of constructing a
composite Datum, not at the point where we're about to insert it into a
larger tuple. This rule is applied only to true composite Datums, not
to tuples that are being passed around the system as tuples, so it's not
as invasive as it might sound at first. With this approach, the amount
of code that has to be touched for a full solution is greatly reduced,
and added cache lookup costs are avoided except when there actually is
a TOAST pointer that needs to be inlined.
The main drawback of this approach is that we might sometimes dereference
a TOAST pointer that will never actually be used by the query, imposing a
rather large cost that wasn't there before. On the other side of the coin,
if the field value is used multiple times then we'll come out ahead by
avoiding repeat detoastings. Experimentation suggests that common SQL
coding patterns are unaffected either way, though. Applications that are
very negatively affected could be advised to modify their code to not fetch
columns they won't be using.
In future, we might consider reverting this solution in favor of detoasting
only at the point where data is about to be stored to disk, using some
method that can drill down into multiple levels of nested structured types.
That will require defining new APIs for structured types, though, so it
doesn't seem feasible as a back-patchable fix.
Note that this patch changes HeapTupleGetDatum() from a macro to a function
call; this means that any third-party code using that macro will not get
protection against creating TOAST-pointer-containing Datums until it's
recompiled. The same applies to any uses of PG_RETURN_HEAPTUPLEHEADER().
It seems likely that this is not a big problem in practice: most of the
tuple-returning functions in core and contrib produce outputs that could
not possibly be toasted anyway, and the same probably holds for third-party
extensions.
This bug has existed since TOAST was invented, so back-patch to all
supported branches.
2014-05-01 21:19:06 +02:00
|
|
|
dtup = DatumGetHeapTupleHeader(heap_copy_tuple_as_datum(tuple, tupdesc));
|
2001-11-05 20:41:56 +01:00
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
MemoryContextSwitchTo(oldcxt);
|
2001-11-05 20:41:56 +01:00
|
|
|
|
2004-04-01 23:28:47 +02:00
|
|
|
return dtup;
|
2001-11-05 20:41:56 +01:00
|
|
|
}
|
|
|
|
|
1997-09-12 10:37:52 +02:00
|
|
|
HeapTuple
|
|
|
|
SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum,
|
2002-12-30 23:10:54 +01:00
|
|
|
Datum *Values, const char *Nulls)
|
1997-09-12 10:37:52 +02:00
|
|
|
{
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
MemoryContext oldcxt;
|
1997-09-12 10:37:52 +02:00
|
|
|
HeapTuple mtuple;
|
|
|
|
int numberOfAttributes;
|
|
|
|
Datum *v;
|
2008-11-02 02:45:28 +01:00
|
|
|
bool *n;
|
1997-09-12 10:37:52 +02:00
|
|
|
int i;
|
|
|
|
|
2003-09-16 02:50:09 +02:00
|
|
|
if (rel == NULL || tuple == NULL || natts < 0 || attnum == NULL || Values == NULL)
|
1997-09-12 10:37:52 +02:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_ARGUMENT;
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
1997-09-12 10:37:52 +02:00
|
|
|
}
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
if (_SPI_current == NULL)
|
1997-09-12 10:37:52 +02:00
|
|
|
{
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
SPI_result = SPI_ERROR_UNCONNECTED;
|
|
|
|
return NULL;
|
1997-09-12 10:37:52 +02:00
|
|
|
}
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
|
|
|
|
oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);
|
|
|
|
|
1997-09-12 10:37:52 +02:00
|
|
|
SPI_result = 0;
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
|
1997-09-12 10:37:52 +02:00
|
|
|
numberOfAttributes = rel->rd_att->natts;
|
|
|
|
v = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
|
2008-11-02 02:45:28 +01:00
|
|
|
n = (bool *) palloc(numberOfAttributes * sizeof(bool));
|
1997-09-12 10:37:52 +02:00
|
|
|
|
|
|
|
/* fetch old values and nulls */
|
2008-11-02 02:45:28 +01:00
|
|
|
heap_deform_tuple(tuple, rel->rd_att, v, n);
|
1997-09-12 10:37:52 +02:00
|
|
|
|
|
|
|
/* replace values and nulls */
|
|
|
|
for (i = 0; i < natts; i++)
|
|
|
|
{
|
|
|
|
if (attnum[i] <= 0 || attnum[i] > numberOfAttributes)
|
|
|
|
break;
|
|
|
|
v[attnum[i] - 1] = Values[i];
|
2008-11-02 02:45:28 +01:00
|
|
|
n[attnum[i] - 1] = (Nulls && Nulls[i] == 'n') ? true : false;
|
1997-09-12 10:37:52 +02:00
|
|
|
}
|
|
|
|
|
1998-09-01 05:29:17 +02:00
|
|
|
if (i == natts) /* no errors in *attnum */
|
1997-09-12 10:37:52 +02:00
|
|
|
{
|
2008-11-02 02:45:28 +01:00
|
|
|
mtuple = heap_form_tuple(rel->rd_att, v, n);
|
2002-09-04 22:31:48 +02:00
|
|
|
|
2002-07-20 07:16:59 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* copy the identification info of the old tuple: t_ctid, t_self, and
|
|
|
|
* OID (if any)
|
2002-07-20 07:16:59 +02:00
|
|
|
*/
|
2002-09-02 03:05:06 +02:00
|
|
|
mtuple->t_data->t_ctid = tuple->t_data->t_ctid;
|
|
|
|
mtuple->t_self = tuple->t_self;
|
|
|
|
mtuple->t_tableOid = tuple->t_tableOid;
|
1997-09-12 10:37:52 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
mtuple = NULL;
|
|
|
|
SPI_result = SPI_ERROR_NOATTRIBUTE;
|
|
|
|
}
|
|
|
|
|
|
|
|
pfree(v);
|
|
|
|
pfree(n);
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
MemoryContextSwitchTo(oldcxt);
|
1997-09-12 10:37:52 +02:00
|
|
|
|
1998-09-01 05:29:17 +02:00
|
|
|
return mtuple;
|
1997-09-12 10:37:52 +02:00
|
|
|
}
|
|
|
|
|
1997-09-06 13:23:05 +02:00
|
|
|
int
|
2002-12-30 23:10:54 +01:00
|
|
|
SPI_fnumber(TupleDesc tupdesc, const char *fname)
|
1997-09-06 13:23:05 +02:00
|
|
|
{
|
1997-09-08 04:41:22 +02:00
|
|
|
int res;
|
2018-10-16 18:44:43 +02:00
|
|
|
const FormData_pg_attribute *sysatt;
|
1997-09-12 10:37:52 +02:00
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
for (res = 0; res < tupdesc->natts; res++)
|
|
|
|
{
|
2017-08-20 20:19:07 +02:00
|
|
|
Form_pg_attribute attr = TupleDescAttr(tupdesc, res);
|
|
|
|
|
|
|
|
if (namestrcmp(&attr->attname, fname) == 0 &&
|
|
|
|
!attr->attisdropped)
|
1998-09-01 05:29:17 +02:00
|
|
|
return res + 1;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring an pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
|
|
|
sysatt = SystemAttributeByName(fname);
|
2001-10-23 19:38:25 +02:00
|
|
|
if (sysatt != NULL)
|
|
|
|
return sysatt->attnum;
|
|
|
|
|
|
|
|
/* SPI_ERROR_NOATTRIBUTE is different from all sys column numbers */
|
1998-09-01 05:29:17 +02:00
|
|
|
return SPI_ERROR_NOATTRIBUTE;
|
1997-09-06 13:23:05 +02:00
|
|
|
}
|
|
|
|
|
1998-02-26 05:46:47 +01:00
|
|
|
char *
|
1997-09-11 09:24:37 +02:00
|
|
|
SPI_fname(TupleDesc tupdesc, int fnumber)
|
|
|
|
{
|
2018-10-16 18:44:43 +02:00
|
|
|
const FormData_pg_attribute *att;
|
1997-09-11 09:24:37 +02:00
|
|
|
|
|
|
|
SPI_result = 0;
|
2001-10-23 19:38:25 +02:00
|
|
|
|
|
|
|
if (fnumber > tupdesc->natts || fnumber == 0 ||
|
|
|
|
fnumber <= FirstLowInvalidHeapAttributeNumber)
|
1997-09-11 09:24:37 +02:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_NOATTRIBUTE;
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
1997-09-11 09:24:37 +02:00
|
|
|
}
|
|
|
|
|
2001-10-23 19:38:25 +02:00
|
|
|
if (fnumber > 0)
|
2017-08-20 20:19:07 +02:00
|
|
|
att = TupleDescAttr(tupdesc, fnumber - 1);
|
2001-10-23 19:38:25 +02:00
|
|
|
else
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring an pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
|
|
|
att = SystemAttributeDefinition(fnumber);
|
2001-10-23 19:38:25 +02:00
|
|
|
|
|
|
|
return pstrdup(NameStr(att->attname));
|
1997-09-11 09:24:37 +02:00
|
|
|
}
|
|
|
|
|
1998-02-26 05:46:47 +01:00
|
|
|
char *
|
1997-09-07 07:04:48 +02:00
|
|
|
SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
|
1997-09-06 13:23:05 +02:00
|
|
|
{
|
Prevent memory leaks from accumulating across printtup() calls.
Historically, printtup() has assumed that it could prevent memory leakage
by pfree'ing the string result of each output function and manually
managing detoasting of toasted values. This amounts to assuming that
datatype output functions never leak any memory internally; an assumption
we've already decided to be bogus elsewhere, for example in COPY OUT.
range_out in particular is known to leak multiple kilobytes per call, as
noted in bug #8573 from Godfried Vanluffelen. While we could go in and fix
that leak, it wouldn't be very notationally convenient, and in any case
there have been and undoubtedly will again be other leaks in other output
functions. So what seems like the best solution is to run the output
functions in a temporary memory context that can be reset after each row,
as we're doing in COPY OUT. Some quick experimentation suggests this is
actually a tad faster than the retail pfree's anyway.
This patch fixes all the variants of printtup, except for debugtup()
which is used in standalone mode. It doesn't seem worth worrying
about query-lifespan leaks in standalone mode, and fixing that case
would be a bit tedious since debugtup() doesn't currently have any
startup or shutdown functions.
While at it, remove manual detoast management from several other
output-function call sites that had copied it from printtup(). This
doesn't make a lot of difference right now, but in view of recent
discussions about supporting "non-flattened" Datums, we're going to
want that code gone eventually anyway.
Back-patch to 9.2 where range_out was introduced. We might eventually
decide to back-patch this further, but in the absence of known major
leaks in older output functions, I'll refrain for now.
2013-11-03 17:33:05 +01:00
|
|
|
Datum val;
|
1997-09-08 04:41:22 +02:00
|
|
|
bool isnull;
|
2001-10-23 19:38:25 +02:00
|
|
|
Oid typoid,
|
2005-05-01 20:56:19 +02:00
|
|
|
foutoid;
|
2000-12-01 23:10:31 +01:00
|
|
|
bool typisvarlena;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
SPI_result = 0;
|
2001-10-23 19:38:25 +02:00
|
|
|
|
2008-10-16 15:23:21 +02:00
|
|
|
if (fnumber > tupdesc->natts || fnumber == 0 ||
|
2001-10-23 19:38:25 +02:00
|
|
|
fnumber <= FirstLowInvalidHeapAttributeNumber)
|
1997-09-29 08:28:45 +02:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_NOATTRIBUTE;
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
1997-09-29 08:28:45 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Prevent memory leaks from accumulating across printtup() calls.
Historically, printtup() has assumed that it could prevent memory leakage
by pfree'ing the string result of each output function and manually
managing detoasting of toasted values. This amounts to assuming that
datatype output functions never leak any memory internally; an assumption
we've already decided to be bogus elsewhere, for example in COPY OUT.
range_out in particular is known to leak multiple kilobytes per call, as
noted in bug #8573 from Godfried Vanluffelen. While we could go in and fix
that leak, it wouldn't be very notationally convenient, and in any case
there have been and undoubtedly will again be other leaks in other output
functions. So what seems like the best solution is to run the output
functions in a temporary memory context that can be reset after each row,
as we're doing in COPY OUT. Some quick experimentation suggests this is
actually a tad faster than the retail pfree's anyway.
This patch fixes all the variants of printtup, except for debugtup()
which is used in standalone mode. It doesn't seem worth worrying
about query-lifespan leaks in standalone mode, and fixing that case
would be a bit tedious since debugtup() doesn't currently have any
startup or shutdown functions.
While at it, remove manual detoast management from several other
output-function call sites that had copied it from printtup(). This
doesn't make a lot of difference right now, but in view of recent
discussions about supporting "non-flattened" Datums, we're going to
want that code gone eventually anyway.
Back-patch to 9.2 where range_out was introduced. We might eventually
decide to back-patch this further, but in the absence of known major
leaks in older output functions, I'll refrain for now.
2013-11-03 17:33:05 +01:00
|
|
|
val = heap_getattr(tuple, fnumber, tupdesc, &isnull);
|
1997-09-07 07:04:48 +02:00
|
|
|
if (isnull)
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
2001-10-23 19:38:25 +02:00
|
|
|
|
|
|
|
if (fnumber > 0)
|
2017-08-20 20:19:07 +02:00
|
|
|
typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
|
2001-10-23 19:38:25 +02:00
|
|
|
else
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring an pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
|
|
|
typoid = (SystemAttributeDefinition(fnumber))->atttypid;
|
2001-10-23 19:38:25 +02:00
|
|
|
|
2005-05-01 20:56:19 +02:00
|
|
|
getTypeOutputInfo(typoid, &foutoid, &typisvarlena);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Prevent memory leaks from accumulating across printtup() calls.
Historically, printtup() has assumed that it could prevent memory leakage
by pfree'ing the string result of each output function and manually
managing detoasting of toasted values. This amounts to assuming that
datatype output functions never leak any memory internally; an assumption
we've already decided to be bogus elsewhere, for example in COPY OUT.
range_out in particular is known to leak multiple kilobytes per call, as
noted in bug #8573 from Godfried Vanluffelen. While we could go in and fix
that leak, it wouldn't be very notationally convenient, and in any case
there have been and undoubtedly will again be other leaks in other output
functions. So what seems like the best solution is to run the output
functions in a temporary memory context that can be reset after each row,
as we're doing in COPY OUT. Some quick experimentation suggests this is
actually a tad faster than the retail pfree's anyway.
This patch fixes all the variants of printtup, except for debugtup()
which is used in standalone mode. It doesn't seem worth worrying
about query-lifespan leaks in standalone mode, and fixing that case
would be a bit tedious since debugtup() doesn't currently have any
startup or shutdown functions.
While at it, remove manual detoast management from several other
output-function call sites that had copied it from printtup(). This
doesn't make a lot of difference right now, but in view of recent
discussions about supporting "non-flattened" Datums, we're going to
want that code gone eventually anyway.
Back-patch to 9.2 where range_out was introduced. We might eventually
decide to back-patch this further, but in the absence of known major
leaks in older output functions, I'll refrain for now.
2013-11-03 17:33:05 +01:00
|
|
|
return OidOutputFunctionCall(foutoid, val);
|
1997-09-06 13:23:05 +02:00
|
|
|
}
|
|
|
|
|
1997-09-12 06:09:08 +02:00
|
|
|
Datum
|
1998-02-26 05:46:47 +01:00
|
|
|
SPI_getbinval(HeapTuple tuple, TupleDesc tupdesc, int fnumber, bool *isnull)
|
1997-09-06 13:23:05 +02:00
|
|
|
{
|
1997-09-07 07:04:48 +02:00
|
|
|
SPI_result = 0;
|
2001-10-23 19:38:25 +02:00
|
|
|
|
2008-10-16 15:23:21 +02:00
|
|
|
if (fnumber > tupdesc->natts || fnumber == 0 ||
|
2001-10-23 19:38:25 +02:00
|
|
|
fnumber <= FirstLowInvalidHeapAttributeNumber)
|
1997-09-29 08:28:45 +02:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_NOATTRIBUTE;
|
2001-10-23 19:38:25 +02:00
|
|
|
*isnull = true;
|
1998-09-01 05:29:17 +02:00
|
|
|
return (Datum) NULL;
|
1997-09-29 08:28:45 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-10-23 19:38:25 +02:00
|
|
|
return heap_getattr(tuple, fnumber, tupdesc, isnull);
|
1997-09-06 13:23:05 +02:00
|
|
|
}
|
|
|
|
|
1998-02-26 05:46:47 +01:00
|
|
|
char *
|
1997-09-07 07:04:48 +02:00
|
|
|
SPI_gettype(TupleDesc tupdesc, int fnumber)
|
1997-09-06 13:23:05 +02:00
|
|
|
{
|
2001-10-23 19:38:25 +02:00
|
|
|
Oid typoid;
|
1997-09-08 04:41:22 +02:00
|
|
|
HeapTuple typeTuple;
|
2000-11-16 23:30:52 +01:00
|
|
|
char *result;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
SPI_result = 0;
|
2001-10-23 19:38:25 +02:00
|
|
|
|
|
|
|
if (fnumber > tupdesc->natts || fnumber == 0 ||
|
|
|
|
fnumber <= FirstLowInvalidHeapAttributeNumber)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_NOATTRIBUTE;
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
2001-10-23 19:38:25 +02:00
|
|
|
if (fnumber > 0)
|
2017-08-20 20:19:07 +02:00
|
|
|
typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
|
2001-10-23 19:38:25 +02:00
|
|
|
else
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring an pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
|
|
|
typoid = (SystemAttributeDefinition(fnumber))->atttypid;
|
2001-10-23 19:38:25 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typoid));
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
if (!HeapTupleIsValid(typeTuple))
|
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_TYPUNKNOWN;
|
1998-09-01 05:29:17 +02:00
|
|
|
return NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname));
|
|
|
|
ReleaseSysCache(typeTuple);
|
|
|
|
return result;
|
1997-09-06 13:23:05 +02:00
|
|
|
}
|
|
|
|
|
2013-07-13 00:07:46 +02:00
|
|
|
/*
|
|
|
|
* Get the data type OID for a column.
|
|
|
|
*
|
|
|
|
* There's nothing similar for typmod and typcollation. The rare consumers
|
|
|
|
* thereof should inspect the TupleDesc directly.
|
|
|
|
*/
|
1997-09-06 13:23:05 +02:00
|
|
|
Oid
|
1997-09-07 07:04:48 +02:00
|
|
|
SPI_gettypeid(TupleDesc tupdesc, int fnumber)
|
1997-09-06 13:23:05 +02:00
|
|
|
{
|
1997-09-07 07:04:48 +02:00
|
|
|
SPI_result = 0;
|
2001-10-23 19:38:25 +02:00
|
|
|
|
|
|
|
if (fnumber > tupdesc->natts || fnumber == 0 ||
|
|
|
|
fnumber <= FirstLowInvalidHeapAttributeNumber)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_NOATTRIBUTE;
|
1998-09-01 05:29:17 +02:00
|
|
|
return InvalidOid;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
2001-10-23 19:38:25 +02:00
|
|
|
if (fnumber > 0)
|
2017-08-20 20:19:07 +02:00
|
|
|
return TupleDescAttr(tupdesc, fnumber - 1)->atttypid;
|
2001-10-23 19:38:25 +02:00
|
|
|
else
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring an pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
|
|
|
return (SystemAttributeDefinition(fnumber))->atttypid;
|
1997-09-06 13:23:05 +02:00
|
|
|
}
|
|
|
|
|
1998-02-26 05:46:47 +01:00
|
|
|
char *
|
1997-09-07 07:04:48 +02:00
|
|
|
SPI_getrelname(Relation rel)
|
1997-09-06 13:23:05 +02:00
|
|
|
{
|
1999-11-08 00:08:36 +01:00
|
|
|
return pstrdup(RelationGetRelationName(rel));
|
1997-09-06 13:23:05 +02:00
|
|
|
}
|
|
|
|
|
2005-03-29 04:53:53 +02:00
|
|
|
char *
|
|
|
|
SPI_getnspname(Relation rel)
|
|
|
|
{
|
2005-10-15 04:49:52 +02:00
|
|
|
return get_namespace_name(RelationGetNamespace(rel));
|
2005-03-29 04:53:53 +02:00
|
|
|
}
|
|
|
|
|
1997-09-24 10:28:37 +02:00
|
|
|
void *
|
1998-02-26 05:46:47 +01:00
|
|
|
SPI_palloc(Size size)
|
1997-09-24 10:28:37 +02:00
|
|
|
{
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
if (_SPI_current == NULL)
|
|
|
|
elog(ERROR, "SPI_palloc called while not connected to SPI");
|
1998-02-26 05:46:47 +01:00
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
return MemoryContextAlloc(_SPI_current->savedcxt, size);
|
1997-09-24 10:28:37 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
1998-02-26 05:46:47 +01:00
|
|
|
SPI_repalloc(void *pointer, Size size)
|
1997-09-24 10:28:37 +02:00
|
|
|
{
|
2000-06-28 05:33:33 +02:00
|
|
|
/* No longer need to worry which context chunk was in... */
|
|
|
|
return repalloc(pointer, size);
|
1997-09-24 10:28:37 +02:00
|
|
|
}
|
|
|
|
|
1998-02-26 05:46:47 +01:00
|
|
|
void
SPI_pfree(void *pointer)
{
	/*
	 * pfree determines the owning memory context from the chunk itself, so
	 * this is a plain passthrough with no SPI-specific bookkeeping.
	 */
	pfree(pointer);
}
|
|
|
|
|
Support "expanded" objects, particularly arrays, for better performance.
This patch introduces the ability for complex datatypes to have an
in-memory representation that is different from their on-disk format.
On-disk formats are typically optimized for minimal size, and in any case
they can't contain pointers, so they are often not well-suited for
computation. Now a datatype can invent an "expanded" in-memory format
that is better suited for its operations, and then pass that around among
the C functions that operate on the datatype. There are also provisions
(rudimentary as yet) to allow an expanded object to be modified in-place
under suitable conditions, so that operations like assignment to an element
of an array need not involve copying the entire array.
The initial application for this feature is arrays, but it is not hard
to foresee using it for other container types like JSON, XML and hstore.
I have hopes that it will be useful to PostGIS as well.
In this initial implementation, a few heuristics have been hard-wired
into plpgsql to improve performance for arrays that are stored in
plpgsql variables. We would like to generalize those hacks so that
other datatypes can obtain similar improvements, but figuring out some
appropriate APIs is left as a task for future work. (The heuristics
themselves are probably not optimal yet, either, as they sometimes
force expansion of arrays that would be better left alone.)
Preliminary performance testing shows impressive speed gains for plpgsql
functions that do element-by-element access or update of large arrays.
There are other cases that get a little slower, as a result of added array
format conversions; but we can hope to improve anything that's annoyingly
bad. In any case most applications should see a net win.
Tom Lane, reviewed by Andres Freund
2015-05-14 18:08:40 +02:00
|
|
|
Datum
|
|
|
|
SPI_datumTransfer(Datum value, bool typByVal, int typLen)
|
|
|
|
{
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
MemoryContext oldcxt;
|
Support "expanded" objects, particularly arrays, for better performance.
This patch introduces the ability for complex datatypes to have an
in-memory representation that is different from their on-disk format.
On-disk formats are typically optimized for minimal size, and in any case
they can't contain pointers, so they are often not well-suited for
computation. Now a datatype can invent an "expanded" in-memory format
that is better suited for its operations, and then pass that around among
the C functions that operate on the datatype. There are also provisions
(rudimentary as yet) to allow an expanded object to be modified in-place
under suitable conditions, so that operations like assignment to an element
of an array need not involve copying the entire array.
The initial application for this feature is arrays, but it is not hard
to foresee using it for other container types like JSON, XML and hstore.
I have hopes that it will be useful to PostGIS as well.
In this initial implementation, a few heuristics have been hard-wired
into plpgsql to improve performance for arrays that are stored in
plpgsql variables. We would like to generalize those hacks so that
other datatypes can obtain similar improvements, but figuring out some
appropriate APIs is left as a task for future work. (The heuristics
themselves are probably not optimal yet, either, as they sometimes
force expansion of arrays that would be better left alone.)
Preliminary performance testing shows impressive speed gains for plpgsql
functions that do element-by-element access or update of large arrays.
There are other cases that get a little slower, as a result of added array
format conversions; but we can hope to improve anything that's annoyingly
bad. In any case most applications should see a net win.
Tom Lane, reviewed by Andres Freund
2015-05-14 18:08:40 +02:00
|
|
|
Datum result;
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
if (_SPI_current == NULL)
|
|
|
|
elog(ERROR, "SPI_datumTransfer called while not connected to SPI");
|
|
|
|
|
|
|
|
oldcxt = MemoryContextSwitchTo(_SPI_current->savedcxt);
|
Support "expanded" objects, particularly arrays, for better performance.
This patch introduces the ability for complex datatypes to have an
in-memory representation that is different from their on-disk format.
On-disk formats are typically optimized for minimal size, and in any case
they can't contain pointers, so they are often not well-suited for
computation. Now a datatype can invent an "expanded" in-memory format
that is better suited for its operations, and then pass that around among
the C functions that operate on the datatype. There are also provisions
(rudimentary as yet) to allow an expanded object to be modified in-place
under suitable conditions, so that operations like assignment to an element
of an array need not involve copying the entire array.
The initial application for this feature is arrays, but it is not hard
to foresee using it for other container types like JSON, XML and hstore.
I have hopes that it will be useful to PostGIS as well.
In this initial implementation, a few heuristics have been hard-wired
into plpgsql to improve performance for arrays that are stored in
plpgsql variables. We would like to generalize those hacks so that
other datatypes can obtain similar improvements, but figuring out some
appropriate APIs is left as a task for future work. (The heuristics
themselves are probably not optimal yet, either, as they sometimes
force expansion of arrays that would be better left alone.)
Preliminary performance testing shows impressive speed gains for plpgsql
functions that do element-by-element access or update of large arrays.
There are other cases that get a little slower, as a result of added array
format conversions; but we can hope to improve anything that's annoyingly
bad. In any case most applications should see a net win.
Tom Lane, reviewed by Andres Freund
2015-05-14 18:08:40 +02:00
|
|
|
|
|
|
|
result = datumTransfer(value, typByVal, typLen);
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
MemoryContextSwitchTo(oldcxt);
|
Support "expanded" objects, particularly arrays, for better performance.
This patch introduces the ability for complex datatypes to have an
in-memory representation that is different from their on-disk format.
On-disk formats are typically optimized for minimal size, and in any case
they can't contain pointers, so they are often not well-suited for
computation. Now a datatype can invent an "expanded" in-memory format
that is better suited for its operations, and then pass that around among
the C functions that operate on the datatype. There are also provisions
(rudimentary as yet) to allow an expanded object to be modified in-place
under suitable conditions, so that operations like assignment to an element
of an array need not involve copying the entire array.
The initial application for this feature is arrays, but it is not hard
to foresee using it for other container types like JSON, XML and hstore.
I have hopes that it will be useful to PostGIS as well.
In this initial implementation, a few heuristics have been hard-wired
into plpgsql to improve performance for arrays that are stored in
plpgsql variables. We would like to generalize those hacks so that
other datatypes can obtain similar improvements, but figuring out some
appropriate APIs is left as a task for future work. (The heuristics
themselves are probably not optimal yet, either, as they sometimes
force expansion of arrays that would be better left alone.)
Preliminary performance testing shows impressive speed gains for plpgsql
functions that do element-by-element access or update of large arrays.
There are other cases that get a little slower, as a result of added array
format conversions; but we can hope to improve anything that's annoyingly
bad. In any case most applications should see a net win.
Tom Lane, reviewed by Andres Freund
2015-05-14 18:08:40 +02:00
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
1999-12-16 23:20:03 +01:00
|
|
|
void
|
|
|
|
SPI_freetuple(HeapTuple tuple)
|
|
|
|
{
|
2000-06-28 05:33:33 +02:00
|
|
|
/* No longer need to worry which context tuple was in... */
|
1999-12-16 23:20:03 +01:00
|
|
|
heap_freetuple(tuple);
|
|
|
|
}
|
|
|
|
|
2001-05-21 16:22:19 +02:00
|
|
|
void
|
|
|
|
SPI_freetuptable(SPITupleTable *tuptable)
|
|
|
|
{
|
Prevent leakage of SPI tuple tables during subtransaction abort.
plpgsql often just remembers SPI-result tuple tables in local variables,
and has no mechanism for freeing them if an ereport(ERROR) causes an escape
out of the execution function whose local variable it is. In the original
coding, that wasn't a problem because the tuple table would be cleaned up
when the function's SPI context went away during transaction abort.
However, once plpgsql grew the ability to trap exceptions, repeated
trapping of errors within a function could result in significant
intra-function-call memory leakage, as illustrated in bug #8279 from
Chad Wagner.
We could fix this locally in plpgsql with a bunch of PG_TRY/PG_CATCH
coding, but that would be tedious, probably slow, and prone to bugs of
omission; moreover it would do nothing for similar risks elsewhere.
What seems like a better plan is to make SPI itself responsible for
freeing tuple tables at subtransaction abort. This patch attacks the
problem that way, keeping a list of live tuple tables within each SPI
function context. Currently, such freeing is automatic for tuple tables
made within the failed subtransaction. We might later add a SPI call to
mark a tuple table as not to be freed this way, allowing callers to opt
out; but until someone exhibits a clear use-case for such behavior, it
doesn't seem worth bothering.
A very useful side-effect of this change is that SPI_freetuptable() can
now defend itself against bad calls, such as duplicate free requests;
this should make things more robust in many places. (In particular,
this reduces the risks involved if a third-party extension contains
now-redundant SPI_freetuptable() calls in error cleanup code.)
Even though the leakage problem is of long standing, it seems imprudent
to back-patch this into stable branches, since it does represent an API
semantics change for SPI users. We'll patch this in 9.3, but live with
the leakage in older branches.
2013-07-25 22:45:43 +02:00
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
/* ignore call if NULL pointer */
|
|
|
|
if (tuptable == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
* Search only the topmost SPI context for a matching tuple table.
|
Prevent leakage of SPI tuple tables during subtransaction abort.
plpgsql often just remembers SPI-result tuple tables in local variables,
and has no mechanism for freeing them if an ereport(ERROR) causes an escape
out of the execution function whose local variable it is. In the original
coding, that wasn't a problem because the tuple table would be cleaned up
when the function's SPI context went away during transaction abort.
However, once plpgsql grew the ability to trap exceptions, repeated
trapping of errors within a function could result in significant
intra-function-call memory leakage, as illustrated in bug #8279 from
Chad Wagner.
We could fix this locally in plpgsql with a bunch of PG_TRY/PG_CATCH
coding, but that would be tedious, probably slow, and prone to bugs of
omission; moreover it would do nothing for similar risks elsewhere.
What seems like a better plan is to make SPI itself responsible for
freeing tuple tables at subtransaction abort. This patch attacks the
problem that way, keeping a list of live tuple tables within each SPI
function context. Currently, such freeing is automatic for tuple tables
made within the failed subtransaction. We might later add a SPI call to
mark a tuple table as not to be freed this way, allowing callers to opt
out; but until someone exhibits a clear use-case for such behavior, it
doesn't seem worth bothering.
A very useful side-effect of this change is that SPI_freetuptable() can
now defend itself against bad calls, such as duplicate free requests;
this should make things more robust in many places. (In particular,
this reduces the risks involved if a third-party extension contains
now-redundant SPI_freetuptable() calls in error cleanup code.)
Even though the leakage problem is of long standing, it seems imprudent
to back-patch this into stable branches, since it does represent an API
semantics change for SPI users. We'll patch this in 9.3, but live with
the leakage in older branches.
2013-07-25 22:45:43 +02:00
|
|
|
*/
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
if (_SPI_current != NULL)
|
Prevent leakage of SPI tuple tables during subtransaction abort.
plpgsql often just remembers SPI-result tuple tables in local variables,
and has no mechanism for freeing them if an ereport(ERROR) causes an escape
out of the execution function whose local variable it is. In the original
coding, that wasn't a problem because the tuple table would be cleaned up
when the function's SPI context went away during transaction abort.
However, once plpgsql grew the ability to trap exceptions, repeated
trapping of errors within a function could result in significant
intra-function-call memory leakage, as illustrated in bug #8279 from
Chad Wagner.
We could fix this locally in plpgsql with a bunch of PG_TRY/PG_CATCH
coding, but that would be tedious, probably slow, and prone to bugs of
omission; moreover it would do nothing for similar risks elsewhere.
What seems like a better plan is to make SPI itself responsible for
freeing tuple tables at subtransaction abort. This patch attacks the
problem that way, keeping a list of live tuple tables within each SPI
function context. Currently, such freeing is automatic for tuple tables
made within the failed subtransaction. We might later add a SPI call to
mark a tuple table as not to be freed this way, allowing callers to opt
out; but until someone exhibits a clear use-case for such behavior, it
doesn't seem worth bothering.
A very useful side-effect of this change is that SPI_freetuptable() can
now defend itself against bad calls, such as duplicate free requests;
this should make things more robust in many places. (In particular,
this reduces the risks involved if a third-party extension contains
now-redundant SPI_freetuptable() calls in error cleanup code.)
Even though the leakage problem is of long standing, it seems imprudent
to back-patch this into stable branches, since it does represent an API
semantics change for SPI users. We'll patch this in 9.3, but live with
the leakage in older branches.
2013-07-25 22:45:43 +02:00
|
|
|
{
|
|
|
|
slist_mutable_iter siter;
|
|
|
|
|
|
|
|
/* find tuptable in active list, then remove it */
|
|
|
|
slist_foreach_modify(siter, &_SPI_current->tuptables)
|
|
|
|
{
|
|
|
|
SPITupleTable *tt;
|
|
|
|
|
|
|
|
tt = slist_container(SPITupleTable, next, siter.cur);
|
|
|
|
if (tt == tuptable)
|
|
|
|
{
|
|
|
|
slist_delete_current(&siter);
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Refuse the deletion if we didn't find it in the topmost SPI context.
|
|
|
|
* This is primarily a guard against double deletion, but might prevent
|
|
|
|
* other errors as well. Since the worst consequence of not deleting a
|
|
|
|
* tuptable would be a transient memory leak, this is just a WARNING.
|
|
|
|
*/
|
|
|
|
if (!found)
|
|
|
|
{
|
|
|
|
elog(WARNING, "attempt to delete invalid SPITupleTable %p", tuptable);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* for safety, reset global variables that might point at tuptable */
|
|
|
|
if (tuptable == _SPI_current->tuptable)
|
|
|
|
_SPI_current->tuptable = NULL;
|
|
|
|
if (tuptable == SPI_tuptable)
|
|
|
|
SPI_tuptable = NULL;
|
|
|
|
|
|
|
|
/* release all memory belonging to tuptable */
|
|
|
|
MemoryContextDelete(tuptable->tuptabcxt);
|
2001-05-21 16:22:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SPI_cursor_open()
|
|
|
|
*
|
|
|
|
* Open a prepared SPI plan as a portal
|
|
|
|
*/
|
|
|
|
Portal
|
2007-03-16 00:12:07 +01:00
|
|
|
SPI_cursor_open(const char *name, SPIPlanPtr plan,
|
2004-09-13 22:10:13 +02:00
|
|
|
Datum *Values, const char *Nulls,
|
|
|
|
bool read_only)
|
2008-06-01 19:32:48 +02:00
|
|
|
{
|
2009-11-04 23:26:08 +01:00
|
|
|
Portal portal;
|
|
|
|
ParamListInfo paramLI;
|
|
|
|
|
|
|
|
/* build transient ParamListInfo in caller's context */
|
|
|
|
paramLI = _SPI_convert_params(plan->nargs, plan->argtypes,
|
2011-09-16 06:42:53 +02:00
|
|
|
Values, Nulls);
|
2009-11-04 23:26:08 +01:00
|
|
|
|
|
|
|
portal = SPI_cursor_open_internal(name, plan, paramLI, read_only);
|
|
|
|
|
|
|
|
/* done with the transient ParamListInfo */
|
|
|
|
if (paramLI)
|
|
|
|
pfree(paramLI);
|
|
|
|
|
|
|
|
return portal;
|
2008-06-01 19:32:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SPI_cursor_open_with_args()
|
|
|
|
*
|
2011-09-16 06:42:53 +02:00
|
|
|
* Parse and plan a query and open it as a portal.
|
2008-06-01 19:32:48 +02:00
|
|
|
*/
|
|
|
|
Portal
|
|
|
|
SPI_cursor_open_with_args(const char *name,
|
|
|
|
const char *src,
|
|
|
|
int nargs, Oid *argtypes,
|
|
|
|
Datum *Values, const char *Nulls,
|
|
|
|
bool read_only, int cursorOptions)
|
|
|
|
{
|
|
|
|
Portal result;
|
|
|
|
_SPI_plan plan;
|
|
|
|
ParamListInfo paramLI;
|
|
|
|
|
|
|
|
if (src == NULL || nargs < 0)
|
|
|
|
elog(ERROR, "SPI_cursor_open_with_args called with invalid arguments");
|
|
|
|
|
|
|
|
if (nargs > 0 && (argtypes == NULL || Values == NULL))
|
|
|
|
elog(ERROR, "SPI_cursor_open_with_args called with missing parameters");
|
|
|
|
|
|
|
|
SPI_result = _SPI_begin_call(true);
|
|
|
|
if (SPI_result < 0)
|
|
|
|
elog(ERROR, "SPI_cursor_open_with_args called while not connected");
|
|
|
|
|
|
|
|
memset(&plan, 0, sizeof(_SPI_plan));
|
|
|
|
plan.magic = _SPI_PLAN_MAGIC;
|
|
|
|
plan.cursor_options = cursorOptions;
|
|
|
|
plan.nargs = nargs;
|
|
|
|
plan.argtypes = argtypes;
|
2009-11-04 23:26:08 +01:00
|
|
|
plan.parserSetup = NULL;
|
|
|
|
plan.parserSetupArg = NULL;
|
2008-06-01 19:32:48 +02:00
|
|
|
|
2009-11-04 23:26:08 +01:00
|
|
|
/* build transient ParamListInfo in executor context */
|
2008-06-01 19:32:48 +02:00
|
|
|
paramLI = _SPI_convert_params(nargs, argtypes,
|
2011-09-16 06:42:53 +02:00
|
|
|
Values, Nulls);
|
2008-06-01 19:32:48 +02:00
|
|
|
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
_SPI_prepare_plan(src, &plan);
|
2008-06-01 19:32:48 +02:00
|
|
|
|
|
|
|
/* We needn't copy the plan; SPI_cursor_open_internal will do so */
|
|
|
|
|
2009-11-04 23:26:08 +01:00
|
|
|
result = SPI_cursor_open_internal(name, &plan, paramLI, read_only);
|
2008-06-01 19:32:48 +02:00
|
|
|
|
|
|
|
/* And clean up */
|
|
|
|
_SPI_end_call(true);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-11-04 23:26:08 +01:00
|
|
|
/*
|
|
|
|
* SPI_cursor_open_with_paramlist()
|
|
|
|
*
|
|
|
|
* Same as SPI_cursor_open except that parameters (if any) are passed
|
|
|
|
* as a ParamListInfo, which supports dynamic parameter set determination
|
|
|
|
*/
|
|
|
|
Portal
|
|
|
|
SPI_cursor_open_with_paramlist(const char *name, SPIPlanPtr plan,
|
|
|
|
ParamListInfo params, bool read_only)
|
|
|
|
{
|
|
|
|
return SPI_cursor_open_internal(name, plan, params, read_only);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-01 19:32:48 +02:00
|
|
|
/*
|
|
|
|
* SPI_cursor_open_internal()
|
|
|
|
*
|
2009-11-04 23:26:08 +01:00
|
|
|
* Common code for SPI_cursor_open variants
|
2008-06-01 19:32:48 +02:00
|
|
|
*/
|
|
|
|
static Portal
|
|
|
|
SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
|
2009-11-04 23:26:08 +01:00
|
|
|
ParamListInfo paramLI, bool read_only)
|
2001-05-21 16:22:19 +02:00
|
|
|
{
|
2007-03-16 00:12:07 +01:00
|
|
|
CachedPlanSource *plansource;
|
|
|
|
CachedPlan *cplan;
|
2007-02-20 18:32:18 +01:00
|
|
|
List *stmt_list;
|
2008-04-02 20:31:50 +02:00
|
|
|
char *query_string;
|
2012-11-26 21:55:43 +01:00
|
|
|
Snapshot snapshot;
|
2001-10-25 07:50:21 +02:00
|
|
|
MemoryContext oldcontext;
|
|
|
|
Portal portal;
|
Fix plpgsql's reporting of plan-time errors in possibly-simple expressions.
exec_simple_check_plan and exec_eval_simple_expr attempted to call
GetCachedPlan directly. This meant that if an error was thrown during
planning, the resulting context traceback would not include the line
normally contributed by _SPI_error_callback. This is already inconsistent,
but just to be really odd, a re-execution of the very same expression
*would* show the additional context line, because we'd already have cached
the plan and marked the expression as non-simple.
The problem is easy to demonstrate in 9.2 and HEAD because planning of a
cached plan doesn't occur at all until GetCachedPlan is done. In earlier
versions, it could only be an issue if initial planning had succeeded, then
a replan was forced (already somewhat improbable for a simple expression),
and the replan attempt failed. Since the issue is mainly cosmetic in older
branches anyway, it doesn't seem worth the risk of trying to fix it there.
It is worth fixing in 9.2 since the instability of the context printout can
affect the results of GET STACKED DIAGNOSTICS, as per a recent discussion
on pgsql-novice.
To fix, introduce a SPI function that wraps GetCachedPlan while installing
the correct callback function. Use this instead of calling GetCachedPlan
directly from plpgsql.
Also introduce a wrapper function for extracting a SPI plan's
CachedPlanSource list. This lets us stop including spi_priv.h in
pl_exec.c, which was never a very good idea from a modularity standpoint.
In passing, fix a similar inconsistency that could occur in SPI_cursor_open,
which was also calling GetCachedPlan without setting up a context callback.
2013-01-31 02:02:23 +01:00
|
|
|
ErrorContextCallback spierrcontext;
|
2001-05-21 16:22:19 +02:00
|
|
|
|
2006-08-15 00:57:15 +02:00
|
|
|
/*
|
2006-10-04 02:30:14 +02:00
|
|
|
* Check that the plan is something the Portal code will special-case as
|
|
|
|
* returning one tupleset.
|
2006-08-15 00:57:15 +02:00
|
|
|
*/
|
2007-03-16 00:12:07 +01:00
|
|
|
if (!SPI_is_cursor_plan(plan))
|
2006-08-15 00:57:15 +02:00
|
|
|
{
|
|
|
|
/* try to give a good error message */
|
2007-03-16 00:12:07 +01:00
|
|
|
if (list_length(plan->plancache_list) != 1)
|
2006-08-15 00:57:15 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
|
|
|
|
errmsg("cannot open multi-query plan as cursor")));
|
2007-03-16 00:12:07 +01:00
|
|
|
plansource = (CachedPlanSource *) linitial(plan->plancache_list);
|
2006-08-12 22:05:56 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
|
2006-10-04 02:30:14 +02:00
|
|
|
/* translator: %s is name of a SQL command, eg INSERT */
|
2006-08-12 22:05:56 +02:00
|
|
|
errmsg("cannot open %s query as cursor",
|
2007-03-16 00:12:07 +01:00
|
|
|
plansource->commandTag)));
|
2006-08-15 00:57:15 +02:00
|
|
|
}
|
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
Assert(list_length(plan->plancache_list) == 1);
|
|
|
|
plansource = (CachedPlanSource *) linitial(plan->plancache_list);
|
2001-05-21 16:22:19 +02:00
|
|
|
|
Fix SPI_cursor_open() and SPI_is_cursor_plan() to push the SPI stack before
doing anything interesting, such as calling RevalidateCachedPlan(). The
necessity of this is demonstrated by an example from Willem Buitendyk:
during a replan, the planner might try to evaluate SPI-using functions,
and so we'd better be in a clean SPI context.
A small downside of this fix is that these two functions will now fail
outright if called when not inside a SPI-using procedure (ie, a
SPI_connect/SPI_finish pair). The documentation never promised or suggested
that that would work, though; and they are normally used in concert with
other functions, mainly SPI_prepare, that always have failed in such a case.
So the odds of breaking something seem pretty low.
In passing, make SPI_is_cursor_plan's error handling convention clearer,
and fix documentation's erroneous claim that SPI_cursor_open would
return NULL on error.
Before 8.3 these functions could not invoke replanning, so there is probably
no need for back-patching.
2008-02-12 05:09:44 +01:00
|
|
|
/* Push the SPI stack */
|
2011-09-16 06:42:53 +02:00
|
|
|
if (_SPI_begin_call(true) < 0)
|
Fix SPI_cursor_open() and SPI_is_cursor_plan() to push the SPI stack before
doing anything interesting, such as calling RevalidateCachedPlan(). The
necessity of this is demonstrated by an example from Willem Buitendyk:
during a replan, the planner might try to evaluate SPI-using functions,
and so we'd better be in a clean SPI context.
A small downside of this fix is that these two functions will now fail
outright if called when not inside a SPI-using procedure (ie, a
SPI_connect/SPI_finish pair). The documentation never promised or suggested
that that would work, though; and they are normally used in concert with
other functions, mainly SPI_prepare, that always have failed in such a case.
So the odds of breaking something seem pretty low.
In passing, make SPI_is_cursor_plan's error handling convention clearer,
and fix documentation's erroneous claim that SPI_cursor_open would
return NULL on error.
Before 8.3 these functions could not invoke replanning, so there is probably
no need for back-patching.
2008-02-12 05:09:44 +01:00
|
|
|
elog(ERROR, "SPI_cursor_open called while not connected");
|
|
|
|
|
2005-10-01 20:43:19 +02:00
|
|
|
/* Reset SPI result (note we deliberately don't touch lastoid) */
|
2001-05-21 16:22:19 +02:00
|
|
|
SPI_processed = 0;
|
|
|
|
SPI_tuptable = NULL;
|
|
|
|
_SPI_current->processed = 0;
|
|
|
|
_SPI_current->tuptable = NULL;
|
|
|
|
|
2003-05-02 22:54:36 +02:00
|
|
|
/* Create the portal */
|
|
|
|
if (name == NULL || name[0] == '\0')
|
2001-05-21 16:22:19 +02:00
|
|
|
{
|
2003-05-02 22:54:36 +02:00
|
|
|
/* Use a random nonconflicting name */
|
|
|
|
portal = CreateNewPortal();
|
2001-05-21 16:22:19 +02:00
|
|
|
}
|
2002-02-14 16:24:10 +01:00
|
|
|
else
|
|
|
|
{
|
2003-05-02 22:54:36 +02:00
|
|
|
/* In this path, error if portal of same name already exists */
|
|
|
|
portal = CreatePortal(name, false, false);
|
2002-02-14 16:24:10 +01:00
|
|
|
}
|
2001-05-21 16:22:19 +02:00
|
|
|
|
Adjust things so that the query_string of a cached plan and the sourceText of
a portal are never NULL, but reliably provide the source text of the query.
It turns out that there was only one place that was really taking a short-cut,
which was the 'EXECUTE' utility statement. That doesn't seem like a
sufficiently critical performance hotspot to justify not offering a guarantee
of validity of the portal source text. Fix it to copy the source text over
from the cached plan. Add Asserts in the places that set up cached plans and
portals to reject null source strings, and simplify a bunch of places that
formerly needed to guard against nulls.
There may be a few places that cons up statements for execution without
having any source text at all; I found one such in ConvertTriggerToFK().
It seems sufficient to inject a phony source string in such a case,
for instance
ProcessUtility((Node *) atstmt,
"(generated ALTER TABLE ADD FOREIGN KEY command)",
NULL, false, None_Receiver, NULL);
We should take a second look at the usage of debug_query_string,
particularly the recently added current_query() SQL function.
ITAGAKI Takahiro and Tom Lane
2008-07-18 22:26:06 +02:00
|
|
|
/* Copy the plan's query string into the portal */
|
2017-12-16 23:26:26 +01:00
|
|
|
query_string = MemoryContextStrdup(portal->portalContext,
|
2009-11-04 23:26:08 +01:00
|
|
|
plansource->query_string);
|
2008-04-02 20:31:50 +02:00
|
|
|
|
Fix plpgsql's reporting of plan-time errors in possibly-simple expressions.
exec_simple_check_plan and exec_eval_simple_expr attempted to call
GetCachedPlan directly. This meant that if an error was thrown during
planning, the resulting context traceback would not include the line
normally contributed by _SPI_error_callback. This is already inconsistent,
but just to be really odd, a re-execution of the very same expression
*would* show the additional context line, because we'd already have cached
the plan and marked the expression as non-simple.
The problem is easy to demonstrate in 9.2 and HEAD because planning of a
cached plan doesn't occur at all until GetCachedPlan is done. In earlier
versions, it could only be an issue if initial planning had succeeded, then
a replan was forced (already somewhat improbable for a simple expression),
and the replan attempt failed. Since the issue is mainly cosmetic in older
branches anyway, it doesn't seem worth the risk of trying to fix it there.
It is worth fixing in 9.2 since the instability of the context printout can
affect the results of GET STACKED DIAGNOSTICS, as per a recent discussion
on pgsql-novice.
To fix, introduce a SPI function that wraps GetCachedPlan while installing
the correct callback function. Use this instead of calling GetCachedPlan
directly from plpgsql.
Also introduce a wrapper function for extracting a SPI plan's
CachedPlanSource list. This lets us stop including spi_priv.h in
pl_exec.c, which was never a very good idea from a modularity standpoint.
In passing, fix a similar inconsistency that could occur in SPI_cursor_open,
which was also calling GetCachedPlan without setting up a context callback.
2013-01-31 02:02:23 +01:00
|
|
|
/*
|
|
|
|
* Setup error traceback support for ereport(), in case GetCachedPlan
|
|
|
|
* throws an error.
|
|
|
|
*/
|
|
|
|
spierrcontext.callback = _SPI_error_callback;
|
2019-01-29 01:16:24 +01:00
|
|
|
spierrcontext.arg = unconstify(char *, plansource->query_string);
|
Fix plpgsql's reporting of plan-time errors in possibly-simple expressions.
exec_simple_check_plan and exec_eval_simple_expr attempted to call
GetCachedPlan directly. This meant that if an error was thrown during
planning, the resulting context traceback would not include the line
normally contributed by _SPI_error_callback. This is already inconsistent,
but just to be really odd, a re-execution of the very same expression
*would* show the additional context line, because we'd already have cached
the plan and marked the expression as non-simple.
The problem is easy to demonstrate in 9.2 and HEAD because planning of a
cached plan doesn't occur at all until GetCachedPlan is done. In earlier
versions, it could only be an issue if initial planning had succeeded, then
a replan was forced (already somewhat improbable for a simple expression),
and the replan attempt failed. Since the issue is mainly cosmetic in older
branches anyway, it doesn't seem worth the risk of trying to fix it there.
It is worth fixing in 9.2 since the instability of the context printout can
affect the results of GET STACKED DIAGNOSTICS, as per a recent discussion
on pgsql-novice.
To fix, introduce a SPI function that wraps GetCachedPlan while installing
the correct callback function. Use this instead of calling GetCachedPlan
directly from plpgsql.
Also introduce a wrapper function for extracting a SPI plan's
CachedPlanSource list. This lets us stop including spi_priv.h in
pl_exec.c, which was never a very good idea from a modularity standpoint.
In passing, fix a similar inconsistency that could occur in SPI_cursor_open,
which was also calling GetCachedPlan without setting up a context callback.
2013-01-31 02:02:23 +01:00
|
|
|
spierrcontext.previous = error_context_stack;
|
|
|
|
error_context_stack = &spierrcontext;
|
|
|
|
|
2009-11-04 23:26:08 +01:00
|
|
|
/*
|
2011-09-16 06:42:53 +02:00
|
|
|
* Note: for a saved plan, we mustn't have any failure occur between
|
|
|
|
* GetCachedPlan and PortalDefineQuery; that would result in leaking our
|
|
|
|
* plancache refcount.
|
2009-11-04 23:26:08 +01:00
|
|
|
*/
|
2011-09-16 06:42:53 +02:00
|
|
|
|
|
|
|
/* Replan if needed, and increment plan refcount for portal */
|
2017-04-01 06:17:18 +02:00
|
|
|
cplan = GetCachedPlan(plansource, paramLI, false, _SPI_current->queryEnv);
|
2011-09-16 06:42:53 +02:00
|
|
|
stmt_list = cplan->stmt_list;
|
|
|
|
|
|
|
|
if (!plan->saved)
|
2007-03-16 00:12:07 +01:00
|
|
|
{
|
2011-09-16 06:42:53 +02:00
|
|
|
/*
|
|
|
|
* We don't want the portal to depend on an unsaved CachedPlanSource,
|
|
|
|
* so must copy the plan into the portal's context. An error here
|
|
|
|
* will result in leaking our refcount on the plan, but it doesn't
|
|
|
|
* matter because the plan is unsaved and hence transient anyway.
|
|
|
|
*/
|
2017-12-16 23:26:26 +01:00
|
|
|
oldcontext = MemoryContextSwitchTo(portal->portalContext);
|
2011-09-16 06:42:53 +02:00
|
|
|
stmt_list = copyObject(stmt_list);
|
2007-03-16 00:12:07 +01:00
|
|
|
MemoryContextSwitchTo(oldcontext);
|
2011-09-16 06:42:53 +02:00
|
|
|
ReleaseCachedPlan(cplan, false);
|
2007-03-16 00:12:07 +01:00
|
|
|
cplan = NULL; /* portal shouldn't depend on cplan */
|
|
|
|
}
|
|
|
|
|
2003-05-02 22:54:36 +02:00
|
|
|
/*
|
|
|
|
* Set up the portal.
|
|
|
|
*/
|
|
|
|
PortalDefineQuery(portal,
|
2006-08-12 22:05:56 +02:00
|
|
|
NULL, /* no statement name */
|
2008-04-02 20:31:50 +02:00
|
|
|
query_string,
|
2007-03-16 00:12:07 +01:00
|
|
|
plansource->commandTag,
|
2007-02-20 18:32:18 +01:00
|
|
|
stmt_list,
|
2007-03-16 00:12:07 +01:00
|
|
|
cplan);
|
2003-05-02 22:54:36 +02:00
|
|
|
|
|
|
|
/*
|
2007-11-15 22:14:46 +01:00
|
|
|
* Set up options for portal. Default SCROLL type is chosen the same way
|
|
|
|
* as PerformCursorOpen does it.
|
2003-05-02 22:54:36 +02:00
|
|
|
*/
|
2007-04-16 19:21:24 +02:00
|
|
|
portal->cursorOptions = plan->cursor_options;
|
|
|
|
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
|
|
|
|
{
|
|
|
|
if (list_length(stmt_list) == 1 &&
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 19:51:29 +02:00
|
|
|
linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL &&
|
|
|
|
ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree))
|
2007-04-16 19:21:24 +02:00
|
|
|
portal->cursorOptions |= CURSOR_OPT_SCROLL;
|
|
|
|
else
|
|
|
|
portal->cursorOptions |= CURSOR_OPT_NO_SCROLL;
|
|
|
|
}
|
2001-05-21 16:22:19 +02:00
|
|
|
|
2007-10-25 01:27:08 +02:00
|
|
|
/*
|
2014-05-06 18:12:18 +02:00
|
|
|
* Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
|
2007-11-15 22:14:46 +01:00
|
|
|
* check in transformDeclareCursorStmt because the cursor options might
|
|
|
|
* not have come through there.
|
2007-10-25 01:27:08 +02:00
|
|
|
*/
|
|
|
|
if (portal->cursorOptions & CURSOR_OPT_SCROLL)
|
|
|
|
{
|
|
|
|
if (list_length(stmt_list) == 1 &&
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 19:51:29 +02:00
|
|
|
linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL)
|
2007-10-25 01:27:08 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
2007-10-25 15:48:57 +02:00
|
|
|
errmsg("DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported"),
|
2007-10-25 01:27:08 +02:00
|
|
|
errdetail("Scrollable cursors must be READ ONLY.")));
|
|
|
|
}
|
|
|
|
|
2017-04-05 01:36:39 +02:00
|
|
|
/* Make current query environment available to portal at execution time. */
|
|
|
|
portal->queryEnv = _SPI_current->queryEnv;
|
|
|
|
|
2007-03-17 04:15:38 +01:00
|
|
|
/*
|
2015-05-24 03:35:49 +02:00
|
|
|
* If told to be read-only, or in parallel mode, verify that this query is
|
|
|
|
* in fact read-only. This can't be done earlier because we need to look
|
|
|
|
* at the finished, planned queries. (In particular, we don't want to do
|
|
|
|
* it between GetCachedPlan and PortalDefineQuery, because throwing an
|
|
|
|
* error between those steps would result in leaking our plancache
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 21:02:14 +02:00
|
|
|
* refcount.)
|
2007-03-17 04:15:38 +01:00
|
|
|
*/
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 21:02:14 +02:00
|
|
|
if (read_only || IsInParallelMode())
|
2007-03-17 04:15:38 +01:00
|
|
|
{
|
|
|
|
ListCell *lc;
|
|
|
|
|
|
|
|
foreach(lc, stmt_list)
|
|
|
|
{
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 19:51:29 +02:00
|
|
|
PlannedStmt *pstmt = lfirst_node(PlannedStmt, lc);
|
2007-03-17 04:15:38 +01:00
|
|
|
|
|
|
|
if (!CommandIsReadOnly(pstmt))
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 21:02:14 +02:00
|
|
|
{
|
|
|
|
if (read_only)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
/* translator: %s is a SQL statement name */
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
errmsg("%s is not allowed in a non-volatile function",
|
|
|
|
CreateCommandTag((Node *) pstmt))));
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 21:02:14 +02:00
|
|
|
else
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
PreventCommandIfParallelMode(CreateCommandTag((Node *) pstmt));
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 21:02:14 +02:00
|
|
|
}
|
2007-03-17 04:15:38 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-11-26 21:55:43 +01:00
|
|
|
/* Set up the snapshot to use. */
|
|
|
|
if (read_only)
|
|
|
|
snapshot = GetActiveSnapshot();
|
|
|
|
else
|
|
|
|
{
|
|
|
|
CommandCounterIncrement();
|
|
|
|
snapshot = GetTransactionSnapshot();
|
|
|
|
}
|
|
|
|
|
2009-11-04 23:26:08 +01:00
|
|
|
/*
|
2010-02-26 03:01:40 +01:00
|
|
|
* If the plan has parameters, copy them into the portal. Note that this
|
|
|
|
* must be done after revalidating the plan, because in dynamic parameter
|
|
|
|
* cases the set of parameters could have changed during re-parsing.
|
2009-11-04 23:26:08 +01:00
|
|
|
*/
|
|
|
|
if (paramLI)
|
|
|
|
{
|
2017-12-16 23:26:26 +01:00
|
|
|
oldcontext = MemoryContextSwitchTo(portal->portalContext);
|
2009-11-04 23:26:08 +01:00
|
|
|
paramLI = copyParamList(paramLI);
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
}
|
|
|
|
|
2003-05-02 22:54:36 +02:00
|
|
|
/*
|
|
|
|
* Start portal execution.
|
|
|
|
*/
|
2012-11-26 21:55:43 +01:00
|
|
|
PortalStart(portal, paramLI, 0, snapshot);
|
2001-05-21 16:22:19 +02:00
|
|
|
|
2006-08-15 00:57:15 +02:00
|
|
|
Assert(portal->strategy != PORTAL_MULTI_QUERY);
|
2001-05-21 16:22:19 +02:00
|
|
|
|
2017-04-18 19:20:59 +02:00
|
|
|
/* Pop the error context stack */
|
|
|
|
error_context_stack = spierrcontext.previous;
|
|
|
|
|
Fix SPI_cursor_open() and SPI_is_cursor_plan() to push the SPI stack before
doing anything interesting, such as calling RevalidateCachedPlan(). The
necessity of this is demonstrated by an example from Willem Buitendyk:
during a replan, the planner might try to evaluate SPI-using functions,
and so we'd better be in a clean SPI context.
A small downside of this fix is that these two functions will now fail
outright if called when not inside a SPI-using procedure (ie, a
SPI_connect/SPI_finish pair). The documentation never promised or suggested
that that would work, though; and they are normally used in concert with
other functions, mainly SPI_prepare, that always have failed in such a case.
So the odds of breaking something seem pretty low.
In passing, make SPI_is_cursor_plan's error handling convention clearer,
and fix documentation's erroneous claim that SPI_cursor_open would
return NULL on error.
Before 8.3 these functions could not invoke replanning, so there is probably
no need for back-patching.
2008-02-12 05:09:44 +01:00
|
|
|
/* Pop the SPI stack */
|
2011-09-16 06:42:53 +02:00
|
|
|
_SPI_end_call(true);
|
Fix SPI_cursor_open() and SPI_is_cursor_plan() to push the SPI stack before
doing anything interesting, such as calling RevalidateCachedPlan(). The
necessity of this is demonstrated by an example from Willem Buitendyk:
during a replan, the planner might try to evaluate SPI-using functions,
and so we'd better be in a clean SPI context.
A small downside of this fix is that these two functions will now fail
outright if called when not inside a SPI-using procedure (ie, a
SPI_connect/SPI_finish pair). The documentation never promised or suggested
that that would work, though; and they are normally used in concert with
other functions, mainly SPI_prepare, that always have failed in such a case.
So the odds of breaking something seem pretty low.
In passing, make SPI_is_cursor_plan's error handling convention clearer,
and fix documentation's erroneous claim that SPI_cursor_open would
return NULL on error.
Before 8.3 these functions could not invoke replanning, so there is probably
no need for back-patching.
2008-02-12 05:09:44 +01:00
|
|
|
|
2001-05-21 16:22:19 +02:00
|
|
|
/* Return the created portal */
|
|
|
|
return portal;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SPI_cursor_find()
|
|
|
|
*
|
|
|
|
* Find the portal of an existing open cursor
|
|
|
|
*/
|
|
|
|
Portal
|
2002-12-30 23:10:54 +01:00
|
|
|
SPI_cursor_find(const char *name)
|
2001-05-21 16:22:19 +02:00
|
|
|
{
|
|
|
|
return GetPortalByName(name);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SPI_cursor_fetch()
|
|
|
|
*
|
|
|
|
* Fetch rows in a cursor
|
|
|
|
*/
|
|
|
|
void
|
2006-09-03 05:19:45 +02:00
|
|
|
SPI_cursor_fetch(Portal portal, bool forward, long count)
|
2001-05-21 16:22:19 +02:00
|
|
|
{
|
2007-04-16 03:14:58 +02:00
|
|
|
_SPI_cursor_operation(portal,
|
|
|
|
forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
|
2008-11-30 21:51:25 +01:00
|
|
|
CreateDestReceiver(DestSPI));
|
2005-11-03 18:11:40 +01:00
|
|
|
/* we know that the DestSPI receiver doesn't need a destroy call */
|
2001-05-21 16:22:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SPI_cursor_move()
|
|
|
|
*
|
|
|
|
* Move in a cursor
|
|
|
|
*/
|
|
|
|
void
|
2006-09-03 05:19:45 +02:00
|
|
|
SPI_cursor_move(Portal portal, bool forward, long count)
|
2001-05-21 16:22:19 +02:00
|
|
|
{
|
2007-04-16 03:14:58 +02:00
|
|
|
_SPI_cursor_operation(portal,
|
|
|
|
forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
|
|
|
|
None_Receiver);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SPI_scroll_cursor_fetch()
|
|
|
|
*
|
|
|
|
* Fetch rows in a scrollable cursor
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count)
|
|
|
|
{
|
|
|
|
_SPI_cursor_operation(portal,
|
|
|
|
direction, count,
|
2008-11-30 21:51:25 +01:00
|
|
|
CreateDestReceiver(DestSPI));
|
2007-04-16 03:14:58 +02:00
|
|
|
/* we know that the DestSPI receiver doesn't need a destroy call */
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SPI_scroll_cursor_move()
|
|
|
|
*
|
|
|
|
* Move in a scrollable cursor
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
SPI_scroll_cursor_move(Portal portal, FetchDirection direction, long count)
|
|
|
|
{
|
|
|
|
_SPI_cursor_operation(portal, direction, count, None_Receiver);
|
2001-05-21 16:22:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SPI_cursor_close()
|
|
|
|
*
|
|
|
|
* Close a cursor
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
SPI_cursor_close(Portal portal)
|
|
|
|
{
|
2001-10-05 19:28:13 +02:00
|
|
|
if (!PortalIsValid(portal))
|
2001-05-21 16:22:19 +02:00
|
|
|
elog(ERROR, "invalid portal in SPI cursor operation");
|
|
|
|
|
This patch implements holdable cursors, following the proposal
(materialization into a tuple store) discussed on pgsql-hackers earlier.
I've updated the documentation and the regression tests.
Notes on the implementation:
- I needed to change the tuple store API slightly -- it assumes that it
won't be used to hold data across transaction boundaries, so the temp
files that it uses for on-disk storage are automatically reclaimed at
end-of-transaction. I added a flag to tuplestore_begin_heap() to control
this behavior. Is changing the tuple store API in this fashion OK?
- in order to store executor results in a tuple store, I added a new
CommandDest. This works well for the most part, with one exception: the
current DestFunction API doesn't provide enough information to allow the
Executor to store results into an arbitrary tuple store (where the
particular tuple store to use is chosen by the call site of
ExecutorRun). To workaround this, I've temporarily hacked up a solution
that works, but is not ideal: since the receiveTuple DestFunction is
passed the portal name, we can use that to lookup the Portal data
structure for the cursor and then use that to get at the tuple store the
Portal is using. This unnecessarily ties the Portal code with the
tupleReceiver code, but it works...
The proper fix for this is probably to change the DestFunction API --
Tom suggested passing the full QueryDesc to the receiveTuple function.
In that case, callers of ExecutorRun could "subclass" QueryDesc to add
any additional fields that their particular CommandDest needed to get
access to. This approach would work, but I'd like to think about it for
a little bit longer before deciding which route to go. In the mean time,
the code works fine, so I don't think a fix is urgent.
- (semi-related) I added a NO SCROLL keyword to DECLARE CURSOR, and
adjusted the behavior of SCROLL in accordance with the discussion on
-hackers.
- (unrelated) Cleaned up some SGML markup in sql.sgml, copy.sgml
Neil Conway
2003-03-27 17:51:29 +01:00
|
|
|
PortalDrop(portal, false);
|
2001-05-21 16:22:19 +02:00
|
|
|
}
|
|
|
|
|
2004-03-05 01:47:01 +01:00
|
|
|
/*
|
|
|
|
* Returns the Oid representing the type id for argument at argIndex. First
|
|
|
|
* parameter is at index zero.
|
|
|
|
*/
|
|
|
|
Oid
|
2007-03-16 00:12:07 +01:00
|
|
|
SPI_getargtypeid(SPIPlanPtr plan, int argIndex)
|
2004-03-05 01:47:01 +01:00
|
|
|
{
|
2007-03-16 00:12:07 +01:00
|
|
|
if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC ||
|
|
|
|
argIndex < 0 || argIndex >= plan->nargs)
|
2004-03-21 23:29:11 +01:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_ARGUMENT;
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
2007-03-16 00:12:07 +01:00
|
|
|
return plan->argtypes[argIndex];
|
2004-03-05 01:47:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns the number of arguments for the prepared plan.
|
|
|
|
*/
|
|
|
|
int
|
2007-03-16 00:12:07 +01:00
|
|
|
SPI_getargcount(SPIPlanPtr plan)
|
2004-03-05 01:47:01 +01:00
|
|
|
{
|
2007-03-16 00:12:07 +01:00
|
|
|
if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
|
2004-03-21 23:29:11 +01:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_ARGUMENT;
|
|
|
|
return -1;
|
|
|
|
}
|
2007-03-16 00:12:07 +01:00
|
|
|
return plan->nargs;
|
2004-03-05 01:47:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns true if the plan contains exactly one command
|
2006-08-12 22:05:56 +02:00
|
|
|
* and that command returns tuples to the caller (eg, SELECT or
|
|
|
|
* INSERT ... RETURNING, but not SELECT ... INTO). In essence,
|
|
|
|
* the result indicates if the command can be used with SPI_cursor_open
|
2004-03-05 01:47:01 +01:00
|
|
|
*
|
|
|
|
* Parameters
|
2006-08-12 22:05:56 +02:00
|
|
|
* plan: A plan previously prepared using SPI_prepare
|
2004-03-05 01:47:01 +01:00
|
|
|
*/
|
|
|
|
bool
|
2007-03-16 00:12:07 +01:00
|
|
|
SPI_is_cursor_plan(SPIPlanPtr plan)
|
2004-03-05 01:47:01 +01:00
|
|
|
{
|
2007-03-16 00:12:07 +01:00
|
|
|
CachedPlanSource *plansource;
|
2004-03-21 23:29:11 +01:00
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC)
|
2004-03-21 23:29:11 +01:00
|
|
|
{
|
|
|
|
SPI_result = SPI_ERROR_ARGUMENT;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
if (list_length(plan->plancache_list) != 1)
|
Fix SPI_cursor_open() and SPI_is_cursor_plan() to push the SPI stack before
doing anything interesting, such as calling RevalidateCachedPlan(). The
necessity of this is demonstrated by an example from Willem Buitendyk:
during a replan, the planner might try to evaluate SPI-using functions,
and so we'd better be in a clean SPI context.
A small downside of this fix is that these two functions will now fail
outright if called when not inside a SPI-using procedure (ie, a
SPI_connect/SPI_finish pair). The documentation never promised or suggested
that that would work, though; and they are normally used in concert with
other functions, mainly SPI_prepare, that always have failed in such a case.
So the odds of breaking something seem pretty low.
In passing, make SPI_is_cursor_plan's error handling convention clearer,
and fix documentation's erroneous claim that SPI_cursor_open would
return NULL on error.
Before 8.3 these functions could not invoke replanning, so there is probably
no need for back-patching.
2008-02-12 05:09:44 +01:00
|
|
|
{
|
|
|
|
SPI_result = 0;
|
2006-08-15 00:57:15 +02:00
|
|
|
return false; /* not exactly 1 pre-rewrite command */
|
Fix SPI_cursor_open() and SPI_is_cursor_plan() to push the SPI stack before
doing anything interesting, such as calling RevalidateCachedPlan(). The
necessity of this is demonstrated by an example from Willem Buitendyk:
during a replan, the planner might try to evaluate SPI-using functions,
and so we'd better be in a clean SPI context.
A small downside of this fix is that these two functions will now fail
outright if called when not inside a SPI-using procedure (ie, a
SPI_connect/SPI_finish pair). The documentation never promised or suggested
that that would work, though; and they are normally used in concert with
other functions, mainly SPI_prepare, that always have failed in such a case.
So the odds of breaking something seem pretty low.
In passing, make SPI_is_cursor_plan's error handling convention clearer,
and fix documentation's erroneous claim that SPI_cursor_open would
return NULL on error.
Before 8.3 these functions could not invoke replanning, so there is probably
no need for back-patching.
2008-02-12 05:09:44 +01:00
|
|
|
}
|
2007-03-16 00:12:07 +01:00
|
|
|
plansource = (CachedPlanSource *) linitial(plan->plancache_list);
|
2004-03-21 23:29:11 +01:00
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
/*
|
|
|
|
* We used to force revalidation of the cached plan here, but that seems
|
|
|
|
* unnecessary: invalidation could mean a change in the rowtype of the
|
|
|
|
* tuples returned by a plan, but not whether it returns tuples at all.
|
|
|
|
*/
|
Fix SPI_cursor_open() and SPI_is_cursor_plan() to push the SPI stack before
doing anything interesting, such as calling RevalidateCachedPlan(). The
necessity of this is demonstrated by an example from Willem Buitendyk:
during a replan, the planner might try to evaluate SPI-using functions,
and so we'd better be in a clean SPI context.
A small downside of this fix is that these two functions will now fail
outright if called when not inside a SPI-using procedure (ie, a
SPI_connect/SPI_finish pair). The documentation never promised or suggested
that that would work, though; and they are normally used in concert with
other functions, mainly SPI_prepare, that always have failed in such a case.
So the odds of breaking something seem pretty low.
In passing, make SPI_is_cursor_plan's error handling convention clearer,
and fix documentation's erroneous claim that SPI_cursor_open would
return NULL on error.
Before 8.3 these functions could not invoke replanning, so there is probably
no need for back-patching.
2008-02-12 05:09:44 +01:00
|
|
|
SPI_result = 0;
|
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
/* Does it return tuples? */
|
|
|
|
if (plansource->resultDesc)
|
|
|
|
return true;
|
|
|
|
|
2004-03-21 23:29:11 +01:00
|
|
|
return false;
|
2004-03-05 01:47:01 +01:00
|
|
|
}
|
|
|
|
|
2008-09-16 01:37:40 +02:00
|
|
|
/*
|
|
|
|
* SPI_plan_is_valid --- test whether a SPI plan is currently valid
|
|
|
|
* (that is, not marked as being in need of revalidation).
|
|
|
|
*
|
|
|
|
* See notes for CachedPlanIsValid before using this.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
SPI_plan_is_valid(SPIPlanPtr plan)
|
|
|
|
{
|
2011-09-16 06:42:53 +02:00
|
|
|
ListCell *lc;
|
2008-09-16 01:37:40 +02:00
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
Assert(plan->magic == _SPI_PLAN_MAGIC);
|
2008-09-16 01:37:40 +02:00
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
foreach(lc, plan->plancache_list)
|
2008-09-16 01:37:40 +02:00
|
|
|
{
|
2011-09-16 06:42:53 +02:00
|
|
|
CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
|
|
|
|
|
|
|
|
if (!CachedPlanIsValid(plansource))
|
|
|
|
return false;
|
2008-09-16 01:37:40 +02:00
|
|
|
}
|
2011-09-16 06:42:53 +02:00
|
|
|
return true;
|
2008-09-16 01:37:40 +02:00
|
|
|
}
|
|
|
|
|
2004-07-31 22:55:45 +02:00
|
|
|
/*
|
|
|
|
* SPI_result_code_string --- convert any SPI return code to a string
|
|
|
|
*
|
2014-05-06 18:12:18 +02:00
|
|
|
* This is often useful in error messages. Most callers will probably
|
2004-07-31 22:55:45 +02:00
|
|
|
* only pass negative (error-case) codes, but for generality we recognize
|
|
|
|
* the success codes too.
|
|
|
|
*/
|
|
|
|
const char *
|
|
|
|
SPI_result_code_string(int code)
|
|
|
|
{
|
|
|
|
static char buf[64];
|
|
|
|
|
|
|
|
switch (code)
|
|
|
|
{
|
|
|
|
case SPI_ERROR_CONNECT:
|
|
|
|
return "SPI_ERROR_CONNECT";
|
|
|
|
case SPI_ERROR_COPY:
|
|
|
|
return "SPI_ERROR_COPY";
|
|
|
|
case SPI_ERROR_OPUNKNOWN:
|
|
|
|
return "SPI_ERROR_OPUNKNOWN";
|
|
|
|
case SPI_ERROR_UNCONNECTED:
|
|
|
|
return "SPI_ERROR_UNCONNECTED";
|
|
|
|
case SPI_ERROR_ARGUMENT:
|
|
|
|
return "SPI_ERROR_ARGUMENT";
|
|
|
|
case SPI_ERROR_PARAM:
|
|
|
|
return "SPI_ERROR_PARAM";
|
|
|
|
case SPI_ERROR_TRANSACTION:
|
|
|
|
return "SPI_ERROR_TRANSACTION";
|
|
|
|
case SPI_ERROR_NOATTRIBUTE:
|
|
|
|
return "SPI_ERROR_NOATTRIBUTE";
|
|
|
|
case SPI_ERROR_NOOUTFUNC:
|
|
|
|
return "SPI_ERROR_NOOUTFUNC";
|
|
|
|
case SPI_ERROR_TYPUNKNOWN:
|
|
|
|
return "SPI_ERROR_TYPUNKNOWN";
|
2017-04-01 06:17:18 +02:00
|
|
|
case SPI_ERROR_REL_DUPLICATE:
|
|
|
|
return "SPI_ERROR_REL_DUPLICATE";
|
|
|
|
case SPI_ERROR_REL_NOT_FOUND:
|
|
|
|
return "SPI_ERROR_REL_NOT_FOUND";
|
2004-07-31 22:55:45 +02:00
|
|
|
case SPI_OK_CONNECT:
|
|
|
|
return "SPI_OK_CONNECT";
|
|
|
|
case SPI_OK_FINISH:
|
|
|
|
return "SPI_OK_FINISH";
|
|
|
|
case SPI_OK_FETCH:
|
|
|
|
return "SPI_OK_FETCH";
|
|
|
|
case SPI_OK_UTILITY:
|
|
|
|
return "SPI_OK_UTILITY";
|
|
|
|
case SPI_OK_SELECT:
|
|
|
|
return "SPI_OK_SELECT";
|
|
|
|
case SPI_OK_SELINTO:
|
|
|
|
return "SPI_OK_SELINTO";
|
|
|
|
case SPI_OK_INSERT:
|
|
|
|
return "SPI_OK_INSERT";
|
|
|
|
case SPI_OK_DELETE:
|
|
|
|
return "SPI_OK_DELETE";
|
|
|
|
case SPI_OK_UPDATE:
|
|
|
|
return "SPI_OK_UPDATE";
|
|
|
|
case SPI_OK_CURSOR:
|
|
|
|
return "SPI_OK_CURSOR";
|
2006-08-28 01:47:58 +02:00
|
|
|
case SPI_OK_INSERT_RETURNING:
|
|
|
|
return "SPI_OK_INSERT_RETURNING";
|
|
|
|
case SPI_OK_DELETE_RETURNING:
|
|
|
|
return "SPI_OK_DELETE_RETURNING";
|
|
|
|
case SPI_OK_UPDATE_RETURNING:
|
|
|
|
return "SPI_OK_UPDATE_RETURNING";
|
2009-01-21 12:02:40 +01:00
|
|
|
case SPI_OK_REWRITTEN:
|
|
|
|
return "SPI_OK_REWRITTEN";
|
2017-04-01 06:17:18 +02:00
|
|
|
case SPI_OK_REL_REGISTER:
|
|
|
|
return "SPI_OK_REL_REGISTER";
|
|
|
|
case SPI_OK_REL_UNREGISTER:
|
|
|
|
return "SPI_OK_REL_UNREGISTER";
|
2004-07-31 22:55:45 +02:00
|
|
|
}
|
|
|
|
/* Unrecognized code ... return something useful ... */
|
|
|
|
sprintf(buf, "Unrecognized SPI code %d", code);
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
Fix plpgsql's reporting of plan-time errors in possibly-simple expressions.
exec_simple_check_plan and exec_eval_simple_expr attempted to call
GetCachedPlan directly. This meant that if an error was thrown during
planning, the resulting context traceback would not include the line
normally contributed by _SPI_error_callback. This is already inconsistent,
but just to be really odd, a re-execution of the very same expression
*would* show the additional context line, because we'd already have cached
the plan and marked the expression as non-simple.
The problem is easy to demonstrate in 9.2 and HEAD because planning of a
cached plan doesn't occur at all until GetCachedPlan is done. In earlier
versions, it could only be an issue if initial planning had succeeded, then
a replan was forced (already somewhat improbable for a simple expression),
and the replan attempt failed. Since the issue is mainly cosmetic in older
branches anyway, it doesn't seem worth the risk of trying to fix it there.
It is worth fixing in 9.2 since the instability of the context printout can
affect the results of GET STACKED DIAGNOSTICS, as per a recent discussion
on pgsql-novice.
To fix, introduce a SPI function that wraps GetCachedPlan while installing
the correct callback function. Use this instead of calling GetCachedPlan
directly from plpgsql.
Also introduce a wrapper function for extracting a SPI plan's
CachedPlanSource list. This lets us stop including spi_priv.h in
pl_exec.c, which was never a very good idea from a modularity standpoint.
In passing, fix a similar inconsistency that could occur in SPI_cursor_open,
which was also calling GetCachedPlan without setting up a context callback.
2013-01-31 02:02:23 +01:00
|
|
|
/*
|
|
|
|
* SPI_plan_get_plan_sources --- get a SPI plan's underlying list of
|
|
|
|
* CachedPlanSources.
|
|
|
|
*
|
2017-04-05 06:38:25 +02:00
|
|
|
* This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL
|
2014-05-06 18:12:18 +02:00
|
|
|
* look directly into the SPIPlan for itself). It's not documented in
|
Fix plpgsql's reporting of plan-time errors in possibly-simple expressions.
exec_simple_check_plan and exec_eval_simple_expr attempted to call
GetCachedPlan directly. This meant that if an error was thrown during
planning, the resulting context traceback would not include the line
normally contributed by _SPI_error_callback. This is already inconsistent,
but just to be really odd, a re-execution of the very same expression
*would* show the additional context line, because we'd already have cached
the plan and marked the expression as non-simple.
The problem is easy to demonstrate in 9.2 and HEAD because planning of a
cached plan doesn't occur at all until GetCachedPlan is done. In earlier
versions, it could only be an issue if initial planning had succeeded, then
a replan was forced (already somewhat improbable for a simple expression),
and the replan attempt failed. Since the issue is mainly cosmetic in older
branches anyway, it doesn't seem worth the risk of trying to fix it there.
It is worth fixing in 9.2 since the instability of the context printout can
affect the results of GET STACKED DIAGNOSTICS, as per a recent discussion
on pgsql-novice.
To fix, introduce a SPI function that wraps GetCachedPlan while installing
the correct callback function. Use this instead of calling GetCachedPlan
directly from plpgsql.
Also introduce a wrapper function for extracting a SPI plan's
CachedPlanSource list. This lets us stop including spi_priv.h in
pl_exec.c, which was never a very good idea from a modularity standpoint.
In passing, fix a similar inconsistency that could occur in SPI_cursor_open,
which was also calling GetCachedPlan without setting up a context callback.
2013-01-31 02:02:23 +01:00
|
|
|
* spi.sgml because we'd just as soon not have too many places using this.
|
|
|
|
*/
|
|
|
|
List *
|
|
|
|
SPI_plan_get_plan_sources(SPIPlanPtr plan)
|
|
|
|
{
|
|
|
|
Assert(plan->magic == _SPI_PLAN_MAGIC);
|
|
|
|
return plan->plancache_list;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SPI_plan_get_cached_plan --- get a SPI plan's generic CachedPlan,
|
|
|
|
* if the SPI plan contains exactly one CachedPlanSource. If not,
|
|
|
|
* return NULL. Caller is responsible for doing ReleaseCachedPlan().
|
|
|
|
*
|
2017-04-05 06:38:25 +02:00
|
|
|
* This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL
|
2014-05-06 18:12:18 +02:00
|
|
|
* look directly into the SPIPlan for itself). It's not documented in
|
Fix plpgsql's reporting of plan-time errors in possibly-simple expressions.
exec_simple_check_plan and exec_eval_simple_expr attempted to call
GetCachedPlan directly. This meant that if an error was thrown during
planning, the resulting context traceback would not include the line
normally contributed by _SPI_error_callback. This is already inconsistent,
but just to be really odd, a re-execution of the very same expression
*would* show the additional context line, because we'd already have cached
the plan and marked the expression as non-simple.
The problem is easy to demonstrate in 9.2 and HEAD because planning of a
cached plan doesn't occur at all until GetCachedPlan is done. In earlier
versions, it could only be an issue if initial planning had succeeded, then
a replan was forced (already somewhat improbable for a simple expression),
and the replan attempt failed. Since the issue is mainly cosmetic in older
branches anyway, it doesn't seem worth the risk of trying to fix it there.
It is worth fixing in 9.2 since the instability of the context printout can
affect the results of GET STACKED DIAGNOSTICS, as per a recent discussion
on pgsql-novice.
To fix, introduce a SPI function that wraps GetCachedPlan while installing
the correct callback function. Use this instead of calling GetCachedPlan
directly from plpgsql.
Also introduce a wrapper function for extracting a SPI plan's
CachedPlanSource list. This lets us stop including spi_priv.h in
pl_exec.c, which was never a very good idea from a modularity standpoint.
In passing, fix a similar inconsistency that could occur in SPI_cursor_open,
which was also calling GetCachedPlan without setting up a context callback.
2013-01-31 02:02:23 +01:00
|
|
|
* spi.sgml because we'd just as soon not have too many places using this.
|
|
|
|
*/
|
|
|
|
CachedPlan *
|
|
|
|
SPI_plan_get_cached_plan(SPIPlanPtr plan)
|
|
|
|
{
|
|
|
|
CachedPlanSource *plansource;
|
|
|
|
CachedPlan *cplan;
|
|
|
|
ErrorContextCallback spierrcontext;
|
|
|
|
|
|
|
|
Assert(plan->magic == _SPI_PLAN_MAGIC);
|
|
|
|
|
|
|
|
/* Can't support one-shot plans here */
|
|
|
|
if (plan->oneshot)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* Must have exactly one CachedPlanSource */
|
|
|
|
if (list_length(plan->plancache_list) != 1)
|
|
|
|
return NULL;
|
|
|
|
plansource = (CachedPlanSource *) linitial(plan->plancache_list);
|
|
|
|
|
|
|
|
/* Setup error traceback support for ereport() */
|
|
|
|
spierrcontext.callback = _SPI_error_callback;
|
2019-01-29 01:16:24 +01:00
|
|
|
spierrcontext.arg = unconstify(char *, plansource->query_string);
|
Fix plpgsql's reporting of plan-time errors in possibly-simple expressions.
exec_simple_check_plan and exec_eval_simple_expr attempted to call
GetCachedPlan directly. This meant that if an error was thrown during
planning, the resulting context traceback would not include the line
normally contributed by _SPI_error_callback. This is already inconsistent,
but just to be really odd, a re-execution of the very same expression
*would* show the additional context line, because we'd already have cached
the plan and marked the expression as non-simple.
The problem is easy to demonstrate in 9.2 and HEAD because planning of a
cached plan doesn't occur at all until GetCachedPlan is done. In earlier
versions, it could only be an issue if initial planning had succeeded, then
a replan was forced (already somewhat improbable for a simple expression),
and the replan attempt failed. Since the issue is mainly cosmetic in older
branches anyway, it doesn't seem worth the risk of trying to fix it there.
It is worth fixing in 9.2 since the instability of the context printout can
affect the results of GET STACKED DIAGNOSTICS, as per a recent discussion
on pgsql-novice.
To fix, introduce a SPI function that wraps GetCachedPlan while installing
the correct callback function. Use this instead of calling GetCachedPlan
directly from plpgsql.
Also introduce a wrapper function for extracting a SPI plan's
CachedPlanSource list. This lets us stop including spi_priv.h in
pl_exec.c, which was never a very good idea from a modularity standpoint.
In passing, fix a similar inconsistency that could occur in SPI_cursor_open,
which was also calling GetCachedPlan without setting up a context callback.
2013-01-31 02:02:23 +01:00
|
|
|
spierrcontext.previous = error_context_stack;
|
|
|
|
error_context_stack = &spierrcontext;
|
|
|
|
|
|
|
|
/* Get the generic plan for the query */
|
2017-04-01 06:17:18 +02:00
|
|
|
cplan = GetCachedPlan(plansource, NULL, plan->saved,
|
|
|
|
_SPI_current->queryEnv);
|
Fix plpgsql's reporting of plan-time errors in possibly-simple expressions.
exec_simple_check_plan and exec_eval_simple_expr attempted to call
GetCachedPlan directly. This meant that if an error was thrown during
planning, the resulting context traceback would not include the line
normally contributed by _SPI_error_callback. This is already inconsistent,
but just to be really odd, a re-execution of the very same expression
*would* show the additional context line, because we'd already have cached
the plan and marked the expression as non-simple.
The problem is easy to demonstrate in 9.2 and HEAD because planning of a
cached plan doesn't occur at all until GetCachedPlan is done. In earlier
versions, it could only be an issue if initial planning had succeeded, then
a replan was forced (already somewhat improbable for a simple expression),
and the replan attempt failed. Since the issue is mainly cosmetic in older
branches anyway, it doesn't seem worth the risk of trying to fix it there.
It is worth fixing in 9.2 since the instability of the context printout can
affect the results of GET STACKED DIAGNOSTICS, as per a recent discussion
on pgsql-novice.
To fix, introduce a SPI function that wraps GetCachedPlan while installing
the correct callback function. Use this instead of calling GetCachedPlan
directly from plpgsql.
Also introduce a wrapper function for extracting a SPI plan's
CachedPlanSource list. This lets us stop including spi_priv.h in
pl_exec.c, which was never a very good idea from a modularity standpoint.
In passing, fix a similar inconsistency that could occur in SPI_cursor_open,
which was also calling GetCachedPlan without setting up a context callback.
2013-01-31 02:02:23 +01:00
|
|
|
Assert(cplan == plansource->gplan);
|
|
|
|
|
|
|
|
/* Pop the error context stack */
|
|
|
|
error_context_stack = spierrcontext.previous;
|
|
|
|
|
|
|
|
return cplan;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
1997-09-24 10:28:37 +02:00
|
|
|
/* =================== private functions =================== */
|
|
|
|
|
2003-01-21 23:06:12 +01:00
|
|
|
/*
|
2003-05-06 22:26:28 +02:00
|
|
|
* spi_dest_startup
|
2003-01-21 23:06:12 +01:00
|
|
|
* Initialize to receive tuples from Executor into SPITupleTable
|
|
|
|
* of current SPI procedure
|
|
|
|
*/
|
|
|
|
void
|
2003-05-08 20:16:37 +02:00
|
|
|
spi_dest_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
|
2003-01-21 23:06:12 +01:00
|
|
|
{
|
|
|
|
SPITupleTable *tuptable;
|
|
|
|
MemoryContext oldcxt;
|
|
|
|
MemoryContext tuptabcxt;
|
|
|
|
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
if (_SPI_current == NULL)
|
|
|
|
elog(ERROR, "spi_dest_startup called while not connected to SPI");
|
2003-01-21 23:06:12 +01:00
|
|
|
|
|
|
|
if (_SPI_current->tuptable != NULL)
|
2003-07-21 19:05:12 +02:00
|
|
|
elog(ERROR, "improper call to spi_dest_startup");
|
2003-01-21 23:06:12 +01:00
|
|
|
|
Prevent leakage of SPI tuple tables during subtransaction abort.
plpgsql often just remembers SPI-result tuple tables in local variables,
and has no mechanism for freeing them if an ereport(ERROR) causes an escape
out of the execution function whose local variable it is. In the original
coding, that wasn't a problem because the tuple table would be cleaned up
when the function's SPI context went away during transaction abort.
However, once plpgsql grew the ability to trap exceptions, repeated
trapping of errors within a function could result in significant
intra-function-call memory leakage, as illustrated in bug #8279 from
Chad Wagner.
We could fix this locally in plpgsql with a bunch of PG_TRY/PG_CATCH
coding, but that would be tedious, probably slow, and prone to bugs of
omission; moreover it would do nothing for similar risks elsewhere.
What seems like a better plan is to make SPI itself responsible for
freeing tuple tables at subtransaction abort. This patch attacks the
problem that way, keeping a list of live tuple tables within each SPI
function context. Currently, such freeing is automatic for tuple tables
made within the failed subtransaction. We might later add a SPI call to
mark a tuple table as not to be freed this way, allowing callers to opt
out; but until someone exhibits a clear use-case for such behavior, it
doesn't seem worth bothering.
A very useful side-effect of this change is that SPI_freetuptable() can
now defend itself against bad calls, such as duplicate free requests;
this should make things more robust in many places. (In particular,
this reduces the risks involved if a third-party extension contains
now-redundant SPI_freetuptable() calls in error cleanup code.)
Even though the leakage problem is of long standing, it seems imprudent
to back-patch this into stable branches, since it does represent an API
semantics change for SPI users. We'll patch this in 9.3, but live with
the leakage in older branches.
2013-07-25 22:45:43 +02:00
|
|
|
/* We create the tuple table context as a child of procCxt */
|
|
|
|
|
2003-01-21 23:06:12 +01:00
|
|
|
oldcxt = _SPI_procmem(); /* switch to procedure memory context */
|
|
|
|
|
|
|
|
tuptabcxt = AllocSetContextCreate(CurrentMemoryContext,
|
|
|
|
"SPI TupTable",
|
Add macros to make AllocSetContextCreate() calls simpler and safer.
I found that half a dozen (nearly 5%) of our AllocSetContextCreate calls
had typos in the context-sizing parameters. While none of these led to
especially significant problems, they did create minor inefficiencies,
and it's now clear that expecting people to copy-and-paste those calls
accurately is not a great idea. Let's reduce the risk of future errors
by introducing single macros that encapsulate the common use-cases.
Three such macros are enough to cover all but two special-purpose contexts;
those two calls can be left as-is, I think.
While this patch doesn't in itself improve matters for third-party
extensions, it doesn't break anything for them either, and they can
gradually adopt the simplified notation over time.
In passing, change TopMemoryContext to use the default allocation
parameters. Formerly it could only be extended 8K at a time. That was
probably reasonable when this code was written; but nowadays we create
many more contexts than we did then, so that it's not unusual to have a
couple hundred K in TopMemoryContext, even without considering various
dubious code that sticks other things there. There seems no good reason
not to let it use growing blocks like most other contexts.
Back-patch to 9.6, mostly because that's still close enough to HEAD that
it's easy to do so, and keeping the branches in sync can be expected to
avoid some future back-patching pain. The bugs fixed by these changes
don't seem to be significant enough to justify fixing them further back.
Discussion: <21072.1472321324@sss.pgh.pa.us>
2016-08-27 23:50:38 +02:00
|
|
|
ALLOCSET_DEFAULT_SIZES);
|
2003-01-21 23:06:12 +01:00
|
|
|
MemoryContextSwitchTo(tuptabcxt);
|
|
|
|
|
|
|
|
_SPI_current->tuptable = tuptable = (SPITupleTable *)
|
Prevent leakage of SPI tuple tables during subtransaction abort.
plpgsql often just remembers SPI-result tuple tables in local variables,
and has no mechanism for freeing them if an ereport(ERROR) causes an escape
out of the execution function whose local variable it is. In the original
coding, that wasn't a problem because the tuple table would be cleaned up
when the function's SPI context went away during transaction abort.
However, once plpgsql grew the ability to trap exceptions, repeated
trapping of errors within a function could result in significant
intra-function-call memory leakage, as illustrated in bug #8279 from
Chad Wagner.
We could fix this locally in plpgsql with a bunch of PG_TRY/PG_CATCH
coding, but that would be tedious, probably slow, and prone to bugs of
omission; moreover it would do nothing for similar risks elsewhere.
What seems like a better plan is to make SPI itself responsible for
freeing tuple tables at subtransaction abort. This patch attacks the
problem that way, keeping a list of live tuple tables within each SPI
function context. Currently, such freeing is automatic for tuple tables
made within the failed subtransaction. We might later add a SPI call to
mark a tuple table as not to be freed this way, allowing callers to opt
out; but until someone exhibits a clear use-case for such behavior, it
doesn't seem worth bothering.
A very useful side-effect of this change is that SPI_freetuptable() can
now defend itself against bad calls, such as duplicate free requests;
this should make things more robust in many places. (In particular,
this reduces the risks involved if a third-party extension contains
now-redundant SPI_freetuptable() calls in error cleanup code.)
Even though the leakage problem is of long standing, it seems imprudent
to back-patch this into stable branches, since it does represent an API
semantics change for SPI users. We'll patch this in 9.3, but live with
the leakage in older branches.
2013-07-25 22:45:43 +02:00
|
|
|
palloc0(sizeof(SPITupleTable));
|
2003-01-21 23:06:12 +01:00
|
|
|
tuptable->tuptabcxt = tuptabcxt;
|
Prevent leakage of SPI tuple tables during subtransaction abort.
plpgsql often just remembers SPI-result tuple tables in local variables,
and has no mechanism for freeing them if an ereport(ERROR) causes an escape
out of the execution function whose local variable it is. In the original
coding, that wasn't a problem because the tuple table would be cleaned up
when the function's SPI context went away during transaction abort.
However, once plpgsql grew the ability to trap exceptions, repeated
trapping of errors within a function could result in significant
intra-function-call memory leakage, as illustrated in bug #8279 from
Chad Wagner.
We could fix this locally in plpgsql with a bunch of PG_TRY/PG_CATCH
coding, but that would be tedious, probably slow, and prone to bugs of
omission; moreover it would do nothing for similar risks elsewhere.
What seems like a better plan is to make SPI itself responsible for
freeing tuple tables at subtransaction abort. This patch attacks the
problem that way, keeping a list of live tuple tables within each SPI
function context. Currently, such freeing is automatic for tuple tables
made within the failed subtransaction. We might later add a SPI call to
mark a tuple table as not to be freed this way, allowing callers to opt
out; but until someone exhibits a clear use-case for such behavior, it
doesn't seem worth bothering.
A very useful side-effect of this change is that SPI_freetuptable() can
now defend itself against bad calls, such as duplicate free requests;
this should make things more robust in many places. (In particular,
this reduces the risks involved if a third-party extension contains
now-redundant SPI_freetuptable() calls in error cleanup code.)
Even though the leakage problem is of long standing, it seems imprudent
to back-patch this into stable branches, since it does represent an API
semantics change for SPI users. We'll patch this in 9.3, but live with
the leakage in older branches.
2013-07-25 22:45:43 +02:00
|
|
|
tuptable->subid = GetCurrentSubTransactionId();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The tuptable is now valid enough to be freed by AtEOSubXact_SPI, so put
|
|
|
|
* it onto the SPI context's tuptables list. This will ensure it's not
|
|
|
|
* leaked even in the unlikely event the following few lines fail.
|
|
|
|
*/
|
|
|
|
slist_push_head(&_SPI_current->tuptables, &tuptable->next);
|
|
|
|
|
|
|
|
/* set up initial allocations */
|
2003-01-21 23:06:12 +01:00
|
|
|
tuptable->alloced = tuptable->free = 128;
|
|
|
|
tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple));
|
|
|
|
tuptable->tupdesc = CreateTupleDescCopy(typeinfo);
|
|
|
|
|
|
|
|
MemoryContextSwitchTo(oldcxt);
|
|
|
|
}
|
|
|
|
|
1997-09-06 13:23:05 +02:00
|
|
|
/*
 * spi_printtup
 *		store tuple retrieved by Executor into SPITupleTable
 *		of current SPI procedure
 *
 * This is the receiveSlot callback of the SPI DestReceiver.  Each tuple
 * produced by the executor is copied into the tuple table that
 * spi_dest_startup set up, growing the pointer array as needed.
 * Always returns true (SPI never asks the executor to stop early here).
 */
bool
spi_printtup(TupleTableSlot *slot, DestReceiver *self)
{
	SPITupleTable *tuptable;
	MemoryContext oldcxt;

	/* Refuse to run outside an SPI connection; _SPI_current is global state */
	if (_SPI_current == NULL)
		elog(ERROR, "spi_printtup called while not connected to SPI");

	tuptable = _SPI_current->tuptable;
	/* spi_dest_startup must have created the tuple table first */
	if (tuptable == NULL)
		elog(ERROR, "improper call to spi_printtup");

	/*
	 * Switch into the tuple table's private context so the copied tuple (and
	 * any enlarged pointer array) lives exactly as long as the tuptable.
	 */
	oldcxt = MemoryContextSwitchTo(tuptable->tuptabcxt);

	if (tuptable->free == 0)
	{
		/* Double the size of the pointer array */
		tuptable->free = tuptable->alloced;
		tuptable->alloced += tuptable->free;
		/* repalloc_huge allows growth past the 1GB palloc limit */
		tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals,
													 tuptable->alloced * sizeof(HeapTuple));
	}

	/* Store a copy of the slot's tuple at the next free array position */
	tuptable->vals[tuptable->alloced - tuptable->free] =
		ExecCopySlotHeapTuple(slot);
	(tuptable->free)--;

	MemoryContextSwitchTo(oldcxt);

	return true;
}
|
|
|
|
|
1997-09-06 13:23:05 +02:00
|
|
|
/*
|
|
|
|
* Static functions
|
|
|
|
*/
|
|
|
|
|
2002-10-15 01:49:20 +02:00
|
|
|
/*
 * Parse and analyze a querystring.
 *
 * At entry, plan->argtypes and plan->nargs (or alternatively plan->parserSetup
 * and plan->parserSetupArg) must be valid, as must plan->cursor_options.
 *
 * Results are stored into *plan (specifically, plan->plancache_list).
 * Note that the result data is all in CurrentMemoryContext or child contexts
 * thereof; in practice this means it is in the SPI executor context, and
 * what we are creating is a "temporary" SPIPlan.  Cruft generated during
 * parsing is also left in CurrentMemoryContext.
 */
static void
_SPI_prepare_plan(const char *src, SPIPlanPtr plan)
{
	List	   *raw_parsetree_list;
	List	   *plancache_list;
	ListCell   *list_item;
	ErrorContextCallback spierrcontext;

	/*
	 * Setup error traceback support for ereport(); any error raised below
	 * will report the query text via _SPI_error_callback.
	 */
	spierrcontext.callback = _SPI_error_callback;
	/* the callback treats arg as read-only, so casting away const is safe */
	spierrcontext.arg = unconstify(char *, src);
	spierrcontext.previous = error_context_stack;
	error_context_stack = &spierrcontext;

	/*
	 * Parse the request string into a list of raw parse trees.
	 */
	raw_parsetree_list = pg_parse_query(src);

	/*
	 * Do parse analysis and rule rewrite for each raw parsetree, storing the
	 * results into unsaved plancache entries.
	 */
	plancache_list = NIL;

	foreach(list_item, raw_parsetree_list)
	{
		RawStmt    *parsetree = lfirst_node(RawStmt, list_item);
		List	   *stmt_list;
		CachedPlanSource *plansource;

		/*
		 * Create the CachedPlanSource before we do parse analysis, since it
		 * needs to see the unmodified raw parse tree.
		 */
		plansource = CreateCachedPlan(parsetree,
									  src,
									  CreateCommandTag(parsetree->stmt));

		/*
		 * Parameter datatypes are driven by parserSetup hook if provided,
		 * otherwise we use the fixed parameter list.
		 */
		if (plan->parserSetup != NULL)
		{
			/* hook-driven plans must not also supply a fixed arg list */
			Assert(plan->nargs == 0);
			stmt_list = pg_analyze_and_rewrite_params(parsetree,
													  src,
													  plan->parserSetup,
													  plan->parserSetupArg,
													  _SPI_current->queryEnv);
		}
		else
		{
			stmt_list = pg_analyze_and_rewrite(parsetree,
											   src,
											   plan->argtypes,
											   plan->nargs,
											   _SPI_current->queryEnv);
		}

		/* Finish filling in the CachedPlanSource */
		CompleteCachedPlan(plansource,
						   stmt_list,
						   NULL,
						   plan->argtypes,
						   plan->nargs,
						   plan->parserSetup,
						   plan->parserSetupArg,
						   plan->cursor_options,
						   false);	/* not fixed result */

		plancache_list = lappend(plancache_list, plansource);
	}

	plan->plancache_list = plancache_list;
	plan->oneshot = false;

	/*
	 * Pop the error context stack
	 */
	error_context_stack = spierrcontext.previous;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Parse, but don't analyze, a querystring.
|
|
|
|
*
|
|
|
|
* This is a stripped-down version of _SPI_prepare_plan that only does the
|
|
|
|
* initial raw parsing. It creates "one shot" CachedPlanSources
|
|
|
|
* that still require parse analysis before execution is possible.
|
|
|
|
*
|
|
|
|
* The advantage of using the "one shot" form of CachedPlanSource is that
|
|
|
|
* we eliminate data copying and invalidation overhead. Postponing parse
|
|
|
|
* analysis also prevents issues if some of the raw parsetrees are DDL
|
|
|
|
* commands that affect validity of later parsetrees. Both of these
|
|
|
|
* attributes are good things for SPI_execute() and similar cases.
|
|
|
|
*
|
|
|
|
* Results are stored into *plan (specifically, plan->plancache_list).
|
|
|
|
* Note that the result data is all in CurrentMemoryContext or child contexts
|
|
|
|
* thereof; in practice this means it is in the SPI executor context, and
|
|
|
|
* what we are creating is a "temporary" SPIPlan. Cruft generated during
|
|
|
|
* parsing is also left in CurrentMemoryContext.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan)
|
|
|
|
{
|
|
|
|
List *raw_parsetree_list;
|
|
|
|
List *plancache_list;
|
|
|
|
ListCell *list_item;
|
|
|
|
ErrorContextCallback spierrcontext;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup error traceback support for ereport()
|
|
|
|
*/
|
|
|
|
spierrcontext.callback = _SPI_error_callback;
|
2019-01-29 01:16:24 +01:00
|
|
|
spierrcontext.arg = unconstify(char *, src);
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
spierrcontext.previous = error_context_stack;
|
|
|
|
error_context_stack = &spierrcontext;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Parse the request string into a list of raw parse trees.
|
|
|
|
*/
|
|
|
|
raw_parsetree_list = pg_parse_query(src);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Construct plancache entries, but don't do parse analysis yet.
|
|
|
|
*/
|
|
|
|
plancache_list = NIL;
|
|
|
|
|
|
|
|
foreach(list_item, raw_parsetree_list)
|
|
|
|
{
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 19:51:29 +02:00
|
|
|
RawStmt *parsetree = lfirst_node(RawStmt, list_item);
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
CachedPlanSource *plansource;
|
|
|
|
|
|
|
|
plansource = CreateOneShotCachedPlan(parsetree,
|
|
|
|
src,
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
CreateCommandTag(parsetree->stmt));
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
|
|
|
|
plancache_list = lappend(plancache_list, plansource);
|
|
|
|
}
|
|
|
|
|
|
|
|
plan->plancache_list = plancache_list;
|
|
|
|
plan->oneshot = true;
|
2004-03-21 23:29:11 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Pop the error context stack
|
|
|
|
*/
|
|
|
|
error_context_stack = spierrcontext.previous;
|
1997-08-29 11:05:57 +02:00
|
|
|
}
|
|
|
|
|
2004-09-13 22:10:13 +02:00
|
|
|
/*
|
|
|
|
* Execute the given plan with the given parameter values
|
|
|
|
*
|
|
|
|
* snapshot: query snapshot to use, or InvalidSnapshot for the normal
|
|
|
|
* behavior of taking a new snapshot for each query.
|
|
|
|
* crosscheck_snapshot: for RI use, all others pass InvalidSnapshot
|
2017-08-16 06:22:32 +02:00
|
|
|
* read_only: true for read-only execution (no CommandCounterIncrement)
|
|
|
|
* fire_triggers: true to fire AFTER triggers at end of query (normal case);
|
|
|
|
* false means any AFTER triggers are postponed to end of outer query
|
2004-09-13 22:10:13 +02:00
|
|
|
* tcount: execution tuple-count limit, or 0 for none
|
|
|
|
*/
|
1997-08-29 11:05:57 +02:00
|
|
|
static int
|
2008-04-01 05:09:30 +02:00
|
|
|
_SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
|
2004-09-13 22:10:13 +02:00
|
|
|
Snapshot snapshot, Snapshot crosscheck_snapshot,
|
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's
portalPos field, FuncCallContext's call_cntr and max_calls fields,
ExecutorRun's count argument, PortalRunFetch's result, and the max number
of rows in a SPITupleTable to uint64, and deals with (I hope) all the
ensuing fallout. Some of these values were declared uint32 before, and
others "long".
I also removed PortalData's posOverflow field, since that logic seems
pretty useless given that portalPos is now always 64 bits.
The user-visible results are that command tags for SELECT etc will
correctly report tuple counts larger than 4G, as will plpgsql's
GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples
than that are still not exactly the norm, but they're becoming more
common.
Most values associated with FETCH/MOVE distances, such as PortalRun's count
argument and the count argument of most SPI functions that have one, remain
declared as "long". It's not clear whether it would be worth promoting
those to int64; but it would definitely be a large dollop of additional
API churn on top of this, and it would only help 32-bit platforms which
seem relatively less likely to see any benefit.
Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
2016-03-12 22:05:10 +01:00
|
|
|
bool read_only, bool fire_triggers, uint64 tcount)
|
1997-08-29 11:05:57 +02:00
|
|
|
{
|
2008-05-12 22:02:02 +02:00
|
|
|
int my_res = 0;
|
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's
portalPos field, FuncCallContext's call_cntr and max_calls fields,
ExecutorRun's count argument, PortalRunFetch's result, and the max number
of rows in a SPITupleTable to uint64, and deals with (I hope) all the
ensuing fallout. Some of these values were declared uint32 before, and
others "long".
I also removed PortalData's posOverflow field, since that logic seems
pretty useless given that portalPos is now always 64 bits.
The user-visible results are that command tags for SELECT etc will
correctly report tuple counts larger than 4G, as will plpgsql's
GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples
than that are still not exactly the norm, but they're becoming more
common.
Most values associated with FETCH/MOVE distances, such as PortalRun's count
argument and the count argument of most SPI functions that have one, remain
declared as "long". It's not clear whether it would be worth promoting
those to int64; but it would definitely be a large dollop of additional
API churn on top of this, and it would only help 32-bit platforms which
seem relatively less likely to see any benefit.
Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
2016-03-12 22:05:10 +01:00
|
|
|
uint64 my_processed = 0;
|
2008-05-12 22:02:02 +02:00
|
|
|
SPITupleTable *my_tuptable = NULL;
|
|
|
|
int res = 0;
|
2011-03-01 05:27:18 +01:00
|
|
|
bool pushed_active_snap = false;
|
2008-05-12 22:02:02 +02:00
|
|
|
ErrorContextCallback spierrcontext;
|
|
|
|
CachedPlan *cplan = NULL;
|
|
|
|
ListCell *lc1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup error traceback support for ereport()
|
|
|
|
*/
|
|
|
|
spierrcontext.callback = _SPI_error_callback;
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
spierrcontext.arg = NULL; /* we'll fill this below */
|
2008-05-12 22:02:02 +02:00
|
|
|
spierrcontext.previous = error_context_stack;
|
|
|
|
error_context_stack = &spierrcontext;
|
|
|
|
|
2011-03-01 05:27:18 +01:00
|
|
|
/*
|
|
|
|
* We support four distinct snapshot management behaviors:
|
|
|
|
*
|
|
|
|
* snapshot != InvalidSnapshot, read_only = true: use exactly the given
|
|
|
|
* snapshot.
|
|
|
|
*
|
2011-04-10 17:42:00 +02:00
|
|
|
* snapshot != InvalidSnapshot, read_only = false: use the given snapshot,
|
|
|
|
* modified by advancing its command ID before each querytree.
|
2011-03-01 05:27:18 +01:00
|
|
|
*
|
|
|
|
* snapshot == InvalidSnapshot, read_only = true: use the entry-time
|
|
|
|
* ActiveSnapshot, if any (if there isn't one, we run with no snapshot).
|
|
|
|
*
|
|
|
|
* snapshot == InvalidSnapshot, read_only = false: take a full new
|
|
|
|
* snapshot for each user command, and advance its command ID before each
|
|
|
|
* querytree within the command.
|
|
|
|
*
|
2011-04-10 17:42:00 +02:00
|
|
|
* In the first two cases, we can just push the snap onto the stack once
|
|
|
|
* for the whole plan list.
|
2018-03-24 15:05:06 +01:00
|
|
|
*
|
|
|
|
* But if the plan has no_snapshots set to true, then don't manage
|
|
|
|
* snapshots at all. The caller should then take care of that.
|
2011-03-01 05:27:18 +01:00
|
|
|
*/
|
2018-03-24 15:05:06 +01:00
|
|
|
if (snapshot != InvalidSnapshot && !plan->no_snapshots)
|
2011-03-01 05:27:18 +01:00
|
|
|
{
|
|
|
|
if (read_only)
|
|
|
|
{
|
|
|
|
PushActiveSnapshot(snapshot);
|
|
|
|
pushed_active_snap = true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Make sure we have a private copy of the snapshot to modify */
|
|
|
|
PushCopiedSnapshot(snapshot);
|
|
|
|
pushed_active_snap = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
foreach(lc1, plan->plancache_list)
|
2003-05-02 22:54:36 +02:00
|
|
|
{
|
2008-05-12 22:02:02 +02:00
|
|
|
CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc1);
|
|
|
|
List *stmt_list;
|
|
|
|
ListCell *lc2;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2019-01-29 01:16:24 +01:00
|
|
|
spierrcontext.arg = unconstify(char *, plansource->query_string);
|
2004-09-13 22:10:13 +02:00
|
|
|
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
/*
|
|
|
|
* If this is a one-shot plan, we still need to do parse analysis.
|
|
|
|
*/
|
|
|
|
if (plan->oneshot)
|
|
|
|
{
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
RawStmt *parsetree = plansource->raw_parse_tree;
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
const char *src = plansource->query_string;
|
|
|
|
List *stmt_list;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Parameter datatypes are driven by parserSetup hook if provided,
|
|
|
|
* otherwise we use the fixed parameter list.
|
|
|
|
*/
|
2014-11-12 21:58:37 +01:00
|
|
|
if (parsetree == NULL)
|
|
|
|
stmt_list = NIL;
|
|
|
|
else if (plan->parserSetup != NULL)
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
{
|
|
|
|
Assert(plan->nargs == 0);
|
|
|
|
stmt_list = pg_analyze_and_rewrite_params(parsetree,
|
|
|
|
src,
|
|
|
|
plan->parserSetup,
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
plan->parserSetupArg,
|
|
|
|
_SPI_current->queryEnv);
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
stmt_list = pg_analyze_and_rewrite(parsetree,
|
|
|
|
src,
|
|
|
|
plan->argtypes,
|
2017-04-01 06:17:18 +02:00
|
|
|
plan->nargs,
|
|
|
|
_SPI_current->queryEnv);
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Finish filling in the CachedPlanSource */
|
|
|
|
CompleteCachedPlan(plansource,
|
|
|
|
stmt_list,
|
|
|
|
NULL,
|
|
|
|
plan->argtypes,
|
|
|
|
plan->nargs,
|
|
|
|
plan->parserSetup,
|
|
|
|
plan->parserSetupArg,
|
|
|
|
plan->cursor_options,
|
2013-05-29 22:58:43 +02:00
|
|
|
false); /* not fixed result */
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is, we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
}
|
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
/*
|
|
|
|
* Replan if needed, and increment plan refcount. If it's a saved
|
|
|
|
* plan, the refcount must be backed by the CurrentResourceOwner.
|
|
|
|
*/
|
2017-04-01 06:17:18 +02:00
|
|
|
cplan = GetCachedPlan(plansource, paramLI, plan->saved, _SPI_current->queryEnv);
|
2011-09-16 06:42:53 +02:00
|
|
|
stmt_list = cplan->stmt_list;
|
2008-05-12 22:02:02 +02:00
|
|
|
|
2011-03-01 05:27:18 +01:00
|
|
|
/*
|
|
|
|
* In the default non-read-only case, get a new snapshot, replacing
|
|
|
|
* any that we pushed in a previous cycle.
|
|
|
|
*/
|
2018-03-24 15:05:06 +01:00
|
|
|
if (snapshot == InvalidSnapshot && !read_only && !plan->no_snapshots)
|
2011-03-01 05:27:18 +01:00
|
|
|
{
|
|
|
|
if (pushed_active_snap)
|
|
|
|
PopActiveSnapshot();
|
|
|
|
PushActiveSnapshot(GetTransactionSnapshot());
|
|
|
|
pushed_active_snap = true;
|
|
|
|
}
|
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
foreach(lc2, stmt_list)
|
|
|
|
{
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 19:51:29 +02:00
|
|
|
PlannedStmt *stmt = lfirst_node(PlannedStmt, lc2);
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
bool canSetTag = stmt->canSetTag;
|
2008-05-12 22:02:02 +02:00
|
|
|
DestReceiver *dest;
|
2002-10-15 01:49:20 +02:00
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
_SPI_current->processed = 0;
|
|
|
|
_SPI_current->tuptable = NULL;
|
2007-03-16 00:12:07 +01:00
|
|
|
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
if (stmt->utilityStmt)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
if (IsA(stmt->utilityStmt, CopyStmt))
|
2004-09-13 22:10:13 +02:00
|
|
|
{
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
CopyStmt *cstmt = (CopyStmt *) stmt->utilityStmt;
|
2007-02-20 18:32:18 +01:00
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
if (cstmt->filename == NULL)
|
2004-09-13 22:10:13 +02:00
|
|
|
{
|
2008-05-12 22:02:02 +02:00
|
|
|
my_res = SPI_ERROR_COPY;
|
2004-09-13 22:10:13 +02:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
}
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
else if (IsA(stmt->utilityStmt, TransactionStmt))
|
2008-05-12 22:02:02 +02:00
|
|
|
{
|
|
|
|
my_res = SPI_ERROR_TRANSACTION;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
}
|
2004-09-13 22:10:13 +02:00
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
if (read_only && !CommandIsReadOnly(stmt))
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
2009-06-11 16:49:15 +02:00
|
|
|
/* translator: %s is a SQL statement name */
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
errmsg("%s is not allowed in a non-volatile function",
|
|
|
|
CreateCommandTag((Node *) stmt))));
|
2004-09-13 22:10:13 +02:00
|
|
|
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 21:02:14 +02:00
|
|
|
if (IsInParallelMode() && !CommandIsReadOnly(stmt))
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
PreventCommandIfParallelMode(CreateCommandTag((Node *) stmt));
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 21:02:14 +02:00
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* If not read-only mode, advance the command counter before each
|
2011-03-01 05:27:18 +01:00
|
|
|
* command and update the snapshot.
|
2008-05-12 22:02:02 +02:00
|
|
|
*/
|
2018-03-24 15:05:06 +01:00
|
|
|
if (!read_only && !plan->no_snapshots)
|
2011-03-01 05:27:18 +01:00
|
|
|
{
|
2008-05-12 22:02:02 +02:00
|
|
|
CommandCounterIncrement();
|
2011-03-01 05:27:18 +01:00
|
|
|
UpdateActiveSnapshotCommandId();
|
|
|
|
}
|
2004-09-13 22:10:13 +02:00
|
|
|
|
2008-11-30 21:51:25 +01:00
|
|
|
dest = CreateDestReceiver(canSetTag ? DestSPI : DestNone);
|
2004-09-13 22:10:13 +02:00
|
|
|
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
if (stmt->utilityStmt == NULL)
|
2008-05-12 22:02:02 +02:00
|
|
|
{
|
|
|
|
QueryDesc *qdesc;
|
|
|
|
Snapshot snap;
|
|
|
|
|
|
|
|
if (ActiveSnapshotSet())
|
|
|
|
snap = GetActiveSnapshot();
|
|
|
|
else
|
|
|
|
snap = InvalidSnapshot;
|
|
|
|
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
qdesc = CreateQueryDesc(stmt,
|
2009-01-02 21:42:00 +01:00
|
|
|
plansource->query_string,
|
2008-05-12 22:02:02 +02:00
|
|
|
snap, crosscheck_snapshot,
|
|
|
|
dest,
|
2017-04-01 06:17:18 +02:00
|
|
|
paramLI, _SPI_current->queryEnv,
|
|
|
|
0);
|
2008-05-12 22:02:02 +02:00
|
|
|
res = _SPI_pquery(qdesc, fire_triggers,
|
|
|
|
canSetTag ? tcount : 0);
|
|
|
|
FreeQueryDesc(qdesc);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2012-06-10 21:20:04 +02:00
|
|
|
char completionTag[COMPLETION_TAG_BUFSIZE];
|
2018-03-24 15:05:06 +01:00
|
|
|
ProcessUtilityContext context;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the SPI context is atomic, or we are asked to manage
|
|
|
|
* snapshots, then we are in an atomic execution context.
|
|
|
|
* Conversely, to propagate a nonatomic execution context, the
|
|
|
|
* caller must be in a nonatomic SPI context and manage
|
|
|
|
* snapshots itself.
|
|
|
|
*/
|
|
|
|
if (_SPI_current->atomic || !plan->no_snapshots)
|
|
|
|
context = PROCESS_UTILITY_QUERY;
|
|
|
|
else
|
|
|
|
context = PROCESS_UTILITY_QUERY_NONATOMIC;
|
Restructure SELECT INTO's parsetree representation into CreateTableAsStmt.
Making this operation look like a utility statement seems generally a good
idea, and particularly so in light of the desire to provide command
triggers for utility statements. The original choice of representing it as
SELECT with an IntoClause appendage had metastasized into rather a lot of
places, unfortunately, so that this patch is a great deal more complicated
than one might at first expect.
In particular, keeping EXPLAIN working for SELECT INTO and CREATE TABLE AS
subcommands required restructuring some EXPLAIN-related APIs. Add-on code
that calls ExplainOnePlan or ExplainOneUtility, or uses
ExplainOneQuery_hook, will need adjustment.
Also, the cases PREPARE ... SELECT INTO and CREATE RULE ... SELECT INTO,
which formerly were accepted though undocumented, are no longer accepted.
The PREPARE case can be replaced with use of CREATE TABLE AS EXECUTE.
The CREATE RULE case doesn't seem to have much real-world use (since the
rule would work only once before failing with "table already exists"),
so we'll not bother with that one.
Both SELECT INTO and CREATE TABLE AS still return a command tag of
"SELECT nnnn". There was some discussion of returning "CREATE TABLE nnnn",
but for the moment backwards compatibility wins the day.
Andres Freund and Tom Lane
2012-03-20 02:37:19 +01:00
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
ProcessUtility(stmt,
|
|
|
|
plansource->query_string,
|
2018-03-24 15:05:06 +01:00
|
|
|
context,
|
2008-05-12 22:02:02 +02:00
|
|
|
paramLI,
|
2017-04-01 06:17:18 +02:00
|
|
|
_SPI_current->queryEnv,
|
2008-05-12 22:02:02 +02:00
|
|
|
dest,
|
2013-04-28 06:18:45 +02:00
|
|
|
completionTag);
|
Restructure SELECT INTO's parsetree representation into CreateTableAsStmt.
Making this operation look like a utility statement seems generally a good
idea, and particularly so in light of the desire to provide command
triggers for utility statements. The original choice of representing it as
SELECT with an IntoClause appendage had metastasized into rather a lot of
places, unfortunately, so that this patch is a great deal more complicated
than one might at first expect.
In particular, keeping EXPLAIN working for SELECT INTO and CREATE TABLE AS
subcommands required restructuring some EXPLAIN-related APIs. Add-on code
that calls ExplainOnePlan or ExplainOneUtility, or uses
ExplainOneQuery_hook, will need adjustment.
Also, the cases PREPARE ... SELECT INTO and CREATE RULE ... SELECT INTO,
which formerly were accepted though undocumented, are no longer accepted.
The PREPARE case can be replaced with use of CREATE TABLE AS EXECUTE.
The CREATE RULE case doesn't seem to have much real-world use (since the
rule would work only once before failing with "table already exists"),
so we'll not bother with that one.
Both SELECT INTO and CREATE TABLE AS still return a command tag of
"SELECT nnnn". There was some discussion of returning "CREATE TABLE nnnn",
but for the moment backwards compatibility wins the day.
Andres Freund and Tom Lane
2012-03-20 02:37:19 +01:00
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
/* Update "processed" if stmt returned tuples */
|
|
|
|
if (_SPI_current->tuptable)
|
|
|
|
_SPI_current->processed = _SPI_current->tuptable->alloced -
|
|
|
|
_SPI_current->tuptable->free;
|
Restructure SELECT INTO's parsetree representation into CreateTableAsStmt.
Making this operation look like a utility statement seems generally a good
idea, and particularly so in light of the desire to provide command
triggers for utility statements. The original choice of representing it as
SELECT with an IntoClause appendage had metastasized into rather a lot of
places, unfortunately, so that this patch is a great deal more complicated
than one might at first expect.
In particular, keeping EXPLAIN working for SELECT INTO and CREATE TABLE AS
subcommands required restructuring some EXPLAIN-related APIs. Add-on code
that calls ExplainOnePlan or ExplainOneUtility, or uses
ExplainOneQuery_hook, will need adjustment.
Also, the cases PREPARE ... SELECT INTO and CREATE RULE ... SELECT INTO,
which formerly were accepted though undocumented, are no longer accepted.
The PREPARE case can be replaced with use of CREATE TABLE AS EXECUTE.
The CREATE RULE case doesn't seem to have much real-world use (since the
rule would work only once before failing with "table already exists"),
so we'll not bother with that one.
Both SELECT INTO and CREATE TABLE AS still return a command tag of
"SELECT nnnn". There was some discussion of returning "CREATE TABLE nnnn",
but for the moment backwards compatibility wins the day.
Andres Freund and Tom Lane
2012-03-20 02:37:19 +01:00
|
|
|
|
2012-10-03 13:32:01 +02:00
|
|
|
res = SPI_OK_UTILITY;
|
|
|
|
|
Restructure SELECT INTO's parsetree representation into CreateTableAsStmt.
Making this operation look like a utility statement seems generally a good
idea, and particularly so in light of the desire to provide command
triggers for utility statements. The original choice of representing it as
SELECT with an IntoClause appendage had metastasized into rather a lot of
places, unfortunately, so that this patch is a great deal more complicated
than one might at first expect.
In particular, keeping EXPLAIN working for SELECT INTO and CREATE TABLE AS
subcommands required restructuring some EXPLAIN-related APIs. Add-on code
that calls ExplainOnePlan or ExplainOneUtility, or uses
ExplainOneQuery_hook, will need adjustment.
Also, the cases PREPARE ... SELECT INTO and CREATE RULE ... SELECT INTO,
which formerly were accepted though undocumented, are no longer accepted.
The PREPARE case can be replaced with use of CREATE TABLE AS EXECUTE.
The CREATE RULE case doesn't seem to have much real-world use (since the
rule would work only once before failing with "table already exists"),
so we'll not bother with that one.
Both SELECT INTO and CREATE TABLE AS still return a command tag of
"SELECT nnnn". There was some discussion of returning "CREATE TABLE nnnn",
but for the moment backwards compatibility wins the day.
Andres Freund and Tom Lane
2012-03-20 02:37:19 +01:00
|
|
|
/*
|
2012-10-03 13:32:01 +02:00
|
|
|
* Some utility statements return a row count, even though the
|
|
|
|
* tuples are not returned to the caller.
|
Restructure SELECT INTO's parsetree representation into CreateTableAsStmt.
Making this operation look like a utility statement seems generally a good
idea, and particularly so in light of the desire to provide command
triggers for utility statements. The original choice of representing it as
SELECT with an IntoClause appendage had metastasized into rather a lot of
places, unfortunately, so that this patch is a great deal more complicated
than one might at first expect.
In particular, keeping EXPLAIN working for SELECT INTO and CREATE TABLE AS
subcommands required restructuring some EXPLAIN-related APIs. Add-on code
that calls ExplainOnePlan or ExplainOneUtility, or uses
ExplainOneQuery_hook, will need adjustment.
Also, the cases PREPARE ... SELECT INTO and CREATE RULE ... SELECT INTO,
which formerly were accepted though undocumented, are no longer accepted.
The PREPARE case can be replaced with use of CREATE TABLE AS EXECUTE.
The CREATE RULE case doesn't seem to have much real-world use (since the
rule would work only once before failing with "table already exists"),
so we'll not bother with that one.
Both SELECT INTO and CREATE TABLE AS still return a command tag of
"SELECT nnnn". There was some discussion of returning "CREATE TABLE nnnn",
but for the moment backwards compatibility wins the day.
Andres Freund and Tom Lane
2012-03-20 02:37:19 +01:00
|
|
|
*/
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
if (IsA(stmt->utilityStmt, CreateTableAsStmt))
|
Restructure SELECT INTO's parsetree representation into CreateTableAsStmt.
Making this operation look like a utility statement seems generally a good
idea, and particularly so in light of the desire to provide command
triggers for utility statements. The original choice of representing it as
SELECT with an IntoClause appendage had metastasized into rather a lot of
places, unfortunately, so that this patch is a great deal more complicated
than one might at first expect.
In particular, keeping EXPLAIN working for SELECT INTO and CREATE TABLE AS
subcommands required restructuring some EXPLAIN-related APIs. Add-on code
that calls ExplainOnePlan or ExplainOneUtility, or uses
ExplainOneQuery_hook, will need adjustment.
Also, the cases PREPARE ... SELECT INTO and CREATE RULE ... SELECT INTO,
which formerly were accepted though undocumented, are no longer accepted.
The PREPARE case can be replaced with use of CREATE TABLE AS EXECUTE.
The CREATE RULE case doesn't seem to have much real-world use (since the
rule would work only once before failing with "table already exists"),
so we'll not bother with that one.
Both SELECT INTO and CREATE TABLE AS still return a command tag of
"SELECT nnnn". There was some discussion of returning "CREATE TABLE nnnn",
but for the moment backwards compatibility wins the day.
Andres Freund and Tom Lane
2012-03-20 02:37:19 +01:00
|
|
|
{
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
CreateTableAsStmt *ctastmt = (CreateTableAsStmt *) stmt->utilityStmt;
|
2016-04-12 02:07:17 +02:00
|
|
|
|
|
|
|
if (strncmp(completionTag, "SELECT ", 7) == 0)
|
|
|
|
_SPI_current->processed =
|
|
|
|
pg_strtouint64(completionTag + 7, NULL, 10);
|
|
|
|
else
|
|
|
|
{
|
2016-08-11 17:22:25 +02:00
|
|
|
/*
|
|
|
|
* Must be an IF NOT EXISTS that did nothing, or a
|
|
|
|
* CREATE ... WITH NO DATA.
|
|
|
|
*/
|
|
|
|
Assert(ctastmt->if_not_exists ||
|
|
|
|
ctastmt->into->skipData);
|
2016-04-12 02:07:17 +02:00
|
|
|
_SPI_current->processed = 0;
|
|
|
|
}
|
2012-10-03 13:32:01 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* For historical reasons, if CREATE TABLE AS was spelled
|
|
|
|
* as SELECT INTO, return a special return code.
|
|
|
|
*/
|
2016-04-12 02:07:17 +02:00
|
|
|
if (ctastmt->is_select_into)
|
Restructure SELECT INTO's parsetree representation into CreateTableAsStmt.
Making this operation look like a utility statement seems generally a good
idea, and particularly so in light of the desire to provide command
triggers for utility statements. The original choice of representing it as
SELECT with an IntoClause appendage had metastasized into rather a lot of
places, unfortunately, so that this patch is a great deal more complicated
than one might at first expect.
In particular, keeping EXPLAIN working for SELECT INTO and CREATE TABLE AS
subcommands required restructuring some EXPLAIN-related APIs. Add-on code
that calls ExplainOnePlan or ExplainOneUtility, or uses
ExplainOneQuery_hook, will need adjustment.
Also, the cases PREPARE ... SELECT INTO and CREATE RULE ... SELECT INTO,
which formerly were accepted though undocumented, are no longer accepted.
The PREPARE case can be replaced with use of CREATE TABLE AS EXECUTE.
The CREATE RULE case doesn't seem to have much real-world use (since the
rule would work only once before failing with "table already exists"),
so we'll not bother with that one.
Both SELECT INTO and CREATE TABLE AS still return a command tag of
"SELECT nnnn". There was some discussion of returning "CREATE TABLE nnnn",
but for the moment backwards compatibility wins the day.
Andres Freund and Tom Lane
2012-03-20 02:37:19 +01:00
|
|
|
res = SPI_OK_SELINTO;
|
|
|
|
}
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 22:02:35 +01:00
|
|
|
else if (IsA(stmt->utilityStmt, CopyStmt))
|
2012-10-03 13:32:01 +02:00
|
|
|
{
|
|
|
|
Assert(strncmp(completionTag, "COPY ", 5) == 0);
|
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's
portalPos field, FuncCallContext's call_cntr and max_calls fields,
ExecutorRun's count argument, PortalRunFetch's result, and the max number
of rows in a SPITupleTable to uint64, and deals with (I hope) all the
ensuing fallout. Some of these values were declared uint32 before, and
others "long".
I also removed PortalData's posOverflow field, since that logic seems
pretty useless given that portalPos is now always 64 bits.
The user-visible results are that command tags for SELECT etc will
correctly report tuple counts larger than 4G, as will plpgsql's GET
DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples
than that are still not exactly the norm, but they're becoming more
common.
Most values associated with FETCH/MOVE distances, such as PortalRun's count
argument and the count argument of most SPI functions that have one, remain
declared as "long". It's not clear whether it would be worth promoting
those to int64; but it would definitely be a large dollop of additional
API churn on top of this, and it would only help 32-bit platforms which
seem relatively less likely to see any benefit.
Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
2016-03-12 22:05:10 +01:00
|
|
|
_SPI_current->processed = pg_strtouint64(completionTag + 5,
|
|
|
|
NULL, 10);
|
2012-10-03 13:32:01 +02:00
|
|
|
}
|
2008-05-12 22:02:02 +02:00
|
|
|
}
|
|
|
|
|
2007-11-30 19:38:34 +01:00
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* The last canSetTag query sets the status values returned to the
|
2014-05-06 18:12:18 +02:00
|
|
|
* caller. Be careful to free any tuptables not returned, to
|
2009-06-11 16:49:15 +02:00
|
|
|
* avoid intratransaction memory leak.
|
2007-11-30 19:38:34 +01:00
|
|
|
*/
|
2008-05-12 22:02:02 +02:00
|
|
|
if (canSetTag)
|
|
|
|
{
|
|
|
|
my_processed = _SPI_current->processed;
|
|
|
|
SPI_freetuptable(my_tuptable);
|
|
|
|
my_tuptable = _SPI_current->tuptable;
|
|
|
|
my_res = res;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
SPI_freetuptable(_SPI_current->tuptable);
|
|
|
|
_SPI_current->tuptable = NULL;
|
|
|
|
}
|
|
|
|
/* we know that the receiver doesn't need a destroy call */
|
|
|
|
if (res < 0)
|
|
|
|
{
|
|
|
|
my_res = res;
|
|
|
|
goto fail;
|
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
/* Done with this plan, so release refcount */
|
2011-09-16 06:42:53 +02:00
|
|
|
ReleaseCachedPlan(cplan, plan->saved);
|
2008-05-12 22:02:02 +02:00
|
|
|
cplan = NULL;
|
2007-03-16 00:12:07 +01:00
|
|
|
|
2004-09-13 22:10:13 +02:00
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* If not read-only mode, advance the command counter after the last
|
|
|
|
* command. This ensures that its effects are visible, in case it was
|
|
|
|
* DDL that would affect the next CachedPlanSource.
|
2004-09-13 22:10:13 +02:00
|
|
|
*/
|
2008-05-12 22:02:02 +02:00
|
|
|
if (!read_only)
|
|
|
|
CommandCounterIncrement();
|
2004-09-13 22:10:13 +02:00
|
|
|
}
|
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
fail:
|
|
|
|
|
2011-03-01 05:27:18 +01:00
|
|
|
/* Pop the snapshot off the stack if we pushed one */
|
|
|
|
if (pushed_active_snap)
|
|
|
|
PopActiveSnapshot();
|
|
|
|
|
2008-05-12 22:02:02 +02:00
|
|
|
/* We no longer need the cached plan refcount, if any */
|
|
|
|
if (cplan)
|
2011-09-16 06:42:53 +02:00
|
|
|
ReleaseCachedPlan(cplan, plan->saved);
|
2008-05-12 22:02:02 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Pop the error context stack
|
|
|
|
*/
|
|
|
|
error_context_stack = spierrcontext.previous;
|
2004-03-21 23:29:11 +01:00
|
|
|
|
2005-10-01 20:43:19 +02:00
|
|
|
/* Save results for caller */
|
|
|
|
SPI_processed = my_processed;
|
|
|
|
SPI_tuptable = my_tuptable;
|
|
|
|
|
2006-12-08 01:40:27 +01:00
|
|
|
/* tuptable now is caller's responsibility, not SPI's */
|
|
|
|
_SPI_current->tuptable = NULL;
|
|
|
|
|
2006-12-26 17:56:18 +01:00
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* If none of the queries had canSetTag, return SPI_OK_REWRITTEN. Prior to
|
|
|
|
* 8.4, we used to return the last query's result code, but not its auxiliary
|
|
|
|
* results, but that's confusing.
|
2006-12-26 17:56:18 +01:00
|
|
|
*/
|
|
|
|
if (my_res == 0)
|
2009-01-21 12:02:40 +01:00
|
|
|
my_res = SPI_OK_REWRITTEN;
|
2006-12-26 17:56:18 +01:00
|
|
|
|
2006-08-14 15:40:18 +02:00
|
|
|
return my_res;
|
1997-09-06 13:23:05 +02:00
|
|
|
}
|
|
|
|
|
2008-04-01 05:09:30 +02:00
|
|
|
/*
|
2009-11-04 23:26:08 +01:00
|
|
|
* Convert arrays of query parameters to form wanted by planner and executor
|
2008-04-01 05:09:30 +02:00
|
|
|
*/
|
|
|
|
static ParamListInfo
|
|
|
|
_SPI_convert_params(int nargs, Oid *argtypes,
|
2011-09-16 06:42:53 +02:00
|
|
|
Datum *Values, const char *Nulls)
|
2008-04-01 05:09:30 +02:00
|
|
|
{
|
|
|
|
ParamListInfo paramLI;
|
|
|
|
|
|
|
|
if (nargs > 0)
|
|
|
|
{
|
2019-03-14 13:30:09 +01:00
|
|
|
paramLI = makeParamList(nargs);
|
|
|
|
|
|
|
|
for (int i = 0; i < nargs; i++)
|
2008-04-01 05:09:30 +02:00
|
|
|
{
|
|
|
|
ParamExternData *prm = ¶mLI->params[i];
|
|
|
|
|
|
|
|
prm->value = Values[i];
|
|
|
|
prm->isnull = (Nulls && Nulls[i] == 'n');
|
2011-09-16 06:42:53 +02:00
|
|
|
prm->pflags = PARAM_FLAG_CONST;
|
2008-04-01 05:09:30 +02:00
|
|
|
prm->ptype = argtypes[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
paramLI = NULL;
|
|
|
|
return paramLI;
|
|
|
|
}
|
|
|
|
|
1997-09-06 13:23:05 +02:00
|
|
|
static int
|
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's
portalPos field, FuncCallContext's call_cntr and max_calls fields,
ExecutorRun's count argument, PortalRunFetch's result, and the max number
of rows in a SPITupleTable to uint64, and deals with (I hope) all the
ensuing fallout. Some of these values were declared uint32 before, and
others "long".
I also removed PortalData's posOverflow field, since that logic seems
pretty useless given that portalPos is now always 64 bits.
The user-visible results are that command tags for SELECT etc will
correctly report tuple counts larger than 4G, as will plpgsql's GET
GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples
than that are still not exactly the norm, but they're becoming more
common.
Most values associated with FETCH/MOVE distances, such as PortalRun's count
argument and the count argument of most SPI functions that have one, remain
declared as "long". It's not clear whether it would be worth promoting
those to int64; but it would definitely be a large dollop of additional
API churn on top of this, and it would only help 32-bit platforms which
seem relatively less likely to see any benefit.
Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
2016-03-12 22:05:10 +01:00
|
|
|
_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount)
|
1997-09-06 13:23:05 +02:00
|
|
|
{
|
1997-09-25 14:16:05 +02:00
|
|
|
int operation = queryDesc->operation;
|
2011-02-27 19:43:29 +01:00
|
|
|
int eflags;
|
1997-09-08 04:41:22 +02:00
|
|
|
int res;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
switch (operation)
|
|
|
|
{
|
1997-09-08 04:41:22 +02:00
|
|
|
case CMD_SELECT:
|
Restructure SELECT INTO's parsetree representation into CreateTableAsStmt.
Making this operation look like a utility statement seems generally a good
idea, and particularly so in light of the desire to provide command
triggers for utility statements. The original choice of representing it as
SELECT with an IntoClause appendage had metastasized into rather a lot of
places, unfortunately, so that this patch is a great deal more complicated
than one might at first expect.
In particular, keeping EXPLAIN working for SELECT INTO and CREATE TABLE AS
subcommands required restructuring some EXPLAIN-related APIs. Add-on code
that calls ExplainOnePlan or ExplainOneUtility, or uses
ExplainOneQuery_hook, will need adjustment.
Also, the cases PREPARE ... SELECT INTO and CREATE RULE ... SELECT INTO,
which formerly were accepted though undocumented, are no longer accepted.
The PREPARE case can be replaced with use of CREATE TABLE AS EXECUTE.
The CREATE RULE case doesn't seem to have much real-world use (since the
rule would work only once before failing with "table already exists"),
so we'll not bother with that one.
Both SELECT INTO and CREATE TABLE AS still return a command tag of
"SELECT nnnn". There was some discussion of returning "CREATE TABLE nnnn",
but for the moment backwards compatibility wins the day.
Andres Freund and Tom Lane
2012-03-20 02:37:19 +01:00
|
|
|
if (queryDesc->dest->mydest != DestSPI)
|
2005-10-01 20:43:19 +02:00
|
|
|
{
|
|
|
|
/* Don't return SPI_OK_SELECT if we're discarding result */
|
|
|
|
res = SPI_OK_UTILITY;
|
|
|
|
}
|
2006-08-12 04:52:06 +02:00
|
|
|
else
|
|
|
|
res = SPI_OK_SELECT;
|
1997-09-08 04:41:22 +02:00
|
|
|
break;
|
|
|
|
case CMD_INSERT:
|
2009-10-10 03:43:50 +02:00
|
|
|
if (queryDesc->plannedstmt->hasReturning)
|
2006-08-28 01:47:58 +02:00
|
|
|
res = SPI_OK_INSERT_RETURNING;
|
|
|
|
else
|
|
|
|
res = SPI_OK_INSERT;
|
1997-09-08 04:41:22 +02:00
|
|
|
break;
|
|
|
|
case CMD_DELETE:
|
2009-10-10 03:43:50 +02:00
|
|
|
if (queryDesc->plannedstmt->hasReturning)
|
2006-08-28 01:47:58 +02:00
|
|
|
res = SPI_OK_DELETE_RETURNING;
|
|
|
|
else
|
|
|
|
res = SPI_OK_DELETE;
|
1997-09-08 04:41:22 +02:00
|
|
|
break;
|
|
|
|
case CMD_UPDATE:
|
2009-10-10 03:43:50 +02:00
|
|
|
if (queryDesc->plannedstmt->hasReturning)
|
2006-08-28 01:47:58 +02:00
|
|
|
res = SPI_OK_UPDATE_RETURNING;
|
|
|
|
else
|
|
|
|
res = SPI_OK_UPDATE;
|
1997-09-08 04:41:22 +02:00
|
|
|
break;
|
|
|
|
default:
|
1998-09-01 05:29:17 +02:00
|
|
|
return SPI_ERROR_OPUNKNOWN;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
1997-09-04 15:22:39 +02:00
|
|
|
#ifdef SPI_EXECUTOR_STATS
|
1997-09-07 07:04:48 +02:00
|
|
|
if (ShowExecutorStats)
|
|
|
|
ResetUsage();
|
1997-09-04 15:22:39 +02:00
|
|
|
#endif
|
2002-02-27 20:36:13 +01:00
|
|
|
|
2011-02-27 19:43:29 +01:00
|
|
|
/* Select execution options */
|
2007-08-15 21:15:47 +02:00
|
|
|
if (fire_triggers)
|
2011-02-27 19:43:29 +01:00
|
|
|
eflags = 0; /* default run-to-completion flags */
|
|
|
|
else
|
|
|
|
eflags = EXEC_FLAG_SKIP_TRIGGERS;
|
2004-09-10 20:40:09 +02:00
|
|
|
|
2011-02-27 19:43:29 +01:00
|
|
|
ExecutorStart(queryDesc, eflags);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2017-03-23 18:05:48 +01:00
|
|
|
ExecutorRun(queryDesc, ForwardScanDirection, tcount, true);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2002-12-05 16:50:39 +01:00
|
|
|
_SPI_current->processed = queryDesc->estate->es_processed;
|
2001-02-19 20:49:53 +01:00
|
|
|
|
2009-10-10 03:43:50 +02:00
|
|
|
if ((res == SPI_OK_SELECT || queryDesc->plannedstmt->hasReturning) &&
|
2006-08-28 01:47:58 +02:00
|
|
|
queryDesc->dest->mydest == DestSPI)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
1997-09-25 14:16:05 +02:00
|
|
|
if (_SPI_checktuples())
|
2003-07-21 19:05:12 +02:00
|
|
|
elog(ERROR, "consistency check on SPI tuple count failed");
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
2011-02-27 19:43:29 +01:00
|
|
|
ExecutorFinish(queryDesc);
|
2005-03-25 22:58:00 +01:00
|
|
|
ExecutorEnd(queryDesc);
|
2008-03-20 21:05:56 +01:00
|
|
|
/* FreeQueryDesc is done by the caller */
|
2004-09-10 20:40:09 +02:00
|
|
|
|
2002-12-15 17:17:59 +01:00
|
|
|
#ifdef SPI_EXECUTOR_STATS
|
|
|
|
if (ShowExecutorStats)
|
|
|
|
ShowUsage("SPI EXECUTOR STATS");
|
|
|
|
#endif
|
1997-08-29 11:05:57 +02:00
|
|
|
|
2002-12-15 17:17:59 +01:00
|
|
|
return res;
|
1997-08-29 11:05:57 +02:00
|
|
|
}
|
|
|
|
|
2004-03-21 23:29:11 +01:00
|
|
|
/*
|
|
|
|
* _SPI_error_callback
|
|
|
|
*
|
|
|
|
* Add context information when a query invoked via SPI fails
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_SPI_error_callback(void *arg)
|
|
|
|
{
|
|
|
|
const char *query = (const char *) arg;
|
|
|
|
int syntaxerrposition;
|
|
|
|
|
2017-12-11 22:33:20 +01:00
|
|
|
if (query == NULL) /* in case arg wasn't set yet */
|
|
|
|
return;
|
|
|
|
|
2004-03-21 23:29:11 +01:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* If there is a syntax error position, convert to internal syntax error;
|
|
|
|
* otherwise treat the query as an item of context stack
|
2004-03-21 23:29:11 +01:00
|
|
|
*/
|
|
|
|
syntaxerrposition = geterrposition();
|
|
|
|
if (syntaxerrposition > 0)
|
|
|
|
{
|
|
|
|
errposition(0);
|
|
|
|
internalerrposition(syntaxerrposition);
|
|
|
|
internalerrquery(query);
|
|
|
|
}
|
|
|
|
else
|
2004-10-12 23:54:45 +02:00
|
|
|
errcontext("SQL statement \"%s\"", query);
|
2004-03-21 23:29:11 +01:00
|
|
|
}
|
|
|
|
|
2001-05-21 16:22:19 +02:00
|
|
|
/*
|
|
|
|
* _SPI_cursor_operation()
|
|
|
|
*
|
|
|
|
* Do a FETCH or MOVE in a cursor
|
|
|
|
*/
|
|
|
|
static void
|
2007-04-16 03:14:58 +02:00
|
|
|
_SPI_cursor_operation(Portal portal, FetchDirection direction, long count,
|
2003-05-06 22:26:28 +02:00
|
|
|
DestReceiver *dest)
|
2001-05-21 16:22:19 +02:00
|
|
|
{
|
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's
portalPos field, FuncCallContext's call_cntr and max_calls fields,
ExecutorRun's count argument, PortalRunFetch's result, and the max number
of rows in a SPITupleTable to uint64, and deals with (I hope) all the
ensuing fallout. Some of these values were declared uint32 before, and
others "long".
I also removed PortalData's posOverflow field, since that logic seems
pretty useless given that portalPos is now always 64 bits.
The user-visible results are that command tags for SELECT etc will
correctly report tuple counts larger than 4G, as will plpgsql's GET
GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples
than that are still not exactly the norm, but they're becoming more
common.
Most values associated with FETCH/MOVE distances, such as PortalRun's count
argument and the count argument of most SPI functions that have one, remain
declared as "long". It's not clear whether it would be worth promoting
those to int64; but it would definitely be a large dollop of additional
API churn on top of this, and it would only help 32-bit platforms which
seem relatively less likely to see any benefit.
Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
2016-03-12 22:05:10 +01:00
|
|
|
uint64 nfetched;
|
2003-08-08 21:18:21 +02:00
|
|
|
|
2001-05-21 16:22:19 +02:00
|
|
|
/* Check that the portal is valid */
|
|
|
|
if (!PortalIsValid(portal))
|
|
|
|
elog(ERROR, "invalid portal in SPI cursor operation");
|
|
|
|
|
|
|
|
/* Push the SPI stack */
|
2003-09-23 17:11:33 +02:00
|
|
|
if (_SPI_begin_call(true) < 0)
|
|
|
|
elog(ERROR, "SPI cursor operation called while not connected");
|
2001-05-21 16:22:19 +02:00
|
|
|
|
2005-10-01 20:43:19 +02:00
|
|
|
/* Reset the SPI result (note we deliberately don't touch lastoid) */
|
2001-05-21 16:22:19 +02:00
|
|
|
SPI_processed = 0;
|
|
|
|
SPI_tuptable = NULL;
|
|
|
|
_SPI_current->processed = 0;
|
|
|
|
_SPI_current->tuptable = NULL;
|
|
|
|
|
2003-03-10 04:53:52 +01:00
|
|
|
/* Run the cursor */
|
2003-08-08 21:18:21 +02:00
|
|
|
nfetched = PortalRunFetch(portal,
|
2007-04-16 03:14:58 +02:00
|
|
|
direction,
|
2005-05-02 02:37:07 +02:00
|
|
|
count,
|
2003-08-08 21:18:21 +02:00
|
|
|
dest);
|
|
|
|
|
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Think not to combine this store with the preceding function call. If
|
|
|
|
* the portal contains calls to functions that use SPI, then SPI_stack is
|
|
|
|
* likely to move around while the portal runs. When control returns,
|
|
|
|
* _SPI_current will point to the correct stack entry... but the pointer
|
|
|
|
* may be different than it was beforehand. So we must be sure to re-fetch
|
|
|
|
* the pointer after the function call completes.
|
2003-08-08 21:18:21 +02:00
|
|
|
*/
|
|
|
|
_SPI_current->processed = nfetched;
|
2001-05-21 16:22:19 +02:00
|
|
|
|
2005-11-03 18:11:40 +01:00
|
|
|
if (dest->mydest == DestSPI && _SPI_checktuples())
|
2003-07-21 19:05:12 +02:00
|
|
|
elog(ERROR, "consistency check on SPI tuple count failed");
|
2001-05-21 16:22:19 +02:00
|
|
|
|
|
|
|
/* Put the result into place for access by caller */
|
|
|
|
SPI_processed = _SPI_current->processed;
|
2001-10-25 07:50:21 +02:00
|
|
|
SPI_tuptable = _SPI_current->tuptable;
|
2001-05-21 16:22:19 +02:00
|
|
|
|
2006-12-08 01:40:27 +01:00
|
|
|
/* tuptable now is caller's responsibility, not SPI's */
|
|
|
|
_SPI_current->tuptable = NULL;
|
|
|
|
|
2001-05-21 16:22:19 +02:00
|
|
|
/* Pop the SPI stack */
|
|
|
|
_SPI_end_call(true);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
1997-09-08 04:41:22 +02:00
|
|
|
static MemoryContext
|
2004-10-13 03:25:13 +02:00
|
|
|
_SPI_execmem(void)
|
1997-08-29 11:05:57 +02:00
|
|
|
{
|
2000-06-28 05:33:33 +02:00
|
|
|
return MemoryContextSwitchTo(_SPI_current->execCxt);
|
1997-08-29 11:05:57 +02:00
|
|
|
}
|
|
|
|
|
1997-09-08 04:41:22 +02:00
|
|
|
static MemoryContext
|
2004-10-13 03:25:13 +02:00
|
|
|
_SPI_procmem(void)
|
1997-08-29 11:05:57 +02:00
|
|
|
{
|
2000-06-28 05:33:33 +02:00
|
|
|
return MemoryContextSwitchTo(_SPI_current->procCxt);
|
1997-08-29 11:05:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-09-23 17:11:33 +02:00
|
|
|
* _SPI_begin_call: begin a SPI operation within a connected procedure
|
2017-10-07 01:18:58 +02:00
|
|
|
*
|
|
|
|
* use_exec is true if we intend to make use of the procedure's execCxt
|
|
|
|
* during this SPI operation. We'll switch into that context, and arrange
|
|
|
|
* for it to be cleaned up at _SPI_end_call or if an error occurs.
|
1997-08-29 11:05:57 +02:00
|
|
|
*/
|
|
|
|
static int
|
2017-10-07 01:18:58 +02:00
|
|
|
_SPI_begin_call(bool use_exec)
|
1997-08-29 11:05:57 +02:00
|
|
|
{
|
Simplify code by getting rid of SPI_push, SPI_pop, SPI_restore_connection.
The idea behind SPI_push was to allow transitioning back into an
"unconnected" state when a SPI-using procedure calls unrelated code that
might or might not invoke SPI. That sounds good, but in practice the only
thing it does for us is to catch cases where a called SPI-using function
forgets to call SPI_connect --- which is a highly improbable failure mode,
since it would be exposed immediately by direct testing of said function.
As against that, we've had multiple bugs induced by forgetting to call
SPI_push/SPI_pop around code that might invoke SPI-using functions; these
are much harder to catch and indeed have gone undetected for years in some
cases. And we've had to band-aid around some problems of this ilk by
introducing conditional push/pop pairs in some places, which really kind
of defeats the purpose altogether; if we can't draw bright lines between
connected and unconnected code, what's the point?
Hence, get rid of SPI_push[_conditional], SPI_pop[_conditional], and the
underlying state variable _SPI_curid. It turns out SPI_restore_connection
can go away too, which is a nice side benefit since it was never more than
a kluge. Provide no-op macros for the deleted functions so as to avoid an
API break for external modules.
A side effect of this removal is that SPI_palloc and allied functions no
longer permit being called when unconnected; they'll throw an error
instead. The apparent usefulness of the previous behavior was a mirage
as well, because it was depended on by only a few places (which I fixed in
preceding commits), and it posed a risk of allocations being unexpectedly
long-lived if someone forgot a SPI_push call.
Discussion: <20808.1478481403@sss.pgh.pa.us>
2016-11-08 23:39:45 +01:00
|
|
|
if (_SPI_current == NULL)
|
1998-09-01 05:29:17 +02:00
|
|
|
return SPI_ERROR_UNCONNECTED;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2017-10-07 01:18:58 +02:00
|
|
|
if (use_exec)
|
|
|
|
{
|
|
|
|
/* remember when the Executor operation started */
|
|
|
|
_SPI_current->execSubid = GetCurrentSubTransactionId();
|
|
|
|
/* switch to the Executor memory context */
|
1997-09-07 07:04:48 +02:00
|
|
|
_SPI_execmem();
|
2017-10-07 01:18:58 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1998-09-01 05:29:17 +02:00
|
|
|
return 0;
|
1997-08-29 11:05:57 +02:00
|
|
|
}
|
|
|
|
|
2003-09-23 17:11:33 +02:00
|
|
|
/*
|
|
|
|
* _SPI_end_call: end a SPI operation within a connected procedure
|
|
|
|
*
|
2017-10-07 01:18:58 +02:00
|
|
|
* use_exec must be the same as in the previous _SPI_begin_call
|
|
|
|
*
|
2003-09-23 17:11:33 +02:00
|
|
|
* Note: this currently has no failure return cases, so callers don't check
|
|
|
|
*/
|
1997-08-29 11:05:57 +02:00
|
|
|
static int
|
2017-10-07 01:18:58 +02:00
|
|
|
_SPI_end_call(bool use_exec)
|
1997-08-29 11:05:57 +02:00
|
|
|
{
|
2017-10-07 01:18:58 +02:00
|
|
|
if (use_exec)
|
2000-06-28 05:33:33 +02:00
|
|
|
{
|
2017-10-07 01:18:58 +02:00
|
|
|
/* switch to the procedure memory context */
|
1997-09-07 07:04:48 +02:00
|
|
|
_SPI_procmem();
|
2017-10-07 01:18:58 +02:00
|
|
|
/* mark Executor context no longer in use */
|
|
|
|
_SPI_current->execSubid = InvalidSubTransactionId;
|
2000-06-28 05:33:33 +02:00
|
|
|
/* and free Executor memory */
|
|
|
|
MemoryContextResetAndDeleteChildren(_SPI_current->execCxt);
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
1998-09-01 05:29:17 +02:00
|
|
|
return 0;
|
1997-08-29 11:05:57 +02:00
|
|
|
}
|
|
|
|
|
1997-09-08 04:41:22 +02:00
|
|
|
static bool
|
2001-08-02 20:08:43 +02:00
|
|
|
_SPI_checktuples(void)
|
1997-08-29 11:05:57 +02:00
|
|
|
{
|
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's
portalPos field, FuncCallContext's call_cntr and max_calls fields,
ExecutorRun's count argument, PortalRunFetch's result, and the max number
of rows in a SPITupleTable to uint64, and deals with (I hope) all the
ensuing fallout. Some of these values were declared uint32 before, and
others "long".
I also removed PortalData's posOverflow field, since that logic seems
pretty useless given that portalPos is now always 64 bits.
The user-visible results are that command tags for SELECT etc will
correctly report tuple counts larger than 4G, as will plpgsql's GET
GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples
than that are still not exactly the norm, but they're becoming more
common.
Most values associated with FETCH/MOVE distances, such as PortalRun's count
argument and the count argument of most SPI functions that have one, remain
declared as "long". It's not clear whether it would be worth promoting
those to int64; but it would definitely be a large dollop of additional
API churn on top of this, and it would only help 32-bit platforms which
seem relatively less likely to see any benefit.
Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
2016-03-12 22:05:10 +01:00
|
|
|
uint64 processed = _SPI_current->processed;
|
1997-09-08 04:41:22 +02:00
|
|
|
SPITupleTable *tuptable = _SPI_current->tuptable;
|
|
|
|
bool failed = false;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-08-04 02:43:34 +02:00
|
|
|
if (tuptable == NULL) /* spi_dest_startup was not called */
|
2003-01-21 23:06:12 +01:00
|
|
|
failed = true;
|
|
|
|
else if (processed != (tuptable->alloced - tuptable->free))
|
|
|
|
failed = true;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1998-09-01 05:29:17 +02:00
|
|
|
return failed;
|
1997-08-29 11:05:57 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
/*
|
2011-09-16 06:42:53 +02:00
|
|
|
* Convert a "temporary" SPIPlan into an "unsaved" plan.
|
|
|
|
*
|
|
|
|
* The passed _SPI_plan struct is on the stack, and all its subsidiary data
|
|
|
|
* is in or under the current SPI executor context. Copy the plan into the
|
|
|
|
* SPI procedure context so it will survive _SPI_end_call(). To minimize
|
|
|
|
* data copying, this destructively modifies the input plan, by taking the
|
|
|
|
* plancache entries away from it and reparenting them to the new SPIPlan.
|
2007-03-16 00:12:07 +01:00
|
|
|
*/
|
|
|
|
static SPIPlanPtr
|
2011-09-16 06:42:53 +02:00
|
|
|
_SPI_make_plan_non_temp(SPIPlanPtr plan)
|
1997-09-04 15:22:39 +02:00
|
|
|
{
|
2007-03-16 00:12:07 +01:00
|
|
|
SPIPlanPtr newplan;
|
2011-09-16 06:42:53 +02:00
|
|
|
MemoryContext parentcxt = _SPI_current->procCxt;
|
2001-05-21 16:22:19 +02:00
|
|
|
MemoryContext plancxt;
|
2007-03-16 00:12:07 +01:00
|
|
|
MemoryContext oldcxt;
|
|
|
|
ListCell *lc;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
/* Assert the input is a temporary SPIPlan */
|
|
|
|
Assert(plan->magic == _SPI_PLAN_MAGIC);
|
|
|
|
Assert(plan->plancxt == NULL);
|
Invent a "one-shot" variant of CachedPlans for better performance.
SPI_execute() and related functions create a CachedPlan, execute it once,
and immediately discard it, so that the functionality offered by
plancache.c is of no value in this code path. And performance measurements
show that the extra data copying and invalidation checking done by
plancache.c slows down simple queries by 10% or more compared to 9.1.
However, enough of the SPI code is shared with functions that do need plan
caching that it seems impractical to bypass plancache.c altogether.
Instead, let's invent a variant version of cached plans that preserves
99% of the API but doesn't offer any of the actual functionality, nor the
overhead. This puts SPI_execute() performance back on par, or maybe even
slightly better, than it was before. This change should resolve recent
complaints of performance degradation from Dong Ye, Pavel Stehule, and
others.
By avoiding data copying, this change also reduces the amount of memory
needed to execute many-statement SPI_execute() strings, as for instance in
a recent complaint from Tomas Vondra.
An additional benefit of this change is that multi-statement SPI_execute()
query strings are now processed fully serially, that is we complete
execution of earlier statements before running parse analysis and planning
on following ones. This eliminates a long-standing POLA violation, in that
DDL that affects the behavior of a later statement will now behave as
expected.
Back-patch to 9.2, since this was a performance regression compared to 9.1.
(In 9.2, place the added struct fields so as to avoid changing the offsets
of existing fields.)
Heikki Linnakangas and Tom Lane
2013-01-04 23:42:19 +01:00
|
|
|
/* One-shot plans can't be saved */
|
|
|
|
Assert(!plan->oneshot);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-08-02 18:05:23 +02:00
|
|
|
/*
|
2011-09-16 06:42:53 +02:00
|
|
|
* Create a memory context for the plan, underneath the procedure context.
|
Add macros to make AllocSetContextCreate() calls simpler and safer.
I found that half a dozen (nearly 5%) of our AllocSetContextCreate calls
had typos in the context-sizing parameters. While none of these led to
especially significant problems, they did create minor inefficiencies,
and it's now clear that expecting people to copy-and-paste those calls
accurately is not a great idea. Let's reduce the risk of future errors
by introducing single macros that encapsulate the common use-cases.
Three such macros are enough to cover all but two special-purpose contexts;
those two calls can be left as-is, I think.
While this patch doesn't in itself improve matters for third-party
extensions, it doesn't break anything for them either, and they can
gradually adopt the simplified notation over time.
In passing, change TopMemoryContext to use the default allocation
parameters. Formerly it could only be extended 8K at a time. That was
probably reasonable when this code was written; but nowadays we create
many more contexts than we did then, so that it's not unusual to have a
couple hundred K in TopMemoryContext, even without considering various
dubious code that sticks other things there. There seems no good reason
not to let it use growing blocks like most other contexts.
Back-patch to 9.6, mostly because that's still close enough to HEAD that
it's easy to do so, and keeping the branches in sync can be expected to
avoid some future back-patching pain. The bugs fixed by these changes
don't seem to be significant enough to justify fixing them further back.
Discussion: <21072.1472321324@sss.pgh.pa.us>
2016-08-27 23:50:38 +02:00
|
|
|
* We don't expect the plan to be very large.
|
2001-08-02 18:05:23 +02:00
|
|
|
*/
|
2001-05-21 16:22:19 +02:00
|
|
|
plancxt = AllocSetContextCreate(parentcxt,
|
2001-08-02 18:05:23 +02:00
|
|
|
"SPI Plan",
|
Add macros to make AllocSetContextCreate() calls simpler and safer.
I found that half a dozen (nearly 5%) of our AllocSetContextCreate calls
had typos in the context-sizing parameters. While none of these led to
especially significant problems, they did create minor inefficiencies,
and it's now clear that expecting people to copy-and-paste those calls
accurately is not a great idea. Let's reduce the risk of future errors
by introducing single macros that encapsulate the common use-cases.
Three such macros are enough to cover all but two special-purpose contexts;
those two calls can be left as-is, I think.
While this patch doesn't in itself improve matters for third-party
extensions, it doesn't break anything for them either, and they can
gradually adopt the simplified notation over time.
In passing, change TopMemoryContext to use the default allocation
parameters. Formerly it could only be extended 8K at a time. That was
probably reasonable when this code was written; but nowadays we create
many more contexts than we did then, so that it's not unusual to have a
couple hundred K in TopMemoryContext, even without considering various
dubious code that sticks other things there. There seems no good reason
not to let it use growing blocks like most other contexts.
Back-patch to 9.6, mostly because that's still close enough to HEAD that
it's easy to do so, and keeping the branches in sync can be expected to
avoid some future back-patching pain. The bugs fixed by these changes
don't seem to be significant enough to justify fixing them further back.
Discussion: <21072.1472321324@sss.pgh.pa.us>
2016-08-27 23:50:38 +02:00
|
|
|
ALLOCSET_SMALL_SIZES);
|
2001-05-21 16:22:19 +02:00
|
|
|
oldcxt = MemoryContextSwitchTo(plancxt);
|
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
/* Copy the SPI_plan struct and subsidiary data into the new context */
|
2018-03-24 15:05:06 +01:00
|
|
|
newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan));
|
2007-03-16 00:12:07 +01:00
|
|
|
newplan->magic = _SPI_PLAN_MAGIC;
|
2001-05-21 16:22:19 +02:00
|
|
|
newplan->plancxt = plancxt;
|
2007-04-16 19:21:24 +02:00
|
|
|
newplan->cursor_options = plan->cursor_options;
|
1997-09-07 07:04:48 +02:00
|
|
|
newplan->nargs = plan->nargs;
|
|
|
|
if (plan->nargs > 0)
|
|
|
|
{
|
|
|
|
newplan->argtypes = (Oid *) palloc(plan->nargs * sizeof(Oid));
|
|
|
|
memcpy(newplan->argtypes, plan->argtypes, plan->nargs * sizeof(Oid));
|
|
|
|
}
|
|
|
|
else
|
|
|
|
newplan->argtypes = NULL;
|
2009-11-04 23:26:08 +01:00
|
|
|
newplan->parserSetup = plan->parserSetup;
|
|
|
|
newplan->parserSetupArg = plan->parserSetupArg;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
/*
|
|
|
|
* Reparent all the CachedPlanSources into the procedure context. In
|
2012-06-10 21:20:04 +02:00
|
|
|
* theory this could fail partway through due to the pallocs, but we don't
|
|
|
|
* care too much since both the procedure context and the executor context
|
|
|
|
* would go away on error.
|
2011-09-16 06:42:53 +02:00
|
|
|
*/
|
2007-03-16 00:12:07 +01:00
|
|
|
foreach(lc, plan->plancache_list)
|
|
|
|
{
|
|
|
|
CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
|
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
CachedPlanSetParentContext(plansource, parentcxt);
|
|
|
|
|
|
|
|
/* Build new list, with list cells in plancxt */
|
|
|
|
newplan->plancache_list = lappend(newplan->plancache_list, plansource);
|
2007-03-16 00:12:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
MemoryContextSwitchTo(oldcxt);
|
|
|
|
|
2011-09-16 06:42:53 +02:00
|
|
|
/* For safety, unlink the CachedPlanSources from the temporary plan */
|
|
|
|
plan->plancache_list = NIL;
|
|
|
|
|
2007-03-16 00:12:07 +01:00
|
|
|
return newplan;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Make a "saved" copy of the given plan.
 *
 * The copy is built in its own memory context, which is then reparented
 * under CacheMemoryContext so it survives beyond the current transaction.
 * The input plan is not modified.
 */
static SPIPlanPtr
_SPI_save_plan(SPIPlanPtr plan)
{
	SPIPlanPtr	newplan;
	MemoryContext plancxt;
	MemoryContext oldcxt;
	ListCell   *lc;

	/* One-shot plans can't be saved */
	Assert(!plan->oneshot);

	/*
	 * Create a memory context for the plan.  We don't expect the plan to be
	 * very large, so use smaller-than-default alloc parameters.  It's a
	 * transient context until we finish copying everything.
	 */
	plancxt = AllocSetContextCreate(CurrentMemoryContext,
									"SPI Plan",
									ALLOCSET_SMALL_SIZES);
	oldcxt = MemoryContextSwitchTo(plancxt);

	/* Copy the SPI plan into its own context */
	newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan));
	newplan->magic = _SPI_PLAN_MAGIC;
	newplan->plancxt = plancxt;
	newplan->cursor_options = plan->cursor_options;
	newplan->nargs = plan->nargs;
	if (plan->nargs > 0)
	{
		/* deep-copy the argument type array into the new context */
		newplan->argtypes = (Oid *) palloc(plan->nargs * sizeof(Oid));
		memcpy(newplan->argtypes, plan->argtypes, plan->nargs * sizeof(Oid));
	}
	else
		newplan->argtypes = NULL;
	newplan->parserSetup = plan->parserSetup;
	newplan->parserSetupArg = plan->parserSetupArg;

	/* Copy all the plancache entries */
	foreach(lc, plan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);
		CachedPlanSource *newsource;

		newsource = CopyCachedPlan(plansource);
		/* list cells are allocated in plancxt, since it's still current */
		newplan->plancache_list = lappend(newplan->plancache_list, newsource);
	}

	MemoryContextSwitchTo(oldcxt);

	/*
	 * Mark it saved, reparent it under CacheMemoryContext, and mark all the
	 * component CachedPlanSources as saved.  This sequence cannot fail
	 * partway through, so there's no risk of long-term memory leakage.
	 */
	newplan->saved = true;
	MemoryContextSetParent(newplan->plancxt, CacheMemoryContext);

	foreach(lc, newplan->plancache_list)
	{
		CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc);

		SaveCachedPlan(plansource);
	}

	return newplan;
}
|
2017-04-01 06:17:18 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Internal lookup of ephemeral named relation by name.
|
|
|
|
*/
|
|
|
|
static EphemeralNamedRelation
|
|
|
|
_SPI_find_ENR_by_name(const char *name)
|
|
|
|
{
|
|
|
|
/* internal static function; any error is bug in SPI itself */
|
|
|
|
Assert(name != NULL);
|
|
|
|
|
|
|
|
/* fast exit if no tuplestores have been added */
|
|
|
|
if (_SPI_current->queryEnv == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return get_ENR(_SPI_current->queryEnv, name);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Register an ephemeral named relation for use by the planner and executor on
|
|
|
|
* subsequent calls using this SPI connection.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
SPI_register_relation(EphemeralNamedRelation enr)
|
|
|
|
{
|
|
|
|
EphemeralNamedRelation match;
|
|
|
|
int res;
|
|
|
|
|
|
|
|
if (enr == NULL || enr->md.name == NULL)
|
|
|
|
return SPI_ERROR_ARGUMENT;
|
|
|
|
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:18:54 +02:00
|
|
|
res = _SPI_begin_call(false); /* keep current memory context */
|
2017-04-01 06:17:18 +02:00
|
|
|
if (res < 0)
|
|
|
|
return res;
|
|
|
|
|
|
|
|
match = _SPI_find_ENR_by_name(enr->md.name);
|
|
|
|
if (match)
|
|
|
|
res = SPI_ERROR_REL_DUPLICATE;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (_SPI_current->queryEnv == NULL)
|
|
|
|
_SPI_current->queryEnv = create_queryEnv();
|
|
|
|
|
|
|
|
register_ENR(_SPI_current->queryEnv, enr);
|
|
|
|
res = SPI_OK_REL_REGISTER;
|
|
|
|
}
|
|
|
|
|
|
|
|
_SPI_end_call(false);
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unregister an ephemeral named relation by name. This will probably be a
|
|
|
|
* rarely used function, since SPI_finish will clear it automatically.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
SPI_unregister_relation(const char *name)
|
|
|
|
{
|
|
|
|
EphemeralNamedRelation match;
|
|
|
|
int res;
|
|
|
|
|
|
|
|
if (name == NULL)
|
|
|
|
return SPI_ERROR_ARGUMENT;
|
|
|
|
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:18:54 +02:00
|
|
|
res = _SPI_begin_call(false); /* keep current memory context */
|
2017-04-01 06:17:18 +02:00
|
|
|
if (res < 0)
|
|
|
|
return res;
|
|
|
|
|
|
|
|
match = _SPI_find_ENR_by_name(name);
|
|
|
|
if (match)
|
|
|
|
{
|
|
|
|
unregister_ENR(_SPI_current->queryEnv, match->md.name);
|
|
|
|
res = SPI_OK_REL_UNREGISTER;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
res = SPI_ERROR_REL_NOT_FOUND;
|
|
|
|
|
|
|
|
_SPI_end_call(false);
|
|
|
|
|
|
|
|
return res;
|
|
|
|
}
|
2017-04-05 01:36:39 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Register the transient relations from 'tdata' using this SPI connection.
|
|
|
|
* This should be called by PL implementations' trigger handlers after
|
|
|
|
* connecting, in order to make transition tables visible to any queries run
|
|
|
|
* in this connection.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
SPI_register_trigger_data(TriggerData *tdata)
|
|
|
|
{
|
|
|
|
if (tdata == NULL)
|
|
|
|
return SPI_ERROR_ARGUMENT;
|
|
|
|
|
|
|
|
if (tdata->tg_newtable)
|
|
|
|
{
|
|
|
|
EphemeralNamedRelation enr =
|
2017-05-17 22:31:56 +02:00
|
|
|
palloc(sizeof(EphemeralNamedRelationData));
|
|
|
|
int rc;
|
2017-04-05 01:36:39 +02:00
|
|
|
|
|
|
|
enr->md.name = tdata->tg_trigger->tgnewtable;
|
|
|
|
enr->md.reliddesc = tdata->tg_relation->rd_id;
|
|
|
|
enr->md.tupdesc = NULL;
|
|
|
|
enr->md.enrtype = ENR_NAMED_TUPLESTORE;
|
|
|
|
enr->md.enrtuples = tuplestore_tuple_count(tdata->tg_newtable);
|
|
|
|
enr->reldata = tdata->tg_newtable;
|
|
|
|
rc = SPI_register_relation(enr);
|
|
|
|
if (rc != SPI_OK_REL_REGISTER)
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (tdata->tg_oldtable)
|
|
|
|
{
|
|
|
|
EphemeralNamedRelation enr =
|
2017-05-17 22:31:56 +02:00
|
|
|
palloc(sizeof(EphemeralNamedRelationData));
|
|
|
|
int rc;
|
2017-04-05 01:36:39 +02:00
|
|
|
|
|
|
|
enr->md.name = tdata->tg_trigger->tgoldtable;
|
|
|
|
enr->md.reliddesc = tdata->tg_relation->rd_id;
|
|
|
|
enr->md.tupdesc = NULL;
|
|
|
|
enr->md.enrtype = ENR_NAMED_TUPLESTORE;
|
|
|
|
enr->md.enrtuples = tuplestore_tuple_count(tdata->tg_oldtable);
|
|
|
|
enr->reldata = tdata->tg_oldtable;
|
|
|
|
rc = SPI_register_relation(enr);
|
|
|
|
if (rc != SPI_OK_REL_REGISTER)
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
return SPI_OK_TD_REGISTER;
|
|
|
|
}
|