2011-12-18 20:14:16 +01:00
|
|
|
/*
|
|
|
|
* interface to SPI functions
|
|
|
|
*
|
|
|
|
* src/pl/plpython/plpy_spi.c
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "postgres.h"
|
|
|
|
|
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's
portalPos field, FuncCallContext's call_cntr and max_calls fields,
ExecutorRun's count argument, PortalRunFetch's result, and the max number
of rows in a SPITupleTable to uint64, and deals with (I hope) all the
ensuing fallout. Some of these values were declared uint32 before, and
others "long".
I also removed PortalData's posOverflow field, since that logic seems
pretty useless given that portalPos is now always 64 bits.
The user-visible results are that command tags for SELECT etc will
correctly report tuple counts larger than 4G, as will plpgsql's GET
GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples
than that are still not exactly the norm, but they're becoming more
common.
Most values associated with FETCH/MOVE distances, such as PortalRun's count
argument and the count argument of most SPI functions that have one, remain
declared as "long". It's not clear whether it would be worth promoting
those to int64; but it would definitely be a large dollop of additional
API churn on top of this, and it would only help 32-bit platforms which
seem relatively less likely to see any benefit.
Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
2016-03-12 22:05:10 +01:00
|
|
|
#include <limits.h>
|
|
|
|
|
2012-08-30 22:15:44 +02:00
|
|
|
#include "access/htup_details.h"
|
2011-12-18 20:14:16 +01:00
|
|
|
#include "access/xact.h"
|
|
|
|
#include "catalog/pg_type.h"
|
2013-01-31 02:11:58 +01:00
|
|
|
#include "executor/spi.h"
|
2011-12-18 20:14:16 +01:00
|
|
|
#include "mb/pg_wchar.h"
|
|
|
|
#include "parser/parse_type.h"
|
2012-05-02 19:59:51 +02:00
|
|
|
#include "utils/memutils.h"
|
2011-12-18 20:14:16 +01:00
|
|
|
#include "utils/syscache.h"
|
|
|
|
|
|
|
|
#include "plpython.h"
|
|
|
|
|
|
|
|
#include "plpy_spi.h"
|
|
|
|
|
|
|
|
#include "plpy_elog.h"
|
2012-03-13 18:19:06 +01:00
|
|
|
#include "plpy_main.h"
|
2011-12-18 20:14:16 +01:00
|
|
|
#include "plpy_planobject.h"
|
|
|
|
#include "plpy_plpymodule.h"
|
|
|
|
#include "plpy_procedure.h"
|
|
|
|
#include "plpy_resultobject.h"
|
|
|
|
|
|
|
|
|
2011-12-29 21:55:49 +01:00
|
|
|
/* Forward declarations of helpers local to this file */
static PyObject *PLy_spi_execute_query(char *query, long limit);
static PyObject *PLy_spi_execute_fetch_result(SPITupleTable *tuptable,
											  uint64 rows, int status);
static void PLy_spi_exception_set(PyObject *excclass, ErrorData *edata);
|
2011-12-18 20:14:16 +01:00
|
|
|
|
|
|
|
|
|
|
|
/* prepare(query="select * from foo")
 * prepare(query="select * from foo where bar = $1", params=["text"])
 * prepare(query="select * from foo where bar = $1", params=["text"], limit=5)
 *
 * Implements plpy.prepare(): build a PLyPlanObject holding a saved SPI plan
 * for the given query string, resolving the optional sequence of argument
 * type names into Oids and per-argument output converters.  SPI work runs
 * inside a subtransaction so that a backend error can be turned into a
 * Python exception instead of aborting the whole function call.
 */
PyObject *
PLy_spi_prepare(PyObject *self, PyObject *args)
{
	PLyPlanObject *plan;
	PyObject   *list = NULL;
	PyObject   *volatile optr = NULL;	/* volatile: referenced in PG_CATCH
										 * after a longjmp */
	char	   *query;
	PLyExecutionContext *exec_ctx = PLy_current_execution_context();
	volatile MemoryContext oldcontext;
	volatile ResourceOwner oldowner;
	volatile int nargs;

	if (!PyArg_ParseTuple(args, "s|O:prepare", &query, &list))
		return NULL;

	/* The optional second argument must be a sequence of type names. */
	if (list && (!PySequence_Check(list)))
	{
		PLy_exception_set(PyExc_TypeError,
						  "second argument of plpy.prepare must be a sequence");
		return NULL;
	}

	if ((plan = (PLyPlanObject *) PLy_plan_new()) == NULL)
		return NULL;

	/*
	 * Give the plan its own long-lived memory context, so the arrays
	 * allocated below survive as long as the plan object itself.
	 */
	plan->mcxt = AllocSetContextCreate(TopMemoryContext,
									   "PL/Python plan context",
									   ALLOCSET_DEFAULT_SIZES);
	oldcontext = MemoryContextSwitchTo(plan->mcxt);

	nargs = list ? PySequence_Length(list) : 0;

	plan->nargs = nargs;
	plan->types = nargs ? palloc0(sizeof(Oid) * nargs) : NULL;
	plan->values = nargs ? palloc0(sizeof(Datum) * nargs) : NULL;
	plan->args = nargs ? palloc0(sizeof(PLyObToDatum) * nargs) : NULL;

	MemoryContextSwitchTo(oldcontext);

	oldcontext = CurrentMemoryContext;
	oldowner = CurrentResourceOwner;

	PLy_spi_subtransaction_begin(oldcontext, oldowner);

	PG_TRY();
	{
		int			i;

		for (i = 0; i < nargs; i++)
		{
			char	   *sptr;
			Oid			typeId;
			int32		typmod;

			optr = PySequence_GetItem(list, i);
			if (PyString_Check(optr))
				sptr = PyString_AsString(optr);
			else if (PyUnicode_Check(optr))
				sptr = PLyUnicode_AsString(optr);
			else
			{
				ereport(ERROR,
						(errmsg("plpy.prepare: type name at ordinal position %d is not a string", i)));
				sptr = NULL;	/* keep compiler quiet */
			}

			/********************************************************
			 * Resolve argument type names and then look them up by
			 * oid in the system cache, and remember the required
			 * information for input conversion.
			 ********************************************************/

			parseTypeString(sptr, &typeId, &typmod, false);

			Py_DECREF(optr);

			/*
			 * set optr to NULL, so we won't try to unref it again in case of
			 * an error
			 */
			optr = NULL;

			plan->types[i] = typeId;
			PLy_output_setup_func(&plan->args[i], plan->mcxt,
								  typeId, typmod,
								  exec_ctx->curr_proc);
		}

		/* Reject strings that are not valid in the server encoding. */
		pg_verifymbstr(query, strlen(query), false);
		plan->plan = SPI_prepare(query, plan->nargs, plan->types);
		if (plan->plan == NULL)
			elog(ERROR, "SPI_prepare failed: %s",
				 SPI_result_code_string(SPI_result));

		/* transfer plan from procCxt to topCxt */
		if (SPI_keepplan(plan->plan))
			elog(ERROR, "SPI_keepplan failed");

		PLy_spi_subtransaction_commit(oldcontext, oldowner);
	}
	PG_CATCH();
	{
		/* Drop the half-built plan and any pending sequence item. */
		Py_DECREF(plan);
		Py_XDECREF(optr);

		PLy_spi_subtransaction_abort(oldcontext, oldowner);
		return NULL;
	}
	PG_END_TRY();

	Assert(plan->plan != NULL);
	return (PyObject *) plan;
}
|
|
|
|
|
|
|
|
/* execute(query="select * from foo", limit=5)
|
|
|
|
* execute(plan=plan, values=(foo, bar), limit=5)
|
|
|
|
*/
|
|
|
|
PyObject *
|
|
|
|
PLy_spi_execute(PyObject *self, PyObject *args)
|
|
|
|
{
|
|
|
|
char *query;
|
|
|
|
PyObject *plan;
|
|
|
|
PyObject *list = NULL;
|
|
|
|
long limit = 0;
|
|
|
|
|
|
|
|
if (PyArg_ParseTuple(args, "s|l", &query, &limit))
|
|
|
|
return PLy_spi_execute_query(query, limit);
|
|
|
|
|
|
|
|
PyErr_Clear();
|
|
|
|
|
|
|
|
if (PyArg_ParseTuple(args, "O|Ol", &plan, &list, &limit) &&
|
|
|
|
is_PLyPlanObject(plan))
|
|
|
|
return PLy_spi_execute_plan(plan, list, limit);
|
|
|
|
|
|
|
|
PLy_exception_set(PLy_exc_error, "plpy.execute expected a query or a plan");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-02-25 14:42:25 +01:00
|
|
|
PyObject *
|
2011-12-18 20:14:16 +01:00
|
|
|
PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit)
|
|
|
|
{
|
|
|
|
volatile int nargs;
|
|
|
|
int i,
|
|
|
|
rv;
|
|
|
|
PLyPlanObject *plan;
|
|
|
|
volatile MemoryContext oldcontext;
|
|
|
|
volatile ResourceOwner oldowner;
|
|
|
|
PyObject *ret;
|
|
|
|
|
|
|
|
if (list != NULL)
|
|
|
|
{
|
|
|
|
if (!PySequence_Check(list) || PyString_Check(list) || PyUnicode_Check(list))
|
|
|
|
{
|
|
|
|
PLy_exception_set(PyExc_TypeError, "plpy.execute takes a sequence as its second argument");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
nargs = PySequence_Length(list);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
nargs = 0;
|
|
|
|
|
|
|
|
plan = (PLyPlanObject *) ob;
|
|
|
|
|
|
|
|
if (nargs != plan->nargs)
|
|
|
|
{
|
|
|
|
char *sv;
|
|
|
|
PyObject *so = PyObject_Str(list);
|
|
|
|
|
|
|
|
if (!so)
|
|
|
|
PLy_elog(ERROR, "could not execute plan");
|
|
|
|
sv = PyString_AsString(so);
|
|
|
|
PLy_exception_set_plural(PyExc_TypeError,
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
"Expected sequence of %d argument, got %d: %s",
|
|
|
|
"Expected sequence of %d arguments, got %d: %s",
|
2011-12-18 20:14:16 +01:00
|
|
|
plan->nargs,
|
|
|
|
plan->nargs, nargs, sv);
|
|
|
|
Py_DECREF(so);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
oldcontext = CurrentMemoryContext;
|
|
|
|
oldowner = CurrentResourceOwner;
|
|
|
|
|
|
|
|
PLy_spi_subtransaction_begin(oldcontext, oldowner);
|
|
|
|
|
|
|
|
PG_TRY();
|
|
|
|
{
|
2012-03-13 18:19:06 +01:00
|
|
|
PLyExecutionContext *exec_ctx = PLy_current_execution_context();
|
2011-12-18 20:14:16 +01:00
|
|
|
char *volatile nulls;
|
|
|
|
volatile int j;
|
|
|
|
|
|
|
|
if (nargs > 0)
|
|
|
|
nulls = palloc(nargs * sizeof(char));
|
|
|
|
else
|
|
|
|
nulls = NULL;
|
|
|
|
|
|
|
|
for (j = 0; j < nargs; j++)
|
|
|
|
{
|
Make PL/Python handle domain-type conversions correctly.
Fix PL/Python so that it can handle domains over composite, and so that
it enforces domain constraints correctly in other cases that were not
always done properly before. Notably, it didn't do arrays of domains
right (oversight in commit c12d570fa), and it failed to enforce domain
constraints when returning a composite type containing a domain field,
and if a transform function is being used for a domain's base type then
it failed to enforce domain constraints on the result. Also, in many
places it missed checking domain constraints on null values, because
the plpy_typeio code simply wasn't called for Py_None.
Rather than try to band-aid these problems, I made a significant
refactoring of the plpy_typeio logic. The existing design of recursing
for array and composite members is extended to also treat domains as
containers requiring recursion, and the APIs for the module are cleaned
up and simplified.
The patch also modifies plpy_typeio to rely on the typcache more than
it did before (which was pretty much not at all). This reduces the
need for repetitive lookups, and lets us get rid of an ad-hoc scheme
for detecting changes in composite types. I added a couple of small
features to typcache to help with that.
Although some of this is fixing bugs that long predate v11, I don't
think we should risk a back-patch: it's a significant amount of code
churn, and there've been no complaints from the field about the bugs.
Tom Lane, reviewed by Anthony Bykov
Discussion: https://postgr.es/m/24449.1509393613@sss.pgh.pa.us
2017-11-16 22:22:57 +01:00
|
|
|
PLyObToDatum *arg = &plan->args[j];
|
2011-12-18 20:14:16 +01:00
|
|
|
PyObject *elem;
|
|
|
|
|
|
|
|
elem = PySequence_GetItem(list, j);
|
Make PL/Python handle domain-type conversions correctly.
Fix PL/Python so that it can handle domains over composite, and so that
it enforces domain constraints correctly in other cases that were not
always done properly before. Notably, it didn't do arrays of domains
right (oversight in commit c12d570fa), and it failed to enforce domain
constraints when returning a composite type containing a domain field,
and if a transform function is being used for a domain's base type then
it failed to enforce domain constraints on the result. Also, in many
places it missed checking domain constraints on null values, because
the plpy_typeio code simply wasn't called for Py_None.
Rather than try to band-aid these problems, I made a significant
refactoring of the plpy_typeio logic. The existing design of recursing
for array and composite members is extended to also treat domains as
containers requiring recursion, and the APIs for the module are cleaned
up and simplified.
The patch also modifies plpy_typeio to rely on the typcache more than
it did before (which was pretty much not at all). This reduces the
need for repetitive lookups, and lets us get rid of an ad-hoc scheme
for detecting changes in composite types. I added a couple of small
features to typcache to help with that.
Although some of this is fixing bugs that long predate v11, I don't
think we should risk a back-patch: it's a significant amount of code
churn, and there've been no complaints from the field about the bugs.
Tom Lane, reviewed by Anthony Bykov
Discussion: https://postgr.es/m/24449.1509393613@sss.pgh.pa.us
2017-11-16 22:22:57 +01:00
|
|
|
PG_TRY();
|
2011-12-18 20:14:16 +01:00
|
|
|
{
|
Make PL/Python handle domain-type conversions correctly.
Fix PL/Python so that it can handle domains over composite, and so that
it enforces domain constraints correctly in other cases that were not
always done properly before. Notably, it didn't do arrays of domains
right (oversight in commit c12d570fa), and it failed to enforce domain
constraints when returning a composite type containing a domain field,
and if a transform function is being used for a domain's base type then
it failed to enforce domain constraints on the result. Also, in many
places it missed checking domain constraints on null values, because
the plpy_typeio code simply wasn't called for Py_None.
Rather than try to band-aid these problems, I made a significant
refactoring of the plpy_typeio logic. The existing design of recursing
for array and composite members is extended to also treat domains as
containers requiring recursion, and the APIs for the module are cleaned
up and simplified.
The patch also modifies plpy_typeio to rely on the typcache more than
it did before (which was pretty much not at all). This reduces the
need for repetitive lookups, and lets us get rid of an ad-hoc scheme
for detecting changes in composite types. I added a couple of small
features to typcache to help with that.
Although some of this is fixing bugs that long predate v11, I don't
think we should risk a back-patch: it's a significant amount of code
churn, and there've been no complaints from the field about the bugs.
Tom Lane, reviewed by Anthony Bykov
Discussion: https://postgr.es/m/24449.1509393613@sss.pgh.pa.us
2017-11-16 22:22:57 +01:00
|
|
|
bool isnull;
|
2011-12-18 20:14:16 +01:00
|
|
|
|
Make PL/Python handle domain-type conversions correctly.
Fix PL/Python so that it can handle domains over composite, and so that
it enforces domain constraints correctly in other cases that were not
always done properly before. Notably, it didn't do arrays of domains
right (oversight in commit c12d570fa), and it failed to enforce domain
constraints when returning a composite type containing a domain field,
and if a transform function is being used for a domain's base type then
it failed to enforce domain constraints on the result. Also, in many
places it missed checking domain constraints on null values, because
the plpy_typeio code simply wasn't called for Py_None.
Rather than try to band-aid these problems, I made a significant
refactoring of the plpy_typeio logic. The existing design of recursing
for array and composite members is extended to also treat domains as
containers requiring recursion, and the APIs for the module are cleaned
up and simplified.
The patch also modifies plpy_typeio to rely on the typcache more than
it did before (which was pretty much not at all). This reduces the
need for repetitive lookups, and lets us get rid of an ad-hoc scheme
for detecting changes in composite types. I added a couple of small
features to typcache to help with that.
Although some of this is fixing bugs that long predate v11, I don't
think we should risk a back-patch: it's a significant amount of code
churn, and there've been no complaints from the field about the bugs.
Tom Lane, reviewed by Anthony Bykov
Discussion: https://postgr.es/m/24449.1509393613@sss.pgh.pa.us
2017-11-16 22:22:57 +01:00
|
|
|
plan->values[j] = PLy_output_convert(arg, elem, &isnull);
|
|
|
|
nulls[j] = isnull ? 'n' : ' ';
|
2011-12-18 20:14:16 +01:00
|
|
|
}
|
Make PL/Python handle domain-type conversions correctly.
Fix PL/Python so that it can handle domains over composite, and so that
it enforces domain constraints correctly in other cases that were not
always done properly before. Notably, it didn't do arrays of domains
right (oversight in commit c12d570fa), and it failed to enforce domain
constraints when returning a composite type containing a domain field,
and if a transform function is being used for a domain's base type then
it failed to enforce domain constraints on the result. Also, in many
places it missed checking domain constraints on null values, because
the plpy_typeio code simply wasn't called for Py_None.
Rather than try to band-aid these problems, I made a significant
refactoring of the plpy_typeio logic. The existing design of recursing
for array and composite members is extended to also treat domains as
containers requiring recursion, and the APIs for the module are cleaned
up and simplified.
The patch also modifies plpy_typeio to rely on the typcache more than
it did before (which was pretty much not at all). This reduces the
need for repetitive lookups, and lets us get rid of an ad-hoc scheme
for detecting changes in composite types. I added a couple of small
features to typcache to help with that.
Although some of this is fixing bugs that long predate v11, I don't
think we should risk a back-patch: it's a significant amount of code
churn, and there've been no complaints from the field about the bugs.
Tom Lane, reviewed by Anthony Bykov
Discussion: https://postgr.es/m/24449.1509393613@sss.pgh.pa.us
2017-11-16 22:22:57 +01:00
|
|
|
PG_CATCH();
|
2011-12-18 20:14:16 +01:00
|
|
|
{
|
|
|
|
Py_DECREF(elem);
|
Make PL/Python handle domain-type conversions correctly.
Fix PL/Python so that it can handle domains over composite, and so that
it enforces domain constraints correctly in other cases that were not
always done properly before. Notably, it didn't do arrays of domains
right (oversight in commit c12d570fa), and it failed to enforce domain
constraints when returning a composite type containing a domain field,
and if a transform function is being used for a domain's base type then
it failed to enforce domain constraints on the result. Also, in many
places it missed checking domain constraints on null values, because
the plpy_typeio code simply wasn't called for Py_None.
Rather than try to band-aid these problems, I made a significant
refactoring of the plpy_typeio logic. The existing design of recursing
for array and composite members is extended to also treat domains as
containers requiring recursion, and the APIs for the module are cleaned
up and simplified.
The patch also modifies plpy_typeio to rely on the typcache more than
it did before (which was pretty much not at all). This reduces the
need for repetitive lookups, and lets us get rid of an ad-hoc scheme
for detecting changes in composite types. I added a couple of small
features to typcache to help with that.
Although some of this is fixing bugs that long predate v11, I don't
think we should risk a back-patch: it's a significant amount of code
churn, and there've been no complaints from the field about the bugs.
Tom Lane, reviewed by Anthony Bykov
Discussion: https://postgr.es/m/24449.1509393613@sss.pgh.pa.us
2017-11-16 22:22:57 +01:00
|
|
|
PG_RE_THROW();
|
2011-12-18 20:14:16 +01:00
|
|
|
}
|
Make PL/Python handle domain-type conversions correctly.
Fix PL/Python so that it can handle domains over composite, and so that
it enforces domain constraints correctly in other cases that were not
always done properly before. Notably, it didn't do arrays of domains
right (oversight in commit c12d570fa), and it failed to enforce domain
constraints when returning a composite type containing a domain field,
and if a transform function is being used for a domain's base type then
it failed to enforce domain constraints on the result. Also, in many
places it missed checking domain constraints on null values, because
the plpy_typeio code simply wasn't called for Py_None.
Rather than try to band-aid these problems, I made a significant
refactoring of the plpy_typeio logic. The existing design of recursing
for array and composite members is extended to also treat domains as
containers requiring recursion, and the APIs for the module are cleaned
up and simplified.
The patch also modifies plpy_typeio to rely on the typcache more than
it did before (which was pretty much not at all). This reduces the
need for repetitive lookups, and lets us get rid of an ad-hoc scheme
for detecting changes in composite types. I added a couple of small
features to typcache to help with that.
Although some of this is fixing bugs that long predate v11, I don't
think we should risk a back-patch: it's a significant amount of code
churn, and there've been no complaints from the field about the bugs.
Tom Lane, reviewed by Anthony Bykov
Discussion: https://postgr.es/m/24449.1509393613@sss.pgh.pa.us
2017-11-16 22:22:57 +01:00
|
|
|
PG_END_TRY();
|
|
|
|
Py_DECREF(elem);
|
2011-12-18 20:14:16 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
rv = SPI_execute_plan(plan->plan, plan->values, nulls,
|
2012-03-13 18:19:06 +01:00
|
|
|
exec_ctx->curr_proc->fn_readonly, limit);
|
2011-12-18 20:14:16 +01:00
|
|
|
ret = PLy_spi_execute_fetch_result(SPI_tuptable, SPI_processed, rv);
|
|
|
|
|
|
|
|
if (nargs > 0)
|
|
|
|
pfree(nulls);
|
|
|
|
|
|
|
|
PLy_spi_subtransaction_commit(oldcontext, oldowner);
|
|
|
|
}
|
|
|
|
PG_CATCH();
|
|
|
|
{
|
|
|
|
int k;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* cleanup plan->values array
|
|
|
|
*/
|
|
|
|
for (k = 0; k < nargs; k++)
|
|
|
|
{
|
Make PL/Python handle domain-type conversions correctly.
Fix PL/Python so that it can handle domains over composite, and so that
it enforces domain constraints correctly in other cases that were not
always done properly before. Notably, it didn't do arrays of domains
right (oversight in commit c12d570fa), and it failed to enforce domain
constraints when returning a composite type containing a domain field,
and if a transform function is being used for a domain's base type then
it failed to enforce domain constraints on the result. Also, in many
places it missed checking domain constraints on null values, because
the plpy_typeio code simply wasn't called for Py_None.
Rather than try to band-aid these problems, I made a significant
refactoring of the plpy_typeio logic. The existing design of recursing
for array and composite members is extended to also treat domains as
containers requiring recursion, and the APIs for the module are cleaned
up and simplified.
The patch also modifies plpy_typeio to rely on the typcache more than
it did before (which was pretty much not at all). This reduces the
need for repetitive lookups, and lets us get rid of an ad-hoc scheme
for detecting changes in composite types. I added a couple of small
features to typcache to help with that.
Although some of this is fixing bugs that long predate v11, I don't
think we should risk a back-patch: it's a significant amount of code
churn, and there've been no complaints from the field about the bugs.
Tom Lane, reviewed by Anthony Bykov
Discussion: https://postgr.es/m/24449.1509393613@sss.pgh.pa.us
2017-11-16 22:22:57 +01:00
|
|
|
if (!plan->args[k].typbyval &&
|
2011-12-18 20:14:16 +01:00
|
|
|
(plan->values[k] != PointerGetDatum(NULL)))
|
|
|
|
{
|
|
|
|
pfree(DatumGetPointer(plan->values[k]));
|
|
|
|
plan->values[k] = PointerGetDatum(NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
PLy_spi_subtransaction_abort(oldcontext, oldowner);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
PG_END_TRY();
|
|
|
|
|
|
|
|
for (i = 0; i < nargs; i++)
|
|
|
|
{
|
Make PL/Python handle domain-type conversions correctly.
Fix PL/Python so that it can handle domains over composite, and so that
it enforces domain constraints correctly in other cases that were not
always done properly before. Notably, it didn't do arrays of domains
right (oversight in commit c12d570fa), and it failed to enforce domain
constraints when returning a composite type containing a domain field,
and if a transform function is being used for a domain's base type then
it failed to enforce domain constraints on the result. Also, in many
places it missed checking domain constraints on null values, because
the plpy_typeio code simply wasn't called for Py_None.
Rather than try to band-aid these problems, I made a significant
refactoring of the plpy_typeio logic. The existing design of recursing
for array and composite members is extended to also treat domains as
containers requiring recursion, and the APIs for the module are cleaned
up and simplified.
The patch also modifies plpy_typeio to rely on the typcache more than
it did before (which was pretty much not at all). This reduces the
need for repetitive lookups, and lets us get rid of an ad-hoc scheme
for detecting changes in composite types. I added a couple of small
features to typcache to help with that.
Although some of this is fixing bugs that long predate v11, I don't
think we should risk a back-patch: it's a significant amount of code
churn, and there've been no complaints from the field about the bugs.
Tom Lane, reviewed by Anthony Bykov
Discussion: https://postgr.es/m/24449.1509393613@sss.pgh.pa.us
2017-11-16 22:22:57 +01:00
|
|
|
if (!plan->args[i].typbyval &&
|
2011-12-18 20:14:16 +01:00
|
|
|
(plan->values[i] != PointerGetDatum(NULL)))
|
|
|
|
{
|
|
|
|
pfree(DatumGetPointer(plan->values[i]));
|
|
|
|
plan->values[i] = PointerGetDatum(NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rv < 0)
|
|
|
|
{
|
|
|
|
PLy_exception_set(PLy_exc_spi_error,
|
|
|
|
"SPI_execute_plan failed: %s",
|
|
|
|
SPI_result_code_string(rv));
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Execute a one-shot SQL query (plpy.execute with a string argument).
 *
 * query  - SQL text in the server encoding; validated before execution
 * limit  - max number of rows to fetch (0 means no limit, per SPI_execute)
 *
 * Runs inside its own subtransaction so that an SQL error can be turned
 * into a Python exception without corrupting the outer transaction.
 * Returns a PLyResultObject, or NULL with a Python exception set.
 */
static PyObject *
PLy_spi_execute_query(char *query, long limit)
{
	int			rv;

	/*
	 * volatile because these are referenced in the PG_CATCH block after a
	 * longjmp; non-volatile locals modified in PG_TRY may be clobbered.
	 */
	volatile MemoryContext oldcontext;
	volatile ResourceOwner oldowner;
	PyObject   *ret = NULL;

	oldcontext = CurrentMemoryContext;
	oldowner = CurrentResourceOwner;

	/* Enter a subtransaction so SQL errors don't abort the outer xact */
	PLy_spi_subtransaction_begin(oldcontext, oldowner);

	PG_TRY();
	{
		PLyExecutionContext *exec_ctx = PLy_current_execution_context();

		/* Reject strings that are not valid in the database encoding */
		pg_verifymbstr(query, strlen(query), false);
		rv = SPI_execute(query, exec_ctx->curr_proc->fn_readonly, limit);
		ret = PLy_spi_execute_fetch_result(SPI_tuptable, SPI_processed, rv);

		PLy_spi_subtransaction_commit(oldcontext, oldowner);
	}
	PG_CATCH();
	{
		/* Converts the error into a Python exception and rolls back */
		PLy_spi_subtransaction_abort(oldcontext, oldowner);
		return NULL;
	}
	PG_END_TRY();

	if (rv < 0)
	{
		/* SPI reported failure without elog; drop any partial result */
		Py_XDECREF(ret);
		PLy_exception_set(PLy_exc_spi_error,
						  "SPI_execute failed: %s",
						  SPI_result_code_string(rv));
		return NULL;
	}

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Convert an SPI result into a Python PLyResultObject.
 *
 * tuptable - SPI tuple table (may be NULL for non-SELECT commands);
 *            always freed here, on both success and failure paths
 * rows     - number of tuples processed (SPI_processed, uint64)
 * status   - SPI return code, exposed as result.status
 *
 * Returns a new PLyResultObject, or NULL on allocation failure.
 * Note: errors inside the conversion loop are thrown with ereport and
 * escape via PG_RE_THROW after releasing Python and memory resources.
 */
static PyObject *
PLy_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 rows, int status)
{
	PLyResultObject *result;
	PLyExecutionContext *exec_ctx = PLy_current_execution_context();

	/* volatile: referenced in PG_CATCH after a longjmp */
	volatile MemoryContext oldcontext;

	result = (PLyResultObject *) PLy_result_new();
	if (!result)
	{
		/* Still must release the SPI tuple table before bailing out */
		SPI_freetuptable(tuptable);
		return NULL;
	}
	Py_DECREF(result->status);
	result->status = PyInt_FromLong(status);

	if (status > 0 && tuptable == NULL)
	{
		/* Utility/DML command: report row count only, no rows to convert */
		Py_DECREF(result->nrows);
		result->nrows = PyLong_FromUnsignedLongLong(rows);
	}
	else if (status > 0 && tuptable != NULL)
	{
		PLyDatumToOb ininfo;
		MemoryContext cxt;

		Py_DECREF(result->nrows);
		result->nrows = PyLong_FromUnsignedLongLong(rows);

		/* Scratch context for per-tuple conversion allocations */
		cxt = AllocSetContextCreate(CurrentMemoryContext,
									"PL/Python temp context",
									ALLOCSET_DEFAULT_SIZES);

		/* Initialize for converting result tuples to Python */
		PLy_input_setup_func(&ininfo, cxt, RECORDOID, -1,
							 exec_ctx->curr_proc);

		oldcontext = CurrentMemoryContext;
		PG_TRY();
		{
			MemoryContext oldcontext2;

			if (rows)
			{
				uint64		i;

				/*
				 * PyList_New() and PyList_SetItem() use Py_ssize_t for list
				 * size and list indices; so we cannot support a result larger
				 * than PY_SSIZE_T_MAX.
				 */
				if (rows > (uint64) PY_SSIZE_T_MAX)
					ereport(ERROR,
							(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
							 errmsg("query result has too many rows to fit in a Python list")));

				Py_DECREF(result->rows);
				result->rows = PyList_New(rows);
				if (result->rows)
				{
					PLy_input_setup_tuple(&ininfo, tuptable->tupdesc,
										  exec_ctx->curr_proc);

					for (i = 0; i < rows; i++)
					{
						PyObject   *row = PLy_input_from_tuple(&ininfo,
															   tuptable->vals[i],
															   tuptable->tupdesc,
															   true);

						/* PyList_SetItem steals the reference to row */
						PyList_SetItem(result->rows, i, row);
					}
				}
			}

			/*
			 * Save tuple descriptor for later use by result set metadata
			 * functions.  Save it in TopMemoryContext so that it survives
			 * outside of an SPI context.  We trust that PLy_result_dealloc()
			 * will clean it up when the time is right.  (Do this as late as
			 * possible, to minimize the number of ways the tupdesc could get
			 * leaked due to errors.)
			 */
			oldcontext2 = MemoryContextSwitchTo(TopMemoryContext);
			result->tupdesc = CreateTupleDescCopy(tuptable->tupdesc);
			MemoryContextSwitchTo(oldcontext2);
		}
		PG_CATCH();
		{
			/* Release the scratch context and the half-built result */
			MemoryContextSwitchTo(oldcontext);
			MemoryContextDelete(cxt);
			Py_DECREF(result);
			PG_RE_THROW();
		}
		PG_END_TRY();

		MemoryContextDelete(cxt);
		SPI_freetuptable(tuptable);

		/* in case PyList_New() failed above */
		if (!result->rows)
		{
			Py_DECREF(result);
			result = NULL;
		}
	}

	return (PyObject *) result;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Utilities for running SPI functions in subtransactions.
|
|
|
|
*
|
|
|
|
* Usage:
|
|
|
|
*
|
2012-06-10 21:20:04 +02:00
|
|
|
* MemoryContext oldcontext = CurrentMemoryContext;
|
|
|
|
* ResourceOwner oldowner = CurrentResourceOwner;
|
2011-12-18 20:14:16 +01:00
|
|
|
*
|
2012-06-10 21:20:04 +02:00
|
|
|
* PLy_spi_subtransaction_begin(oldcontext, oldowner);
|
|
|
|
* PG_TRY();
|
|
|
|
* {
|
|
|
|
* <call SPI functions>
|
|
|
|
* PLy_spi_subtransaction_commit(oldcontext, oldowner);
|
|
|
|
* }
|
|
|
|
* PG_CATCH();
|
|
|
|
* {
|
|
|
|
* <do cleanup>
|
|
|
|
* PLy_spi_subtransaction_abort(oldcontext, oldowner);
|
|
|
|
* return NULL;
|
|
|
|
* }
|
|
|
|
* PG_END_TRY();
|
2011-12-18 20:14:16 +01:00
|
|
|
*
|
|
|
|
* These utilities take care of restoring connection to the SPI manager and
|
|
|
|
* setting a Python exception in case of an abort.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
PLy_spi_subtransaction_begin(MemoryContext oldcontext, ResourceOwner oldowner)
|
|
|
|
{
|
|
|
|
BeginInternalSubTransaction(NULL);
|
|
|
|
/* Want to run inside function's memory context */
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
PLy_spi_subtransaction_commit(MemoryContext oldcontext, ResourceOwner oldowner)
|
|
|
|
{
|
|
|
|
/* Commit the inner transaction, return to outer xact context */
|
|
|
|
ReleaseCurrentSubTransaction();
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
CurrentResourceOwner = oldowner;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Abort the internal subtransaction after an error during an SPI call,
 * restore the caller's state, and convert the PostgreSQL error into the
 * appropriate Python exception (a specific mapped exception if the
 * SQLSTATE is known, otherwise SPIError).
 *
 * Must be called from a PG_CATCH block; on return the caller should
 * return NULL to Python.
 */
void
PLy_spi_subtransaction_abort(MemoryContext oldcontext, ResourceOwner oldowner)
{
	ErrorData  *edata;
	PLyExceptionEntry *entry;
	PyObject   *exc;

	/*
	 * Save error info before rolling back; CopyErrorData must run in a
	 * context that survives the rollback, and FlushErrorState clears the
	 * error stack so the subxact machinery can run cleanly.
	 */
	MemoryContextSwitchTo(oldcontext);
	edata = CopyErrorData();
	FlushErrorState();

	/* Abort the inner transaction */
	RollbackAndReleaseCurrentSubTransaction();
	MemoryContextSwitchTo(oldcontext);
	CurrentResourceOwner = oldowner;

	/* Look up the Python exception mapped to this SQLSTATE, if any */
	entry = hash_search(PLy_spi_exceptions, &(edata->sqlerrcode),
						HASH_FIND, NULL);

	/*
	 * This could be a custom error code, if that's the case fallback to
	 * SPIError
	 */
	exc = entry ? entry->exc : PLy_exc_spi_error;
	/* Make Python raise the exception */
	PLy_spi_exception_set(exc, edata);
	FreeErrorData(edata);
}
|
|
|
|
|
|
|
|
/*
 * Raise a SPIError, passing in it more error details, like the
 * internal query and error position.
 *
 * excclass - Python exception class to instantiate and raise
 * edata    - PostgreSQL error data to attach; detail fields are packed
 *            into a "spidata" tuple attribute on the exception object
 *
 * On any Python-level failure while building the exception, all partial
 * references are dropped and we elog(ERROR) instead.
 */
static void
PLy_spi_exception_set(PyObject *excclass, ErrorData *edata)
{
	PyObject   *args = NULL;
	PyObject   *spierror = NULL;
	PyObject   *spidata = NULL;

	args = Py_BuildValue("(s)", edata->message);
	if (!args)
		goto failure;

	/* create a new SPI exception with the error message as the parameter */
	spierror = PyObject_CallObject(excclass, args);
	if (!spierror)
		goto failure;

	/*
	 * Pack the auxiliary error fields; "z" converts NULL C strings to
	 * Python None, so absent detail fields are tolerated.
	 */
	spidata = Py_BuildValue("(izzzizzzzz)", edata->sqlerrcode, edata->detail, edata->hint,
							edata->internalquery, edata->internalpos,
							edata->schema_name, edata->table_name, edata->column_name,
							edata->datatype_name, edata->constraint_name);
	if (!spidata)
		goto failure;

	if (PyObject_SetAttrString(spierror, "spidata", spidata) == -1)
		goto failure;

	PyErr_SetObject(excclass, spierror);

	/* PyErr_SetObject took its own references; release ours */
	Py_DECREF(args);
	Py_DECREF(spierror);
	Py_DECREF(spidata);
	return;

failure:
	/* Py_XDECREF tolerates NULL, covering every partial-progress state */
	Py_XDECREF(args);
	Py_XDECREF(spierror);
	Py_XDECREF(spidata);
	elog(ERROR, "could not convert SPI error to Python exception");
}
|