postgresql/src/pl/plpgsql/src/pl_handler.c


/*-------------------------------------------------------------------------
 *
 * pl_handler.c		- Handler for the PL/pgSQL
 *			  procedural language
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/pl/plpgsql/src/pl_handler.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "plpgsql.h"
#include "utils/builtins.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
#include "utils/varlena.h"
static bool plpgsql_extra_checks_check_hook(char **newvalue, void **extra, GucSource source);
static void plpgsql_extra_warnings_assign_hook(const char *newvalue, void *extra);
static void plpgsql_extra_errors_assign_hook(const char *newvalue, void *extra);

PG_MODULE_MAGIC;

/* Custom GUC variable */
static const struct config_enum_entry variable_conflict_options[] = {
    {"error", PLPGSQL_RESOLVE_ERROR, false},
    {"use_variable", PLPGSQL_RESOLVE_VARIABLE, false},
    {"use_column", PLPGSQL_RESOLVE_COLUMN, false},
    {NULL, 0, false}
};
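
/*
 * Illustrative usage (not taken from this file): the conflict policy is
 * normally chosen per session or per function, for example
 *
 *      SET plpgsql.variable_conflict = 'use_column';
 *
 * or with the "#variable_conflict use_column" option at the top of a
 * function body.
 */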
int         plpgsql_variable_conflict = PLPGSQL_RESOLVE_ERROR;

bool        plpgsql_print_strict_params = false;

bool        plpgsql_check_asserts = true;

char       *plpgsql_extra_warnings_string = NULL;
char       *plpgsql_extra_errors_string = NULL;
int         plpgsql_extra_warnings;
int         plpgsql_extra_errors;

/* Hook for plugins */
PLpgSQL_plugin **plpgsql_plugin_ptr = NULL;
static bool
plpgsql_extra_checks_check_hook(char **newvalue, void **extra, GucSource source)
{
    char       *rawstring;
    List       *elemlist;
    ListCell   *l;
    int         extrachecks = 0;
    int        *myextra;

    if (pg_strcasecmp(*newvalue, "all") == 0)
        extrachecks = PLPGSQL_XCHECK_ALL;
    else if (pg_strcasecmp(*newvalue, "none") == 0)
        extrachecks = PLPGSQL_XCHECK_NONE;
    else
    {
        /* Need a modifiable copy of string */
        rawstring = pstrdup(*newvalue);

        /* Parse string into list of identifiers */
        if (!SplitIdentifierString(rawstring, ',', &elemlist))
        {
            /* syntax error in list */
            GUC_check_errdetail("List syntax is invalid.");
            pfree(rawstring);
            list_free(elemlist);
            return false;
        }

        foreach(l, elemlist)
        {
            char       *tok = (char *) lfirst(l);

            if (pg_strcasecmp(tok, "shadowed_variables") == 0)
                extrachecks |= PLPGSQL_XCHECK_SHADOWVAR;
            else if (pg_strcasecmp(tok, "too_many_rows") == 0)
                extrachecks |= PLPGSQL_XCHECK_TOOMANYROWS;
            else if (pg_strcasecmp(tok, "strict_multi_assignment") == 0)
                extrachecks |= PLPGSQL_XCHECK_STRICTMULTIASSIGNMENT;
            else if (pg_strcasecmp(tok, "all") == 0 || pg_strcasecmp(tok, "none") == 0)
            {
                GUC_check_errdetail("Key word \"%s\" cannot be combined with other key words.", tok);
                pfree(rawstring);
                list_free(elemlist);
                return false;
            }
            else
            {
                GUC_check_errdetail("Unrecognized key word: \"%s\".", tok);
                pfree(rawstring);
                list_free(elemlist);
                return false;
            }
        }

        pfree(rawstring);
        list_free(elemlist);
    }

    myextra = (int *) malloc(sizeof(int));
    if (!myextra)
        return false;
    *myextra = extrachecks;
    *extra = (void *) myextra;

    return true;
}
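
/*
 * For example, the hook above accepts settings such as:
 *
 *      SET plpgsql.extra_warnings = 'shadowed_variables';
 *      SET plpgsql.extra_errors = 'too_many_rows, strict_multi_assignment';
 *      SET plpgsql.extra_warnings = 'all';
 *
 * but rejects "all" or "none" combined with other key words.
 */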
static void
plpgsql_extra_warnings_assign_hook(const char *newvalue, void *extra)
{
    plpgsql_extra_warnings = *((int *) extra);
}

static void
plpgsql_extra_errors_assign_hook(const char *newvalue, void *extra)
{
    plpgsql_extra_errors = *((int *) extra);
}
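
/*
 * Note: by the time either assign hook runs, the check hook has already
 * parsed and validated the value and left the resulting bitmask in
 * "extra"; the assign hooks only copy it into place and so cannot fail.
 */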
/*
 * _PG_init()			- library load-time initialization
 *
 * DO NOT make this static nor change its name!
 */
void
_PG_init(void)
{
    /* Be sure we do initialization only once (should be redundant now) */
    static bool inited = false;

    if (inited)
        return;

    pg_bindtextdomain(TEXTDOMAIN);

    DefineCustomEnumVariable("plpgsql.variable_conflict",
                             gettext_noop("Sets handling of conflicts between PL/pgSQL variable names and table column names."),
                             NULL,
                             &plpgsql_variable_conflict,
                             PLPGSQL_RESOLVE_ERROR,
                             variable_conflict_options,
                             PGC_SUSET, 0,
                             NULL, NULL, NULL);

    DefineCustomBoolVariable("plpgsql.print_strict_params",
                             gettext_noop("Print information about parameters in the DETAIL part of the error messages generated on INTO ... STRICT failures."),
                             NULL,
                             &plpgsql_print_strict_params,
                             false,
                             PGC_USERSET, 0,
                             NULL, NULL, NULL);

    DefineCustomBoolVariable("plpgsql.check_asserts",
                             gettext_noop("Perform checks given in ASSERT statements."),
                             NULL,
                             &plpgsql_check_asserts,
                             true,
                             PGC_USERSET, 0,
                             NULL, NULL, NULL);

    DefineCustomStringVariable("plpgsql.extra_warnings",
                               gettext_noop("List of programming constructs that should produce a warning."),
                               NULL,
                               &plpgsql_extra_warnings_string,
                               "none",
                               PGC_USERSET, GUC_LIST_INPUT,
                               plpgsql_extra_checks_check_hook,
                               plpgsql_extra_warnings_assign_hook,
                               NULL);

    DefineCustomStringVariable("plpgsql.extra_errors",
                               gettext_noop("List of programming constructs that should produce an error."),
                               NULL,
                               &plpgsql_extra_errors_string,
                               "none",
                               PGC_USERSET, GUC_LIST_INPUT,
                               plpgsql_extra_checks_check_hook,
                               plpgsql_extra_errors_assign_hook,
                               NULL);

    EmitWarningsOnPlaceholders("plpgsql");

    plpgsql_HashTableInit();

    RegisterXactCallback(plpgsql_xact_cb, NULL);
    RegisterSubXactCallback(plpgsql_subxact_cb, NULL);

    /* Set up a rendezvous point with optional instrumentation plugin */
    plpgsql_plugin_ptr = (PLpgSQL_plugin **) find_rendezvous_variable("PLpgSQL_plugin");

    inited = true;
}
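
/*
 * Sketch (not part of this file) of how an instrumentation plugin, such as
 * a debugger or profiler, would attach itself through the rendezvous
 * variable set up above; "my_plugin" is hypothetical:
 *
 *      static PLpgSQL_plugin my_plugin = { ... };
 *
 *      PLpgSQL_plugin **plugin_ptr = (PLpgSQL_plugin **)
 *          find_rendezvous_variable("PLpgSQL_plugin");
 *      *plugin_ptr = &my_plugin;
 */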
/* ----------
 * plpgsql_call_handler
 *
 * The PostgreSQL function manager and trigger manager
 * call this function for execution of PL/pgSQL procedures.
 * ----------
 */
PG_FUNCTION_INFO_V1(plpgsql_call_handler);

Datum
plpgsql_call_handler(PG_FUNCTION_ARGS)
{
    bool        nonatomic;
    PLpgSQL_function *func;
    PLpgSQL_execstate *save_cur_estate;
    Datum       retval;
    int         rc;
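
    /*
     * The fcinfo context is a CallContext node only when we were reached
     * via CALL; its atomic field tells us whether transaction control
     * statements are allowed.  Ordinary function calls always run
     * atomically.
     */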
    nonatomic = fcinfo->context &&
        IsA(fcinfo->context, CallContext) &&
        !castNode(CallContext, fcinfo->context)->atomic;

    /*
     * Connect to SPI manager
     */
    if ((rc = SPI_connect_ext(nonatomic ? SPI_OPT_NONATOMIC : 0)) != SPI_OK_CONNECT)
        elog(ERROR, "SPI_connect failed: %s", SPI_result_code_string(rc));

    /* Find or compile the function */
    func = plpgsql_compile(fcinfo, false);

    /* Must save and restore prior value of cur_estate */
    save_cur_estate = func->cur_estate;

    /* Mark the function as busy, so it can't be deleted from under us */
    func->use_count++;

    PG_TRY();
    {
        /*
         * Determine if called as function or trigger and call appropriate
         * subhandler
         */
        if (CALLED_AS_TRIGGER(fcinfo))
            retval = PointerGetDatum(plpgsql_exec_trigger(func,
                                                          (TriggerData *) fcinfo->context));
        else if (CALLED_AS_EVENT_TRIGGER(fcinfo))
        {
            plpgsql_exec_event_trigger(func,
                                       (EventTriggerData *) fcinfo->context);
            retval = (Datum) 0;
        }
        else
            retval = plpgsql_exec_function(func, fcinfo,
                                           NULL, NULL,
                                           !nonatomic);
    }
    PG_FINALLY();
    {
        /* Decrement use-count, restore cur_estate */
        func->use_count--;
        func->cur_estate = save_cur_estate;
    }
    PG_END_TRY();

    /*
     * Disconnect from SPI manager
     */
    if ((rc = SPI_finish()) != SPI_OK_FINISH)
        elog(ERROR, "SPI_finish failed: %s", SPI_result_code_string(rc));

    return retval;
}
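
/*
 * For illustration (hypothetical objects): the first statement below
 * reaches this handler as a potentially nonatomic procedure call, the
 * second as an ordinary atomic function call:
 *
 *      CALL my_proc(42);
 *      SELECT my_func(42);
 */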
/* ----------
 * plpgsql_inline_handler
 *
 * Called by PostgreSQL to execute an anonymous code block
 * ----------
 */
PG_FUNCTION_INFO_V1(plpgsql_inline_handler);

Datum
plpgsql_inline_handler(PG_FUNCTION_ARGS)
{
    LOCAL_FCINFO(fake_fcinfo, 0);
    InlineCodeBlock *codeblock = castNode(InlineCodeBlock, DatumGetPointer(PG_GETARG_DATUM(0)));
    PLpgSQL_function *func;
    FmgrInfo    flinfo;
    EState     *simple_eval_estate;
    ResourceOwner simple_eval_resowner;
    Datum       retval;
    int         rc;

    /*
     * Connect to SPI manager
     */
    if ((rc = SPI_connect_ext(codeblock->atomic ? 0 : SPI_OPT_NONATOMIC)) != SPI_OK_CONNECT)
        elog(ERROR, "SPI_connect failed: %s", SPI_result_code_string(rc));

    /* Compile the anonymous code block */
    func = plpgsql_compile_inline(codeblock->source_text);

    /* Mark the function as busy, just pro forma */
    func->use_count++;

    /*
     * Set up a fake fcinfo with just enough info to satisfy
     * plpgsql_exec_function().  In particular note that this sets things up
     * with no arguments passed.
     */
    MemSet(fake_fcinfo, 0, SizeForFunctionCallInfo(0));
    MemSet(&flinfo, 0, sizeof(flinfo));
    fake_fcinfo->flinfo = &flinfo;
    flinfo.fn_oid = InvalidOid;
    flinfo.fn_mcxt = CurrentMemoryContext;
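
    /*
     * fn_oid of InvalidOid indicates there is no underlying pg_proc entry
     * for this code block.
     */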
    /*
     * Create a private EState and resowner for simple-expression execution.
     * Notice that these are NOT tied to transaction-level resources; they
     * must survive any COMMIT/ROLLBACK the DO block executes, since we will
     * unconditionally try to clean them up below.  (Hence, be wary of adding
     * anything that could fail between here and the PG_TRY block.)  See the
     * comments for shared_simple_eval_estate.
     */
    simple_eval_estate = CreateExecutorState();
    simple_eval_resowner =
        ResourceOwnerCreate(NULL, "PL/pgSQL DO block simple expressions");

    /* And run the function */
    PG_TRY();
    {
        retval = plpgsql_exec_function(func, fake_fcinfo,
                                       simple_eval_estate,
                                       simple_eval_resowner,
                                       codeblock->atomic);
    }
    PG_CATCH();
    {
        /*
         * We need to clean up what would otherwise be long-lived resources
         * accumulated by the failed DO block, principally cached plans for
         * statements (which can be flushed by plpgsql_free_function_memory),
         * execution trees for simple expressions, which are in the private
         * EState, and cached-plan refcounts held by the private resowner.
         *
         * Before releasing the private EState, we must clean up any
         * simple_econtext_stack entries pointing into it, which we can do by
         * invoking the subxact callback.  (It will be called again later if
         * some outer control level does a subtransaction abort, but no harm
         * is done.)  We cheat a bit knowing that plpgsql_subxact_cb does not
         * pay attention to its parentSubid argument.
         */
        plpgsql_subxact_cb(SUBXACT_EVENT_ABORT_SUB,
                           GetCurrentSubTransactionId(),
                           0, NULL);
        /* Clean up the private EState and resowner */
        FreeExecutorState(simple_eval_estate);
        ResourceOwnerReleaseAllPlanCacheRefs(simple_eval_resowner);
        ResourceOwnerDelete(simple_eval_resowner);

        /* Function should now have no remaining use-counts ... */
        func->use_count--;
        Assert(func->use_count == 0);

        /* ... so we can free subsidiary storage */
        plpgsql_free_function_memory(func);

        /* And propagate the error */
        PG_RE_THROW();
    }
    PG_END_TRY();
    /* Clean up the private EState and resowner */
    FreeExecutorState(simple_eval_estate);
    ResourceOwnerReleaseAllPlanCacheRefs(simple_eval_resowner);
    ResourceOwnerDelete(simple_eval_resowner);

    /* Function should now have no remaining use-counts ... */
    func->use_count--;
    Assert(func->use_count == 0);

    /* ... so we can free subsidiary storage */
    plpgsql_free_function_memory(func);

    /*
     * Disconnect from SPI manager
     */
    if ((rc = SPI_finish()) != SPI_OK_FINISH)
        elog(ERROR, "SPI_finish failed: %s", SPI_result_code_string(rc));

    return retval;
}
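
/*
 * For illustration (hypothetical table): because the handler above connects
 * to SPI nonatomically when the block's context allows it, an anonymous
 * block may use transaction control, e.g.
 *
 *      DO $$
 *      BEGIN
 *          INSERT INTO audit_log VALUES (1);
 *          COMMIT;     -- allowed only in a nonatomic context
 *      END
 *      $$;
 */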
/* ----------
 * plpgsql_validator
 *
 * This function attempts to validate a PL/pgSQL function at
 * CREATE FUNCTION time.
 * ----------
 */
PG_FUNCTION_INFO_V1(plpgsql_validator);

Datum
plpgsql_validator(PG_FUNCTION_ARGS)
{
    Oid         funcoid = PG_GETARG_OID(0);
    HeapTuple   tuple;
    Form_pg_proc proc;
    char        functyptype;
    int         numargs;
    Oid        *argtypes;
    char      **argnames;
    char       *argmodes;
    bool        is_dml_trigger = false;
    bool        is_event_trigger = false;
    int         i;

    if (!CheckFunctionValidatorAccess(fcinfo->flinfo->fn_oid, funcoid))
        PG_RETURN_VOID();
    /* Get the new function's pg_proc entry */
    tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcoid));
    if (!HeapTupleIsValid(tuple))
        elog(ERROR, "cache lookup failed for function %u", funcoid);
    proc = (Form_pg_proc) GETSTRUCT(tuple);

    functyptype = get_typtype(proc->prorettype);

    /* Disallow pseudotype result */
    /* except for TRIGGER, EVTTRIGGER, RECORD, VOID, or polymorphic */
    if (functyptype == TYPTYPE_PSEUDO)
    {
        if (proc->prorettype == TRIGGEROID)
            is_dml_trigger = true;
        else if (proc->prorettype == EVTTRIGGEROID)
            is_event_trigger = true;
        else if (proc->prorettype != RECORDOID &&
                 proc->prorettype != VOIDOID &&
                 !IsPolymorphicType(proc->prorettype))
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("PL/pgSQL functions cannot return type %s",
                            format_type_be(proc->prorettype))));
    }
    /* Disallow pseudotypes in arguments (either IN or OUT) */
    /* except for RECORD and polymorphic */
    numargs = get_func_arg_info(tuple,
                                &argtypes, &argnames, &argmodes);
    for (i = 0; i < numargs; i++)
    {
        if (get_typtype(argtypes[i]) == TYPTYPE_PSEUDO)
        {
            if (argtypes[i] != RECORDOID &&
                !IsPolymorphicType(argtypes[i]))
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("PL/pgSQL functions cannot accept type %s",
                                format_type_be(argtypes[i]))));
        }
    }
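
    /*
     * For example (hypothetical function), this declaration is rejected by
     * the loop above, because "internal" is a pseudotype that is neither
     * RECORD nor polymorphic:
     *
     *      CREATE FUNCTION f(x internal) RETURNS int
     *          LANGUAGE plpgsql AS $$ BEGIN RETURN 1; END $$;
     */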
/* Postpone body checks if !check_function_bodies */
if (check_function_bodies)
{
LOCAL_FCINFO(fake_fcinfo, 0);
FmgrInfo flinfo;
int rc;
TriggerData trigdata;
EventTriggerData etrigdata;
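/*
 * LOCAL_FCINFO declares an appropriately sized and aligned
 * FunctionCallInfo on the stack.  Zero argument slots suffice here,
 * since the function is only test-compiled and never actually called.
 */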
/*
* Connect to SPI manager (is this needed for compilation?)
*/
if ((rc = SPI_connect()) != SPI_OK_CONNECT)
elog(ERROR, "SPI_connect failed: %s", SPI_result_code_string(rc));
/*
* Set up a fake fcinfo with just enough info to satisfy
* plpgsql_compile().
*/
MemSet(fake_fcinfo, 0, SizeForFunctionCallInfo(0));
MemSet(&flinfo, 0, sizeof(flinfo));
fake_fcinfo->flinfo = &flinfo;
flinfo.fn_oid = funcoid;
flinfo.fn_mcxt = CurrentMemoryContext;
if (is_dml_trigger)
{
MemSet(&trigdata, 0, sizeof(trigdata));
trigdata.type = T_TriggerData;
fake_fcinfo->context = (Node *) &trigdata;
}
else if (is_event_trigger)
{
MemSet(&etrigdata, 0, sizeof(etrigdata));
etrigdata.type = T_EventTriggerData;
fake_fcinfo->context = (Node *) &etrigdata;
}
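/*
 * plpgsql_compile() distinguishes plain functions, DML triggers, and
 * event triggers by the node tag of fcinfo->context, so a zeroed
 * context struct with the right tag is all the compiler needs.
 */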
/* Test-compile the function */
plpgsql_compile(fake_fcinfo, true);
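/*
 * The second argument (forValidator = true) tells plpgsql_compile()
 * that this is a validation-only compile, with no actual call in
 * progress.
 */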
/*
* Disconnect from SPI manager
*/
if ((rc = SPI_finish()) != SPI_OK_FINISH)
elog(ERROR, "SPI_finish failed: %s", SPI_result_code_string(rc));
}
ReleaseSysCache(tuple);
PG_RETURN_VOID();
}
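/*
 * Usage sketch (illustrative, not part of this file): the validator runs
 * at CREATE FUNCTION time, so a broken body is rejected immediately
 * unless check_function_bodies is off:
 *
 *   CREATE FUNCTION add_one(i int) RETURNS int AS $$
 *   BEGIN
 *       RETURN i + 1;
 *   END $$ LANGUAGE plpgsql;          -- body is test-compiled here
 *
 *   SET check_function_bodies = off;  -- postpones body checks to first call
 */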