From 4b93f57999a2ca9b9c9e573ea32ab1aeaa8bf496 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 13 Feb 2018 18:52:21 -0500 Subject: [PATCH] Make plpgsql use its DTYPE_REC code paths for composite-type variables. Formerly, DTYPE_REC was used only for variables declared as "record"; variables of named composite types used DTYPE_ROW, which is faster for some purposes but much less flexible. In particular, the ROW code paths are entirely incapable of dealing with DDL-caused changes to the number or data types of the columns of a row variable, once a particular plpgsql function has been parsed for the first time in a session. And, since the stored representation of a ROW isn't a tuple, there wasn't any easy way to deal with variables of domain-over-composite types, since the domain constraint checking code would expect the value to be checked to be a tuple. A lesser, but still real, annoyance is that ROW format cannot represent a true NULL composite value, only a row of per-field NULL values, which is not exactly the same thing. Hence, switch to using DTYPE_REC for all composite-typed variables, whether "record", named composite type, or domain over named composite type. DTYPE_ROW remains but is used only for its native purpose, to represent a fixed-at-compile-time list of variables, for instance the targets of an INTO clause. To accomplish this without taking significant performance losses, introduce infrastructure that allows storing composite-type variables as "expanded objects", similar to the "expanded array" infrastructure introduced in commit 1dc5ebc90. A composite variable's value is thereby kept (most of the time) in the form of separate Datums, so that field accesses and updates are not much more expensive than they were in the ROW format. This holds the line, more or less, on performance of variables of named composite types in field-access-intensive microbenchmarks, and makes variables declared "record" perform much better than before in similar tests. In addition, the logic involved with enforcing composite-domain constraints against updates of individual fields is in the expanded record infrastructure not plpgsql proper, so that it might be reusable for other purposes. In further support of this, introduce a typcache feature for assigning a unique-within-process identifier to each distinct tuple descriptor of interest; in particular, DDL alterations on composite types result in a new identifier for that type. This allows very cheap detection of the need to refresh tupdesc-dependent data. This improves on the "tupDescSeqNo" idea I had in commit 687f096ea: that assigned identifying sequence numbers to successive versions of individual composite types, but the numbers were not unique across different types, nor was there support for assigning numbers to registered record types. In passing, allow plpgsql functions to accept as well as return type "record". There was no good reason for the old restriction, and it was out of step with most of the other PLs. 
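As a rough illustration of the behavioral change (this example is not part of the original commit message; the type and function names are invented), a variable of a named composite type now tracks DDL changes made after the function was first parsed in a session, and "record" is accepted as an argument type:

    create type mytype as (f1 int, f2 text);

    create function get_f2(v mytype) returns text
    language plpgsql as $$
    declare
      r mytype;        -- formerly DTYPE_ROW, now stored as an expanded record
    begin
      r := v;
      return r.f2;
    end$$;

    select get_f2(row(1, 'one')::mytype);         -- parses and caches the function
    alter type mytype add attribute f3 numeric;   -- DDL change to the composite type
    select get_f2(row(2, 'two', 3.14)::mytype);   -- works; the old ROW-format code
                                                  -- could not cope with the new column

    create function describe(r record) returns text
    language plpgsql as $$ begin return r::text; end$$;   -- "record" argument now allowed
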
Tom Lane, reviewed by Pavel Stehule Discussion: https://postgr.es/m/8962.1514399547@sss.pgh.pa.us --- doc/src/sgml/plpgsql.sgml | 12 +- src/backend/executor/execExprInterp.c | 137 +- src/backend/utils/adt/Makefile | 2 +- src/backend/utils/adt/expandedrecord.c | 1569 ++++++++++++++ src/backend/utils/cache/typcache.c | 84 +- src/include/utils/expandedrecord.h | 227 ++ src/include/utils/typcache.h | 14 +- src/pl/plpgsql/src/Makefile | 2 +- .../plpgsql/src/expected/plpgsql_record.out | 662 ++++++ src/pl/plpgsql/src/pl_comp.c | 380 +--- src/pl/plpgsql/src/pl_exec.c | 1865 ++++++++++++----- src/pl/plpgsql/src/pl_funcs.c | 9 +- src/pl/plpgsql/src/pl_gram.y | 15 +- src/pl/plpgsql/src/pl_handler.c | 5 +- src/pl/plpgsql/src/plpgsql.h | 59 +- src/pl/plpgsql/src/sql/plpgsql_record.sql | 441 ++++ src/pl/plpython/plpy_typeio.c | 8 +- src/pl/plpython/plpy_typeio.h | 4 +- src/test/regress/expected/plpgsql.out | 11 +- src/test/regress/sql/plpgsql.sql | 3 + 20 files changed, 4596 insertions(+), 913 deletions(-) create mode 100644 src/backend/utils/adt/expandedrecord.c create mode 100644 src/include/utils/expandedrecord.h create mode 100644 src/pl/plpgsql/src/expected/plpgsql_record.out create mode 100644 src/pl/plpgsql/src/sql/plpgsql_record.sql diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml index 90a3c00dfe..c1e3c6a19d 100644 --- a/doc/src/sgml/plpgsql.sgml +++ b/doc/src/sgml/plpgsql.sgml @@ -123,7 +123,9 @@ and they can return a result of any of these types. They can also accept or return any composite type (row type) specified by name. It is also possible to declare a PL/pgSQL - function as returning record, which means that the result + function as accepting record, which means that any + composite type will do as input, or + as returning record, which means that the result is a row type whose columns are determined by specification in the calling query, as discussed in . @@ -671,14 +673,6 @@ user_id users.user_id%TYPE; be selected from it, for example $1.user_id. - - Only the user-defined columns of a table row are accessible in a - row-type variable, not the OID or other system columns (because the - row could be from a view). The fields of the row type inherit the - table's field size or precision for data types such as - char(n). - - Here is an example of using composite types. table1 and table2 are existing tables having at least the diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index f646fd9c51..9c6c2b02e9 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -70,6 +70,7 @@ #include "utils/builtins.h" #include "utils/date.h" #include "utils/datum.h" +#include "utils/expandedrecord.h" #include "utils/lsyscache.h" #include "utils/timestamp.h" #include "utils/typcache.h" @@ -2820,57 +2821,105 @@ ExecEvalFieldSelect(ExprState *state, ExprEvalStep *op, ExprContext *econtext) if (*op->resnull) return; - /* Get the composite datum and extract its type fields */ tupDatum = *op->resvalue; - tuple = DatumGetHeapTupleHeader(tupDatum); - tupType = HeapTupleHeaderGetTypeId(tuple); - tupTypmod = HeapTupleHeaderGetTypMod(tuple); - - /* Lookup tupdesc if first time through or if type changes */ - tupDesc = get_cached_rowtype(tupType, tupTypmod, - &op->d.fieldselect.argdesc, - econtext); - - /* - * Find field's attr record. Note we don't support system columns here: a - * datum tuple doesn't have valid values for most of the interesting - * system columns anyway. 
- */ - if (fieldnum <= 0) /* should never happen */ - elog(ERROR, "unsupported reference to system column %d in FieldSelect", - fieldnum); - if (fieldnum > tupDesc->natts) /* should never happen */ - elog(ERROR, "attribute number %d exceeds number of columns %d", - fieldnum, tupDesc->natts); - attr = TupleDescAttr(tupDesc, fieldnum - 1); - - /* Check for dropped column, and force a NULL result if so */ - if (attr->attisdropped) + /* We can special-case expanded records for speed */ + if (VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(tupDatum))) { - *op->resnull = true; - return; + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) DatumGetEOHP(tupDatum); + + Assert(erh->er_magic == ER_MAGIC); + + /* Extract record's TupleDesc */ + tupDesc = expanded_record_get_tupdesc(erh); + + /* + * Find field's attr record. Note we don't support system columns + * here: a datum tuple doesn't have valid values for most of the + * interesting system columns anyway. + */ + if (fieldnum <= 0) /* should never happen */ + elog(ERROR, "unsupported reference to system column %d in FieldSelect", + fieldnum); + if (fieldnum > tupDesc->natts) /* should never happen */ + elog(ERROR, "attribute number %d exceeds number of columns %d", + fieldnum, tupDesc->natts); + attr = TupleDescAttr(tupDesc, fieldnum - 1); + + /* Check for dropped column, and force a NULL result if so */ + if (attr->attisdropped) + { + *op->resnull = true; + return; + } + + /* Check for type mismatch --- possible after ALTER COLUMN TYPE? */ + /* As in CheckVarSlotCompatibility, we should but can't check typmod */ + if (op->d.fieldselect.resulttype != attr->atttypid) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("attribute %d has wrong type", fieldnum), + errdetail("Table has type %s, but query expects %s.", + format_type_be(attr->atttypid), + format_type_be(op->d.fieldselect.resulttype)))); + + /* extract the field */ + *op->resvalue = expanded_record_get_field(erh, fieldnum, + op->resnull); } + else + { + /* Get the composite datum and extract its type fields */ + tuple = DatumGetHeapTupleHeader(tupDatum); - /* Check for type mismatch --- possible after ALTER COLUMN TYPE? */ - /* As in CheckVarSlotCompatibility, we should but can't check typmod */ - if (op->d.fieldselect.resulttype != attr->atttypid) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("attribute %d has wrong type", fieldnum), - errdetail("Table has type %s, but query expects %s.", - format_type_be(attr->atttypid), - format_type_be(op->d.fieldselect.resulttype)))); + tupType = HeapTupleHeaderGetTypeId(tuple); + tupTypmod = HeapTupleHeaderGetTypMod(tuple); - /* heap_getattr needs a HeapTuple not a bare HeapTupleHeader */ - tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple); - tmptup.t_data = tuple; + /* Lookup tupdesc if first time through or if type changes */ + tupDesc = get_cached_rowtype(tupType, tupTypmod, + &op->d.fieldselect.argdesc, + econtext); - /* extract the field */ - *op->resvalue = heap_getattr(&tmptup, - fieldnum, - tupDesc, - op->resnull); + /* + * Find field's attr record. Note we don't support system columns + * here: a datum tuple doesn't have valid values for most of the + * interesting system columns anyway. 
+ */ + if (fieldnum <= 0) /* should never happen */ + elog(ERROR, "unsupported reference to system column %d in FieldSelect", + fieldnum); + if (fieldnum > tupDesc->natts) /* should never happen */ + elog(ERROR, "attribute number %d exceeds number of columns %d", + fieldnum, tupDesc->natts); + attr = TupleDescAttr(tupDesc, fieldnum - 1); + + /* Check for dropped column, and force a NULL result if so */ + if (attr->attisdropped) + { + *op->resnull = true; + return; + } + + /* Check for type mismatch --- possible after ALTER COLUMN TYPE? */ + /* As in CheckVarSlotCompatibility, we should but can't check typmod */ + if (op->d.fieldselect.resulttype != attr->atttypid) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("attribute %d has wrong type", fieldnum), + errdetail("Table has type %s, but query expects %s.", + format_type_be(attr->atttypid), + format_type_be(op->d.fieldselect.resulttype)))); + + /* heap_getattr needs a HeapTuple not a bare HeapTupleHeader */ + tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple); + tmptup.t_data = tuple; + + /* extract the field */ + *op->resvalue = heap_getattr(&tmptup, + fieldnum, + tupDesc, + op->resnull); + } } /* diff --git a/src/backend/utils/adt/Makefile b/src/backend/utils/adt/Makefile index 1fb018416e..61ca90312f 100644 --- a/src/backend/utils/adt/Makefile +++ b/src/backend/utils/adt/Makefile @@ -12,7 +12,7 @@ include $(top_builddir)/src/Makefile.global OBJS = acl.o amutils.o arrayfuncs.o array_expanded.o array_selfuncs.o \ array_typanalyze.o array_userfuncs.o arrayutils.o ascii.o \ bool.o cash.o char.o date.o datetime.o datum.o dbsize.o domains.o \ - encode.o enum.o expandeddatum.o \ + encode.o enum.o expandeddatum.o expandedrecord.o \ float.o format_type.o formatting.o genfile.o \ geo_ops.o geo_selfuncs.o geo_spgist.o inet_cidr_ntop.o inet_net_pton.o \ int.o int8.o json.o jsonb.o jsonb_gin.o jsonb_op.o jsonb_util.o \ diff --git a/src/backend/utils/adt/expandedrecord.c b/src/backend/utils/adt/expandedrecord.c new file mode 100644 index 0000000000..0bf5fe8cc7 --- /dev/null +++ b/src/backend/utils/adt/expandedrecord.c @@ -0,0 +1,1569 @@ +/*------------------------------------------------------------------------- + * + * expandedrecord.c + * Functions for manipulating composite expanded objects. + * + * This module supports "expanded objects" (cf. expandeddatum.h) that can + * store values of named composite types, domains over named composite types, + * and record types (registered or anonymous). 
+ * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/backend/utils/adt/expandedrecord.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/htup_details.h" +#include "catalog/heap.h" +#include "catalog/pg_type.h" +#include "utils/builtins.h" +#include "utils/datum.h" +#include "utils/expandedrecord.h" +#include "utils/memutils.h" +#include "utils/typcache.h" + + +/* "Methods" required for an expanded object */ +static Size ER_get_flat_size(ExpandedObjectHeader *eohptr); +static void ER_flatten_into(ExpandedObjectHeader *eohptr, + void *result, Size allocated_size); + +static const ExpandedObjectMethods ER_methods = +{ + ER_get_flat_size, + ER_flatten_into +}; + +/* Other local functions */ +static void ER_mc_callback(void *arg); +static MemoryContext get_domain_check_cxt(ExpandedRecordHeader *erh); +static void build_dummy_expanded_header(ExpandedRecordHeader *main_erh); +static pg_noinline void check_domain_for_new_field(ExpandedRecordHeader *erh, + int fnumber, + Datum newValue, bool isnull); +static pg_noinline void check_domain_for_new_tuple(ExpandedRecordHeader *erh, + HeapTuple tuple); + + +/* + * Build an expanded record of the specified composite type + * + * type_id can be RECORDOID, but only if a positive typmod is given. + * + * The expanded record is initially "empty", having a state logically + * equivalent to a NULL composite value (not ROW(NULL, NULL, ...)). + * Note that this might not be a valid state for a domain type; if the + * caller needs to check that, call expanded_record_set_tuple(erh, NULL). + * + * The expanded object will be a child of parentcontext. + */ +ExpandedRecordHeader * +make_expanded_record_from_typeid(Oid type_id, int32 typmod, + MemoryContext parentcontext) +{ + ExpandedRecordHeader *erh; + int flags = 0; + TupleDesc tupdesc; + uint64 tupdesc_id; + MemoryContext objcxt; + char *chunk; + + if (type_id != RECORDOID) + { + /* + * Consult the typcache to see if it's a domain over composite, and in + * any case to get the tupdesc and tupdesc identifier. + */ + TypeCacheEntry *typentry; + + typentry = lookup_type_cache(type_id, + TYPECACHE_TUPDESC | + TYPECACHE_DOMAIN_BASE_INFO); + if (typentry->typtype == TYPTYPE_DOMAIN) + { + flags |= ER_FLAG_IS_DOMAIN; + typentry = lookup_type_cache(typentry->domainBaseType, + TYPECACHE_TUPDESC); + } + if (typentry->tupDesc == NULL) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("type %s is not composite", + format_type_be(type_id)))); + tupdesc = typentry->tupDesc; + tupdesc_id = typentry->tupDesc_identifier; + } + else + { + /* + * For RECORD types, get the tupdesc and identifier from typcache. + */ + tupdesc = lookup_rowtype_tupdesc(type_id, typmod); + tupdesc_id = assign_record_type_identifier(type_id, typmod); + } + + /* + * Allocate private context for expanded object. We use a regular-size + * context, not a small one, to improve the odds that we can fit a tupdesc + * into it without needing an extra malloc block. (This code path doesn't + * ever need to copy a tupdesc into the expanded record, but let's be + * consistent with the other ways of making an expanded record.) 
+ */ + objcxt = AllocSetContextCreate(parentcontext, + "expanded record", + ALLOCSET_DEFAULT_SIZES); + + /* + * Since we already know the number of fields in the tupdesc, we can + * allocate the dvalues/dnulls arrays along with the record header. This + * is useless if we never need those arrays, but it costs almost nothing, + * and it will save a palloc cycle if we do need them. + */ + erh = (ExpandedRecordHeader *) + MemoryContextAlloc(objcxt, MAXALIGN(sizeof(ExpandedRecordHeader)) + + tupdesc->natts * (sizeof(Datum) + sizeof(bool))); + + /* Ensure all header fields are initialized to 0/null */ + memset(erh, 0, sizeof(ExpandedRecordHeader)); + + EOH_init_header(&erh->hdr, &ER_methods, objcxt); + erh->er_magic = ER_MAGIC; + + /* Set up dvalues/dnulls, with no valid contents as yet */ + chunk = (char *) erh + MAXALIGN(sizeof(ExpandedRecordHeader)); + erh->dvalues = (Datum *) chunk; + erh->dnulls = (bool *) (chunk + tupdesc->natts * sizeof(Datum)); + erh->nfields = tupdesc->natts; + + /* Fill in composite-type identification info */ + erh->er_decltypeid = type_id; + erh->er_typeid = tupdesc->tdtypeid; + erh->er_typmod = tupdesc->tdtypmod; + erh->er_tupdesc_id = tupdesc_id; + + erh->flags = flags; + + /* + * If what we got from the typcache is a refcounted tupdesc, we need to + * acquire our own refcount on it. We manage the refcount with a memory + * context callback rather than assuming that the CurrentResourceOwner is + * longer-lived than this expanded object. + */ + if (tupdesc->tdrefcount >= 0) + { + /* Register callback to release the refcount */ + erh->er_mcb.func = ER_mc_callback; + erh->er_mcb.arg = (void *) erh; + MemoryContextRegisterResetCallback(erh->hdr.eoh_context, + &erh->er_mcb); + + /* And save the pointer */ + erh->er_tupdesc = tupdesc; + tupdesc->tdrefcount++; + + /* If we called lookup_rowtype_tupdesc, release the pin it took */ + if (type_id == RECORDOID) + DecrTupleDescRefCount(tupdesc); + } + else + { + /* + * If it's not refcounted, just assume it will outlive the expanded + * object. (This can happen for shared record types, for instance.) + */ + erh->er_tupdesc = tupdesc; + } + + /* + * We don't set ER_FLAG_DVALUES_VALID or ER_FLAG_FVALUE_VALID, so the + * record remains logically empty. + */ + + return erh; +} + +/* + * Build an expanded record of the rowtype defined by the tupdesc + * + * The tupdesc is copied if necessary (i.e., if we can't just bump its + * reference count instead). + * + * The expanded record is initially "empty", having a state logically + * equivalent to a NULL composite value (not ROW(NULL, NULL, ...)). + * + * The expanded object will be a child of parentcontext. + */ +ExpandedRecordHeader * +make_expanded_record_from_tupdesc(TupleDesc tupdesc, + MemoryContext parentcontext) +{ + ExpandedRecordHeader *erh; + uint64 tupdesc_id; + MemoryContext objcxt; + MemoryContext oldcxt; + char *chunk; + + if (tupdesc->tdtypeid != RECORDOID) + { + /* + * If it's a named composite type (not RECORD), we prefer to reference + * the typcache's copy of the tupdesc, which is guaranteed to be + * refcounted (the given tupdesc might not be). In any case, we need + * to consult the typcache to get the correct tupdesc identifier. + * + * Note that tdtypeid couldn't be a domain type, so we need not + * consider that case here. 
+ */ + TypeCacheEntry *typentry; + + typentry = lookup_type_cache(tupdesc->tdtypeid, TYPECACHE_TUPDESC); + if (typentry->tupDesc == NULL) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("type %s is not composite", + format_type_be(tupdesc->tdtypeid)))); + tupdesc = typentry->tupDesc; + tupdesc_id = typentry->tupDesc_identifier; + } + else + { + /* + * For RECORD types, get the appropriate unique identifier (possibly + * freshly assigned). + */ + tupdesc_id = assign_record_type_identifier(tupdesc->tdtypeid, + tupdesc->tdtypmod); + } + + /* + * Allocate private context for expanded object. We use a regular-size + * context, not a small one, to improve the odds that we can fit a tupdesc + * into it without needing an extra malloc block. + */ + objcxt = AllocSetContextCreate(parentcontext, + "expanded record", + ALLOCSET_DEFAULT_SIZES); + + /* + * Since we already know the number of fields in the tupdesc, we can + * allocate the dvalues/dnulls arrays along with the record header. This + * is useless if we never need those arrays, but it costs almost nothing, + * and it will save a palloc cycle if we do need them. + */ + erh = (ExpandedRecordHeader *) + MemoryContextAlloc(objcxt, MAXALIGN(sizeof(ExpandedRecordHeader)) + + tupdesc->natts * (sizeof(Datum) + sizeof(bool))); + + /* Ensure all header fields are initialized to 0/null */ + memset(erh, 0, sizeof(ExpandedRecordHeader)); + + EOH_init_header(&erh->hdr, &ER_methods, objcxt); + erh->er_magic = ER_MAGIC; + + /* Set up dvalues/dnulls, with no valid contents as yet */ + chunk = (char *) erh + MAXALIGN(sizeof(ExpandedRecordHeader)); + erh->dvalues = (Datum *) chunk; + erh->dnulls = (bool *) (chunk + tupdesc->natts * sizeof(Datum)); + erh->nfields = tupdesc->natts; + + /* Fill in composite-type identification info */ + erh->er_decltypeid = erh->er_typeid = tupdesc->tdtypeid; + erh->er_typmod = tupdesc->tdtypmod; + erh->er_tupdesc_id = tupdesc_id; + + /* + * Copy tupdesc if needed, but we prefer to bump its refcount if possible. + * We manage the refcount with a memory context callback rather than + * assuming that the CurrentResourceOwner is longer-lived than this + * expanded object. + */ + if (tupdesc->tdrefcount >= 0) + { + /* Register callback to release the refcount */ + erh->er_mcb.func = ER_mc_callback; + erh->er_mcb.arg = (void *) erh; + MemoryContextRegisterResetCallback(erh->hdr.eoh_context, + &erh->er_mcb); + + /* And save the pointer */ + erh->er_tupdesc = tupdesc; + tupdesc->tdrefcount++; + } + else + { + /* Just copy it */ + oldcxt = MemoryContextSwitchTo(objcxt); + erh->er_tupdesc = CreateTupleDescCopy(tupdesc); + erh->flags |= ER_FLAG_TUPDESC_ALLOCED; + MemoryContextSwitchTo(oldcxt); + } + + /* + * We don't set ER_FLAG_DVALUES_VALID or ER_FLAG_FVALUE_VALID, so the + * record remains logically empty. + */ + + return erh; +} + +/* + * Build an expanded record of the same rowtype as the given expanded record + * + * This is faster than either of the above routines because we can bypass + * typcache lookup(s). + * + * The expanded record is initially "empty" --- we do not copy whatever + * tuple might be in the source expanded record. + * + * The expanded object will be a child of parentcontext. 
+ */ +ExpandedRecordHeader * +make_expanded_record_from_exprecord(ExpandedRecordHeader *olderh, + MemoryContext parentcontext) +{ + ExpandedRecordHeader *erh; + TupleDesc tupdesc = expanded_record_get_tupdesc(olderh); + MemoryContext objcxt; + MemoryContext oldcxt; + char *chunk; + + /* + * Allocate private context for expanded object. We use a regular-size + * context, not a small one, to improve the odds that we can fit a tupdesc + * into it without needing an extra malloc block. + */ + objcxt = AllocSetContextCreate(parentcontext, + "expanded record", + ALLOCSET_DEFAULT_SIZES); + + /* + * Since we already know the number of fields in the tupdesc, we can + * allocate the dvalues/dnulls arrays along with the record header. This + * is useless if we never need those arrays, but it costs almost nothing, + * and it will save a palloc cycle if we do need them. + */ + erh = (ExpandedRecordHeader *) + MemoryContextAlloc(objcxt, MAXALIGN(sizeof(ExpandedRecordHeader)) + + tupdesc->natts * (sizeof(Datum) + sizeof(bool))); + + /* Ensure all header fields are initialized to 0/null */ + memset(erh, 0, sizeof(ExpandedRecordHeader)); + + EOH_init_header(&erh->hdr, &ER_methods, objcxt); + erh->er_magic = ER_MAGIC; + + /* Set up dvalues/dnulls, with no valid contents as yet */ + chunk = (char *) erh + MAXALIGN(sizeof(ExpandedRecordHeader)); + erh->dvalues = (Datum *) chunk; + erh->dnulls = (bool *) (chunk + tupdesc->natts * sizeof(Datum)); + erh->nfields = tupdesc->natts; + + /* Fill in composite-type identification info */ + erh->er_decltypeid = olderh->er_decltypeid; + erh->er_typeid = olderh->er_typeid; + erh->er_typmod = olderh->er_typmod; + erh->er_tupdesc_id = olderh->er_tupdesc_id; + + /* The only flag bit that transfers over is IS_DOMAIN */ + erh->flags = olderh->flags & ER_FLAG_IS_DOMAIN; + + /* + * Copy tupdesc if needed, but we prefer to bump its refcount if possible. + * We manage the refcount with a memory context callback rather than + * assuming that the CurrentResourceOwner is longer-lived than this + * expanded object. + */ + if (tupdesc->tdrefcount >= 0) + { + /* Register callback to release the refcount */ + erh->er_mcb.func = ER_mc_callback; + erh->er_mcb.arg = (void *) erh; + MemoryContextRegisterResetCallback(erh->hdr.eoh_context, + &erh->er_mcb); + + /* And save the pointer */ + erh->er_tupdesc = tupdesc; + tupdesc->tdrefcount++; + } + else if (olderh->flags & ER_FLAG_TUPDESC_ALLOCED) + { + /* We need to make our own copy of the tupdesc */ + oldcxt = MemoryContextSwitchTo(objcxt); + erh->er_tupdesc = CreateTupleDescCopy(tupdesc); + erh->flags |= ER_FLAG_TUPDESC_ALLOCED; + MemoryContextSwitchTo(oldcxt); + } + else + { + /* + * Assume the tupdesc will outlive this expanded object, just like + * we're assuming it will outlive the source object. + */ + erh->er_tupdesc = tupdesc; + } + + /* + * We don't set ER_FLAG_DVALUES_VALID or ER_FLAG_FVALUE_VALID, so the + * record remains logically empty. + */ + + return erh; +} + +/* + * Insert given tuple as the value of the expanded record + * + * It is caller's responsibility that the tuple matches the record's + * previously-assigned rowtype. (However domain constraints, if any, + * will be checked here.) + * + * The tuple is physically copied into the expanded record's local storage + * if "copy" is true, otherwise it's caller's responsibility that the tuple + * will live as long as the expanded record does. In any case, out-of-line + * fields in the tuple are not automatically inlined. 
+ * + * Alternatively, tuple can be NULL, in which case we just set the expanded + * record to be empty. + */ +void +expanded_record_set_tuple(ExpandedRecordHeader *erh, + HeapTuple tuple, + bool copy) +{ + int oldflags; + HeapTuple oldtuple; + char *oldfstartptr; + char *oldfendptr; + int newflags; + HeapTuple newtuple; + MemoryContext oldcxt; + + /* Shouldn't ever be trying to assign new data to a dummy header */ + Assert(!(erh->flags & ER_FLAG_IS_DUMMY)); + + /* + * Before performing the assignment, see if result will satisfy domain. + */ + if (erh->flags & ER_FLAG_IS_DOMAIN) + check_domain_for_new_tuple(erh, tuple); + + /* + * Initialize new flags, keeping only non-data status bits. + */ + oldflags = erh->flags; + newflags = oldflags & ER_FLAGS_NON_DATA; + + /* + * Copy tuple into local storage if needed. We must be sure this succeeds + * before we start to modify the expanded record's state. + */ + if (copy && tuple) + { + oldcxt = MemoryContextSwitchTo(erh->hdr.eoh_context); + newtuple = heap_copytuple(tuple); + newflags |= ER_FLAG_FVALUE_ALLOCED; + MemoryContextSwitchTo(oldcxt); + } + else + newtuple = tuple; + + /* Make copies of fields we're about to overwrite */ + oldtuple = erh->fvalue; + oldfstartptr = erh->fstartptr; + oldfendptr = erh->fendptr; + + /* + * It's now safe to update the expanded record's state. + */ + if (newtuple) + { + /* Save flat representation */ + erh->fvalue = newtuple; + erh->fstartptr = (char *) newtuple->t_data; + erh->fendptr = ((char *) newtuple->t_data) + newtuple->t_len; + newflags |= ER_FLAG_FVALUE_VALID; + + /* Remember if we have any out-of-line field values */ + if (HeapTupleHasExternal(newtuple)) + newflags |= ER_FLAG_HAVE_EXTERNAL; + } + else + { + erh->fvalue = NULL; + erh->fstartptr = erh->fendptr = NULL; + } + + erh->flags = newflags; + + /* Reset flat-size info; we don't bother to make it valid now */ + erh->flat_size = 0; + + /* + * Now, release any storage belonging to old field values. It's safe to + * do this because ER_FLAG_DVALUES_VALID is no longer set in erh->flags; + * even if we fail partway through, the record is valid, and at worst + * we've failed to reclaim some space. + */ + if (oldflags & ER_FLAG_DVALUES_ALLOCED) + { + TupleDesc tupdesc = erh->er_tupdesc; + int i; + + for (i = 0; i < erh->nfields; i++) + { + if (!erh->dnulls[i] && + !(TupleDescAttr(tupdesc, i)->attbyval)) + { + char *oldValue = (char *) DatumGetPointer(erh->dvalues[i]); + + if (oldValue < oldfstartptr || oldValue >= oldfendptr) + pfree(oldValue); + } + } + } + + /* Likewise free the old tuple, if it was locally allocated */ + if (oldflags & ER_FLAG_FVALUE_ALLOCED) + heap_freetuple(oldtuple); + + /* We won't make a new deconstructed representation until/unless needed */ +} + +/* + * make_expanded_record_from_datum: build expanded record from composite Datum + * + * This combines the functions of make_expanded_record_from_typeid and + * expanded_record_set_tuple. However, we do not force a lookup of the + * tupdesc immediately, reasoning that it might never be needed. + * + * The expanded object will be a child of parentcontext. + * + * Note: a composite datum cannot self-identify as being of a domain type, + * so we need not consider domain cases here. + */ +Datum +make_expanded_record_from_datum(Datum recorddatum, MemoryContext parentcontext) +{ + ExpandedRecordHeader *erh; + HeapTupleHeader tuphdr; + HeapTupleData tmptup; + HeapTuple newtuple; + MemoryContext objcxt; + MemoryContext oldcxt; + + /* + * Allocate private context for expanded object. 
We use a regular-size + * context, not a small one, to improve the odds that we can fit a tupdesc + * into it without needing an extra malloc block. + */ + objcxt = AllocSetContextCreate(parentcontext, + "expanded record", + ALLOCSET_DEFAULT_SIZES); + + /* Set up expanded record header, initializing fields to 0/null */ + erh = (ExpandedRecordHeader *) + MemoryContextAllocZero(objcxt, sizeof(ExpandedRecordHeader)); + + EOH_init_header(&erh->hdr, &ER_methods, objcxt); + erh->er_magic = ER_MAGIC; + + /* + * Detoast and copy source record into private context, as a HeapTuple. + * (If we actually have to detoast the source, we'll leak some memory in + * the caller's context, but it doesn't seem worth worrying about.) + */ + tuphdr = DatumGetHeapTupleHeader(recorddatum); + + tmptup.t_len = HeapTupleHeaderGetDatumLength(tuphdr); + ItemPointerSetInvalid(&(tmptup.t_self)); + tmptup.t_tableOid = InvalidOid; + tmptup.t_data = tuphdr; + + oldcxt = MemoryContextSwitchTo(objcxt); + newtuple = heap_copytuple(&tmptup); + erh->flags |= ER_FLAG_FVALUE_ALLOCED; + MemoryContextSwitchTo(oldcxt); + + /* Fill in composite-type identification info */ + erh->er_decltypeid = erh->er_typeid = HeapTupleHeaderGetTypeId(tuphdr); + erh->er_typmod = HeapTupleHeaderGetTypMod(tuphdr); + + /* remember we have a flat representation */ + erh->fvalue = newtuple; + erh->fstartptr = (char *) newtuple->t_data; + erh->fendptr = ((char *) newtuple->t_data) + newtuple->t_len; + erh->flags |= ER_FLAG_FVALUE_VALID; + + /* Shouldn't need to set ER_FLAG_HAVE_EXTERNAL */ + Assert(!HeapTupleHeaderHasExternal(tuphdr)); + + /* + * We won't look up the tupdesc till we have to, nor make a deconstructed + * representation. We don't have enough info to fill flat_size and + * friends, either. + */ + + /* return a R/W pointer to the expanded record */ + return EOHPGetRWDatum(&erh->hdr); +} + +/* + * get_flat_size method for expanded records + * + * Note: call this in a reasonably short-lived memory context, in case of + * memory leaks from activities such as detoasting. + */ +static Size +ER_get_flat_size(ExpandedObjectHeader *eohptr) +{ + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) eohptr; + TupleDesc tupdesc; + Size len; + Size data_len; + int hoff; + bool hasnull; + int i; + + Assert(erh->er_magic == ER_MAGIC); + + /* + * The flat representation has to be a valid composite datum. Make sure + * that we have a registered, not anonymous, RECORD type. + */ + if (erh->er_typeid == RECORDOID && + erh->er_typmod < 0) + { + tupdesc = expanded_record_get_tupdesc(erh); + assign_record_type_typmod(tupdesc); + erh->er_typmod = tupdesc->tdtypmod; + } + + /* + * If we have a valid flattened value without out-of-line fields, we can + * just use it as-is. + */ + if (erh->flags & ER_FLAG_FVALUE_VALID && + !(erh->flags & ER_FLAG_HAVE_EXTERNAL)) + return erh->fvalue->t_len; + + /* If we have a cached size value, believe that */ + if (erh->flat_size) + return erh->flat_size; + + /* If we haven't yet deconstructed the tuple, do that */ + if (!(erh->flags & ER_FLAG_DVALUES_VALID)) + deconstruct_expanded_record(erh); + + /* Tuple descriptor must be valid by now */ + tupdesc = erh->er_tupdesc; + + /* + * Composite datums mustn't contain any out-of-line values. 
+ */ + if (erh->flags & ER_FLAG_HAVE_EXTERNAL) + { + for (i = 0; i < erh->nfields; i++) + { + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); + + if (!erh->dnulls[i] && + !attr->attbyval && attr->attlen == -1 && + VARATT_IS_EXTERNAL(DatumGetPointer(erh->dvalues[i]))) + { + /* + * It's an external toasted value, so we need to dereference + * it so that the flat representation will be self-contained. + * Do this step in the caller's context because the TOAST + * fetch might leak memory. That means making an extra copy, + * which is a tad annoying, but repetitive leaks in the + * record's context would be worse. + */ + Datum newValue; + + newValue = PointerGetDatum(PG_DETOAST_DATUM(erh->dvalues[i])); + /* expanded_record_set_field can do the rest */ + /* ... and we don't need it to recheck domain constraints */ + expanded_record_set_field_internal(erh, i + 1, + newValue, false, + false); + /* Might as well free the detoasted value */ + pfree(DatumGetPointer(newValue)); + } + } + + /* + * We have now removed all external field values, so we can clear the + * flag about them. This won't cause ER_flatten_into() to mistakenly + * take the fast path, since expanded_record_set_field() will have + * cleared ER_FLAG_FVALUE_VALID. + */ + erh->flags &= ~ER_FLAG_HAVE_EXTERNAL; + } + + /* Test if we currently have any null values */ + hasnull = false; + for (i = 0; i < erh->nfields; i++) + { + if (erh->dnulls[i]) + { + hasnull = true; + break; + } + } + + /* Determine total space needed */ + len = offsetof(HeapTupleHeaderData, t_bits); + + if (hasnull) + len += BITMAPLEN(tupdesc->natts); + + if (tupdesc->tdhasoid) + len += sizeof(Oid); + + hoff = len = MAXALIGN(len); /* align user data safely */ + + data_len = heap_compute_data_size(tupdesc, erh->dvalues, erh->dnulls); + + len += data_len; + + /* Cache for next time */ + erh->flat_size = len; + erh->data_len = data_len; + erh->hoff = hoff; + erh->hasnull = hasnull; + + return len; +} + +/* + * flatten_into method for expanded records + */ +static void +ER_flatten_into(ExpandedObjectHeader *eohptr, + void *result, Size allocated_size) +{ + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) eohptr; + HeapTupleHeader tuphdr = (HeapTupleHeader) result; + TupleDesc tupdesc; + + Assert(erh->er_magic == ER_MAGIC); + + /* Easy if we have a valid flattened value without out-of-line fields */ + if (erh->flags & ER_FLAG_FVALUE_VALID && + !(erh->flags & ER_FLAG_HAVE_EXTERNAL)) + { + Assert(allocated_size == erh->fvalue->t_len); + memcpy(tuphdr, erh->fvalue->t_data, allocated_size); + /* The original flattened value might not have datum header fields */ + HeapTupleHeaderSetDatumLength(tuphdr, allocated_size); + HeapTupleHeaderSetTypeId(tuphdr, erh->er_typeid); + HeapTupleHeaderSetTypMod(tuphdr, erh->er_typmod); + return; + } + + /* Else allocation should match previous get_flat_size result */ + Assert(allocated_size == erh->flat_size); + + /* We'll need the tuple descriptor */ + tupdesc = expanded_record_get_tupdesc(erh); + + /* We must ensure that any pad space is zero-filled */ + memset(tuphdr, 0, allocated_size); + + /* Set up header fields of composite Datum */ + HeapTupleHeaderSetDatumLength(tuphdr, allocated_size); + HeapTupleHeaderSetTypeId(tuphdr, erh->er_typeid); + HeapTupleHeaderSetTypMod(tuphdr, erh->er_typmod); + /* We also make sure that t_ctid is invalid unless explicitly set */ + ItemPointerSetInvalid(&(tuphdr->t_ctid)); + + HeapTupleHeaderSetNatts(tuphdr, tupdesc->natts); + tuphdr->t_hoff = erh->hoff; + + if (tupdesc->tdhasoid) /* else leave 
infomask = 0 */ + tuphdr->t_infomask = HEAP_HASOID; + + /* And fill the data area from dvalues/dnulls */ + heap_fill_tuple(tupdesc, + erh->dvalues, + erh->dnulls, + (char *) tuphdr + erh->hoff, + erh->data_len, + &tuphdr->t_infomask, + (erh->hasnull ? tuphdr->t_bits : NULL)); +} + +/* + * Look up the tupdesc for the expanded record's actual type + * + * Note: code internal to this module is allowed to just fetch + * erh->er_tupdesc if ER_FLAG_DVALUES_VALID is set; otherwise it should call + * expanded_record_get_tupdesc. This function is the out-of-line portion + * of expanded_record_get_tupdesc. + */ +TupleDesc +expanded_record_fetch_tupdesc(ExpandedRecordHeader *erh) +{ + TupleDesc tupdesc; + + /* Easy if we already have it (but caller should have checked already) */ + if (erh->er_tupdesc) + return erh->er_tupdesc; + + /* Lookup the composite type's tupdesc using the typcache */ + tupdesc = lookup_rowtype_tupdesc(erh->er_typeid, erh->er_typmod); + + /* + * If it's a refcounted tupdesc rather than a statically allocated one, we + * want to manage the refcount with a memory context callback rather than + * assuming that the CurrentResourceOwner is longer-lived than this + * expanded object. + */ + if (tupdesc->tdrefcount >= 0) + { + /* Register callback if we didn't already */ + if (erh->er_mcb.arg == NULL) + { + erh->er_mcb.func = ER_mc_callback; + erh->er_mcb.arg = (void *) erh; + MemoryContextRegisterResetCallback(erh->hdr.eoh_context, + &erh->er_mcb); + } + + /* Remember our own pointer */ + erh->er_tupdesc = tupdesc; + tupdesc->tdrefcount++; + + /* Release the pin lookup_rowtype_tupdesc acquired */ + DecrTupleDescRefCount(tupdesc); + } + else + { + /* Just remember the pointer */ + erh->er_tupdesc = tupdesc; + } + + /* In either case, fetch the process-global ID for this tupdesc */ + erh->er_tupdesc_id = assign_record_type_identifier(tupdesc->tdtypeid, + tupdesc->tdtypmod); + + return tupdesc; +} + +/* + * Get a HeapTuple representing the current value of the expanded record + * + * If valid, the originally stored tuple is returned, so caller must not + * scribble on it. Otherwise, we return a HeapTuple created in the current + * memory context. In either case, no attempt has been made to inline + * out-of-line toasted values, so the tuple isn't usable as a composite + * datum. + * + * Returns NULL if expanded record is empty. + */ +HeapTuple +expanded_record_get_tuple(ExpandedRecordHeader *erh) +{ + /* Easy case if we still have original tuple */ + if (erh->flags & ER_FLAG_FVALUE_VALID) + return erh->fvalue; + + /* Else just build a tuple from datums */ + if (erh->flags & ER_FLAG_DVALUES_VALID) + return heap_form_tuple(erh->er_tupdesc, erh->dvalues, erh->dnulls); + + /* Expanded record is empty */ + return NULL; +} + +/* + * Memory context reset callback for cleaning up external resources + */ +static void +ER_mc_callback(void *arg) +{ + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) arg; + TupleDesc tupdesc = erh->er_tupdesc; + + /* Release our privately-managed tupdesc refcount, if any */ + if (tupdesc) + { + erh->er_tupdesc = NULL; /* just for luck */ + if (tupdesc->tdrefcount > 0) + { + if (--tupdesc->tdrefcount == 0) + FreeTupleDesc(tupdesc); + } + } +} + +/* + * DatumGetExpandedRecord: get a writable expanded record from an input argument + * + * Caution: if the input is a read/write pointer, this returns the input + * argument; so callers must be sure that their changes are "safe", that is + * they cannot leave the record in a corrupt state. 
+ */ +ExpandedRecordHeader * +DatumGetExpandedRecord(Datum d) +{ + /* If it's a writable expanded record already, just return it */ + if (VARATT_IS_EXTERNAL_EXPANDED_RW(DatumGetPointer(d))) + { + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) DatumGetEOHP(d); + + Assert(erh->er_magic == ER_MAGIC); + return erh; + } + + /* Else expand the hard way */ + d = make_expanded_record_from_datum(d, CurrentMemoryContext); + return (ExpandedRecordHeader *) DatumGetEOHP(d); +} + +/* + * Create the Datum/isnull representation of an expanded record object + * if we didn't do so already. After calling this, it's OK to read the + * dvalues/dnulls arrays directly, rather than going through get_field. + * + * Note that if the object is currently empty ("null"), this will change + * it to represent a row of nulls. + */ +void +deconstruct_expanded_record(ExpandedRecordHeader *erh) +{ + TupleDesc tupdesc; + Datum *dvalues; + bool *dnulls; + int nfields; + + if (erh->flags & ER_FLAG_DVALUES_VALID) + return; /* already valid, nothing to do */ + + /* We'll need the tuple descriptor */ + tupdesc = expanded_record_get_tupdesc(erh); + + /* + * Allocate arrays in private context, if we don't have them already. We + * don't expect to see a change in nfields here, so while we cope if it + * happens, we don't bother avoiding a leak of the old arrays (which might + * not be separately palloc'd, anyway). + */ + nfields = tupdesc->natts; + if (erh->dvalues == NULL || erh->nfields != nfields) + { + char *chunk; + + /* + * To save a palloc cycle, we allocate both the Datum and isnull + * arrays in one palloc chunk. + */ + chunk = MemoryContextAlloc(erh->hdr.eoh_context, + nfields * (sizeof(Datum) + sizeof(bool))); + dvalues = (Datum *) chunk; + dnulls = (bool *) (chunk + nfields * sizeof(Datum)); + erh->dvalues = dvalues; + erh->dnulls = dnulls; + erh->nfields = nfields; + } + else + { + dvalues = erh->dvalues; + dnulls = erh->dnulls; + } + + if (erh->flags & ER_FLAG_FVALUE_VALID) + { + /* Deconstruct tuple */ + heap_deform_tuple(erh->fvalue, tupdesc, dvalues, dnulls); + } + else + { + /* If record was empty, instantiate it as a row of nulls */ + memset(dvalues, 0, nfields * sizeof(Datum)); + memset(dnulls, true, nfields * sizeof(bool)); + } + + /* Mark the dvalues as valid */ + erh->flags |= ER_FLAG_DVALUES_VALID; +} + +/* + * Look up a record field by name + * + * If there is a field named "fieldname", fill in the contents of finfo + * and return "true". Else return "false" without changing *finfo. + */ +bool +expanded_record_lookup_field(ExpandedRecordHeader *erh, const char *fieldname, + ExpandedRecordFieldInfo *finfo) +{ + TupleDesc tupdesc; + int fno; + Form_pg_attribute attr; + + tupdesc = expanded_record_get_tupdesc(erh); + + /* First, check user-defined attributes */ + for (fno = 0; fno < tupdesc->natts; fno++) + { + attr = TupleDescAttr(tupdesc, fno); + if (namestrcmp(&attr->attname, fieldname) == 0 && + !attr->attisdropped) + { + finfo->fnumber = attr->attnum; + finfo->ftypeid = attr->atttypid; + finfo->ftypmod = attr->atttypmod; + finfo->fcollation = attr->attcollation; + return true; + } + } + + /* How about system attributes? 
*/ + attr = SystemAttributeByName(fieldname, tupdesc->tdhasoid); + if (attr != NULL) + { + finfo->fnumber = attr->attnum; + finfo->ftypeid = attr->atttypid; + finfo->ftypmod = attr->atttypmod; + finfo->fcollation = attr->attcollation; + return true; + } + + return false; +} + +/* + * Fetch value of record field + * + * expanded_record_get_field is the frontend for this; it handles the + * easy inline-able cases. + */ +Datum +expanded_record_fetch_field(ExpandedRecordHeader *erh, int fnumber, + bool *isnull) +{ + if (fnumber > 0) + { + /* Empty record has null fields */ + if (ExpandedRecordIsEmpty(erh)) + { + *isnull = true; + return (Datum) 0; + } + /* Make sure we have deconstructed form */ + deconstruct_expanded_record(erh); + /* Out-of-range field number reads as null */ + if (unlikely(fnumber > erh->nfields)) + { + *isnull = true; + return (Datum) 0; + } + *isnull = erh->dnulls[fnumber - 1]; + return erh->dvalues[fnumber - 1]; + } + else + { + /* System columns read as null if we haven't got flat tuple */ + if (erh->fvalue == NULL) + { + *isnull = true; + return (Datum) 0; + } + /* heap_getsysattr doesn't actually use tupdesc, so just pass null */ + return heap_getsysattr(erh->fvalue, fnumber, NULL, isnull); + } +} + +/* + * Set value of record field + * + * If the expanded record is of domain type, the assignment will be rejected + * (without changing the record's state) if the domain's constraints would + * be violated. + * + * Internal callers can pass check_constraints = false to skip application + * of domain constraints. External callers should never do that. + */ +void +expanded_record_set_field_internal(ExpandedRecordHeader *erh, int fnumber, + Datum newValue, bool isnull, + bool check_constraints) +{ + TupleDesc tupdesc; + Form_pg_attribute attr; + Datum *dvalues; + bool *dnulls; + char *oldValue; + + /* + * Shouldn't ever be trying to assign new data to a dummy header, except + * in the case of an internal call for field inlining. + */ + Assert(!(erh->flags & ER_FLAG_IS_DUMMY) || !check_constraints); + + /* Before performing the assignment, see if result will satisfy domain */ + if ((erh->flags & ER_FLAG_IS_DOMAIN) && check_constraints) + check_domain_for_new_field(erh, fnumber, newValue, isnull); + + /* If we haven't yet deconstructed the tuple, do that */ + if (!(erh->flags & ER_FLAG_DVALUES_VALID)) + deconstruct_expanded_record(erh); + + /* Tuple descriptor must be valid by now */ + tupdesc = erh->er_tupdesc; + Assert(erh->nfields == tupdesc->natts); + + /* Caller error if fnumber is system column or nonexistent column */ + if (unlikely(fnumber <= 0 || fnumber > erh->nfields)) + elog(ERROR, "cannot assign to field %d of expanded record", fnumber); + + /* + * Copy new field value into record's context, if needed. + */ + attr = TupleDescAttr(tupdesc, fnumber - 1); + if (!isnull && !attr->attbyval) + { + MemoryContext oldcxt; + + oldcxt = MemoryContextSwitchTo(erh->hdr.eoh_context); + newValue = datumCopy(newValue, false, attr->attlen); + MemoryContextSwitchTo(oldcxt); + + /* Remember that we have field(s) that may need to be pfree'd */ + erh->flags |= ER_FLAG_DVALUES_ALLOCED; + + /* + * While we're here, note whether it's an external toasted value, + * because that could mean we need to inline it later. + */ + if (attr->attlen == -1 && + VARATT_IS_EXTERNAL(DatumGetPointer(newValue))) + erh->flags |= ER_FLAG_HAVE_EXTERNAL; + } + + /* + * We're ready to make irreversible changes. 
+ */ + dvalues = erh->dvalues; + dnulls = erh->dnulls; + + /* Flattened value will no longer represent record accurately */ + erh->flags &= ~ER_FLAG_FVALUE_VALID; + /* And we don't know the flattened size either */ + erh->flat_size = 0; + + /* Grab old field value for pfree'ing, if needed. */ + if (!attr->attbyval && !dnulls[fnumber - 1]) + oldValue = (char *) DatumGetPointer(dvalues[fnumber - 1]); + else + oldValue = NULL; + + /* And finally we can insert the new field. */ + dvalues[fnumber - 1] = newValue; + dnulls[fnumber - 1] = isnull; + + /* + * Free old field if needed; this keeps repeated field replacements from + * bloating the record's storage. If the pfree somehow fails, it won't + * corrupt the record. + * + * If we're updating a dummy header, we can't risk pfree'ing the old + * value, because most likely the expanded record's main header still has + * a pointer to it. This won't result in any sustained memory leak, since + * whatever we just allocated here is in the short-lived domain check + * context. + */ + if (oldValue && !(erh->flags & ER_FLAG_IS_DUMMY)) + { + /* Don't try to pfree a part of the original flat record */ + if (oldValue < erh->fstartptr || oldValue >= erh->fendptr) + pfree(oldValue); + } +} + +/* + * Set all record field(s) + * + * Caller must ensure that the provided datums are of the right types + * to match the record's previously assigned rowtype. + * + * Unlike repeated application of expanded_record_set_field(), this does not + * guarantee to leave the expanded record in a non-corrupt state in event + * of an error. Typically it would only be used for initializing a new + * expanded record. + */ +void +expanded_record_set_fields(ExpandedRecordHeader *erh, + const Datum *newValues, const bool *isnulls) +{ + TupleDesc tupdesc; + Datum *dvalues; + bool *dnulls; + int fnumber; + MemoryContext oldcxt; + + /* Shouldn't ever be trying to assign new data to a dummy header */ + Assert(!(erh->flags & ER_FLAG_IS_DUMMY)); + + /* If we haven't yet deconstructed the tuple, do that */ + if (!(erh->flags & ER_FLAG_DVALUES_VALID)) + deconstruct_expanded_record(erh); + + /* Tuple descriptor must be valid by now */ + tupdesc = erh->er_tupdesc; + Assert(erh->nfields == tupdesc->natts); + + /* Flattened value will no longer represent record accurately */ + erh->flags &= ~ER_FLAG_FVALUE_VALID; + /* And we don't know the flattened size either */ + erh->flat_size = 0; + + oldcxt = MemoryContextSwitchTo(erh->hdr.eoh_context); + + dvalues = erh->dvalues; + dnulls = erh->dnulls; + + for (fnumber = 0; fnumber < erh->nfields; fnumber++) + { + Form_pg_attribute attr = TupleDescAttr(tupdesc, fnumber); + Datum newValue; + bool isnull; + + /* Ignore dropped columns */ + if (attr->attisdropped) + continue; + + newValue = newValues[fnumber]; + isnull = isnulls[fnumber]; + + if (!attr->attbyval) + { + /* + * Copy new field value into record's context, if needed. + */ + if (!isnull) + { + newValue = datumCopy(newValue, false, attr->attlen); + + /* Remember that we have field(s) that need to be pfree'd */ + erh->flags |= ER_FLAG_DVALUES_ALLOCED; + + /* + * While we're here, note whether it's an external toasted + * value, because that could mean we need to inline it later. + */ + if (attr->attlen == -1 && + VARATT_IS_EXTERNAL(DatumGetPointer(newValue))) + erh->flags |= ER_FLAG_HAVE_EXTERNAL; + } + + /* + * Free old field value, if any (not likely, since really we ought + * to be inserting into an empty record). 
+ */ + if (unlikely(!dnulls[fnumber])) + { + char *oldValue; + + oldValue = (char *) DatumGetPointer(dvalues[fnumber]); + /* Don't try to pfree a part of the original flat record */ + if (oldValue < erh->fstartptr || oldValue >= erh->fendptr) + pfree(oldValue); + } + } + + /* And finally we can insert the new field. */ + dvalues[fnumber] = newValue; + dnulls[fnumber] = isnull; + } + + /* + * Because we don't guarantee atomicity of set_fields(), we can just leave + * checking of domain constraints to occur as the final step; if it throws + * an error, too bad. + */ + if (erh->flags & ER_FLAG_IS_DOMAIN) + { + /* We run domain_check in a short-lived context to limit cruft */ + MemoryContextSwitchTo(get_domain_check_cxt(erh)); + + domain_check(ExpandedRecordGetRODatum(erh), false, + erh->er_decltypeid, + &erh->er_domaininfo, + erh->hdr.eoh_context); + } + + MemoryContextSwitchTo(oldcxt); +} + +/* + * Construct (or reset) working memory context for domain checks. + * + * If we don't have a working memory context for domain checking, make one; + * if we have one, reset it to get rid of any leftover cruft. (It is a tad + * annoying to need a whole context for this, since it will often go unused + * --- but it's hard to avoid memory leaks otherwise. We can make the + * context small, at least.) + */ +static MemoryContext +get_domain_check_cxt(ExpandedRecordHeader *erh) +{ + if (erh->er_domain_check_cxt == NULL) + erh->er_domain_check_cxt = + AllocSetContextCreate(erh->hdr.eoh_context, + "expanded record domain checks", + ALLOCSET_SMALL_SIZES); + else + MemoryContextReset(erh->er_domain_check_cxt); + return erh->er_domain_check_cxt; +} + +/* + * Construct "dummy header" for checking domain constraints. + * + * Since we don't want to modify the state of the expanded record until + * we've validated the constraints, our approach is to set up a dummy + * record header containing the new field value(s) and then pass that to + * domain_check. We retain the dummy header as part of the expanded + * record's state to save palloc cycles, but reinitialize (most of) + * its contents on each use. + */ +static void +build_dummy_expanded_header(ExpandedRecordHeader *main_erh) +{ + ExpandedRecordHeader *erh; + TupleDesc tupdesc = expanded_record_get_tupdesc(main_erh); + + /* Ensure we have a domain_check_cxt */ + (void) get_domain_check_cxt(main_erh); + + /* + * Allocate dummy header on first time through, or in the unlikely event + * that the number of fields changes (in which case we just leak the old + * one). Include space for its field values in the request. + */ + erh = main_erh->er_dummy_header; + if (erh == NULL || erh->nfields != tupdesc->natts) + { + char *chunk; + + erh = (ExpandedRecordHeader *) + MemoryContextAlloc(main_erh->hdr.eoh_context, + MAXALIGN(sizeof(ExpandedRecordHeader)) + + tupdesc->natts * (sizeof(Datum) + sizeof(bool))); + + /* Ensure all header fields are initialized to 0/null */ + memset(erh, 0, sizeof(ExpandedRecordHeader)); + + /* + * We set up the dummy header with an indication that its memory + * context is the short-lived context. This is so that, if any + * detoasting of out-of-line values happens due to an attempt to + * extract a composite datum from the dummy header, the detoasted + * stuff will end up in the short-lived context and not cause a leak. 
+ * This is cheating a bit on the expanded-object protocol; but since + * we never pass a R/W pointer to the dummy object to any other code, + * nothing else is authorized to delete or transfer ownership of the + * object's context, so it should be safe enough. + */ + EOH_init_header(&erh->hdr, &ER_methods, main_erh->er_domain_check_cxt); + erh->er_magic = ER_MAGIC; + + /* Set up dvalues/dnulls, with no valid contents as yet */ + chunk = (char *) erh + MAXALIGN(sizeof(ExpandedRecordHeader)); + erh->dvalues = (Datum *) chunk; + erh->dnulls = (bool *) (chunk + tupdesc->natts * sizeof(Datum)); + erh->nfields = tupdesc->natts; + + /* + * The fields we just set are assumed to remain constant through + * multiple uses of the dummy header to check domain constraints. All + * other dummy header fields should be explicitly reset below, to + * ensure there's not accidental effects of one check on the next one. + */ + + main_erh->er_dummy_header = erh; + } + + /* + * If anything inquires about the dummy header's declared type, it should + * report the composite base type, not the domain type (since the VALUE in + * a domain check constraint is of the base type not the domain). Hence + * we do not transfer over the IS_DOMAIN flag, nor indeed any of the main + * header's flags, since the dummy header is empty of data at this point. + * But don't forget to mark header as dummy. + */ + erh->flags = ER_FLAG_IS_DUMMY; + + /* Copy composite-type identification info */ + erh->er_decltypeid = erh->er_typeid = main_erh->er_typeid; + erh->er_typmod = main_erh->er_typmod; + + /* Dummy header does not need its own tupdesc refcount */ + erh->er_tupdesc = tupdesc; + erh->er_tupdesc_id = main_erh->er_tupdesc_id; + + /* + * It's tempting to copy over whatever we know about the flat size, but + * there's no point since we're surely about to modify the dummy record's + * field(s). Instead just clear anything left over from a previous usage + * cycle. + */ + erh->flat_size = 0; + + /* Copy over fvalue if we have it, so that system columns are available */ + erh->fvalue = main_erh->fvalue; + erh->fstartptr = main_erh->fstartptr; + erh->fendptr = main_erh->fendptr; +} + +/* + * Precheck domain constraints for a set_field operation + */ +static pg_noinline void +check_domain_for_new_field(ExpandedRecordHeader *erh, int fnumber, + Datum newValue, bool isnull) +{ + ExpandedRecordHeader *dummy_erh; + MemoryContext oldcxt; + + /* Construct dummy header to contain proposed new field set */ + build_dummy_expanded_header(erh); + dummy_erh = erh->er_dummy_header; + + /* + * If record isn't empty, just deconstruct it (if needed) and copy over + * the existing field values. If it is empty, just fill fields with nulls + * manually --- don't call deconstruct_expanded_record prematurely. + */ + if (!ExpandedRecordIsEmpty(erh)) + { + deconstruct_expanded_record(erh); + memcpy(dummy_erh->dvalues, erh->dvalues, + dummy_erh->nfields * sizeof(Datum)); + memcpy(dummy_erh->dnulls, erh->dnulls, + dummy_erh->nfields * sizeof(bool)); + /* There might be some external values in there... 
*/ + dummy_erh->flags |= erh->flags & ER_FLAG_HAVE_EXTERNAL; + } + else + { + memset(dummy_erh->dvalues, 0, dummy_erh->nfields * sizeof(Datum)); + memset(dummy_erh->dnulls, true, dummy_erh->nfields * sizeof(bool)); + } + + /* Either way, we now have valid dvalues */ + dummy_erh->flags |= ER_FLAG_DVALUES_VALID; + + /* Caller error if fnumber is system column or nonexistent column */ + if (unlikely(fnumber <= 0 || fnumber > dummy_erh->nfields)) + elog(ERROR, "cannot assign to field %d of expanded record", fnumber); + + /* Insert proposed new value into dummy field array */ + dummy_erh->dvalues[fnumber - 1] = newValue; + dummy_erh->dnulls[fnumber - 1] = isnull; + + /* + * The proposed new value might be external, in which case we'd better set + * the flag for that in dummy_erh. (This matters in case something in the + * domain check expressions tries to extract a flat value from the dummy + * header.) + */ + if (!isnull) + { + Form_pg_attribute attr = TupleDescAttr(erh->er_tupdesc, fnumber - 1); + + if (!attr->attbyval && attr->attlen == -1 && + VARATT_IS_EXTERNAL(DatumGetPointer(newValue))) + dummy_erh->flags |= ER_FLAG_HAVE_EXTERNAL; + } + + /* + * We call domain_check in the short-lived context, so that any cruft + * leaked by expression evaluation can be reclaimed. + */ + oldcxt = MemoryContextSwitchTo(erh->er_domain_check_cxt); + + /* + * And now we can apply the check. Note we use main header's domain cache + * space, so that caching carries across repeated uses. + */ + domain_check(ExpandedRecordGetRODatum(dummy_erh), false, + erh->er_decltypeid, + &erh->er_domaininfo, + erh->hdr.eoh_context); + + MemoryContextSwitchTo(oldcxt); + + /* We might as well clean up cruft immediately. */ + MemoryContextReset(erh->er_domain_check_cxt); +} + +/* + * Precheck domain constraints for a set_tuple operation + */ +static pg_noinline void +check_domain_for_new_tuple(ExpandedRecordHeader *erh, HeapTuple tuple) +{ + ExpandedRecordHeader *dummy_erh; + MemoryContext oldcxt; + + /* If we're being told to set record to empty, just see if NULL is OK */ + if (tuple == NULL) + { + /* We run domain_check in a short-lived context to limit cruft */ + oldcxt = MemoryContextSwitchTo(get_domain_check_cxt(erh)); + + domain_check((Datum) 0, true, + erh->er_decltypeid, + &erh->er_domaininfo, + erh->hdr.eoh_context); + + MemoryContextSwitchTo(oldcxt); + + /* We might as well clean up cruft immediately. */ + MemoryContextReset(erh->er_domain_check_cxt); + + return; + } + + /* Construct dummy header to contain replacement tuple */ + build_dummy_expanded_header(erh); + dummy_erh = erh->er_dummy_header; + + /* Insert tuple, but don't bother to deconstruct its fields for now */ + dummy_erh->fvalue = tuple; + dummy_erh->fstartptr = (char *) tuple->t_data; + dummy_erh->fendptr = ((char *) tuple->t_data) + tuple->t_len; + dummy_erh->flags |= ER_FLAG_FVALUE_VALID; + + /* Remember if we have any out-of-line field values */ + if (HeapTupleHasExternal(tuple)) + dummy_erh->flags |= ER_FLAG_HAVE_EXTERNAL; + + /* + * We call domain_check in the short-lived context, so that any cruft + * leaked by expression evaluation can be reclaimed. + */ + oldcxt = MemoryContextSwitchTo(erh->er_domain_check_cxt); + + /* + * And now we can apply the check. Note we use main header's domain cache + * space, so that caching carries across repeated uses. 
+ */ + domain_check(ExpandedRecordGetRODatum(dummy_erh), false, + erh->er_decltypeid, + &erh->er_domaininfo, + erh->hdr.eoh_context); + + MemoryContextSwitchTo(oldcxt); + + /* We might as well clean up cruft immediately. */ + MemoryContextReset(erh->er_domain_check_cxt); +} diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c index cf22306b20..874d8cd1c9 100644 --- a/src/backend/utils/cache/typcache.c +++ b/src/backend/utils/cache/typcache.c @@ -259,12 +259,22 @@ static const dshash_parameters srtr_typmod_table_params = { LWTRANCHE_SESSION_TYPMOD_TABLE }; +/* hashtable for recognizing registered record types */ static HTAB *RecordCacheHash = NULL; +/* arrays of info about registered record types, indexed by assigned typmod */ static TupleDesc *RecordCacheArray = NULL; -static int32 RecordCacheArrayLen = 0; /* allocated length of array */ +static uint64 *RecordIdentifierArray = NULL; +static int32 RecordCacheArrayLen = 0; /* allocated length of above arrays */ static int32 NextRecordTypmod = 0; /* number of entries used */ +/* + * Process-wide counter for generating unique tupledesc identifiers. + * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen + * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER. + */ +static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER; + static void load_typcache_tupdesc(TypeCacheEntry *typentry); static void load_rangetype_info(TypeCacheEntry *typentry); static void load_domaintype_info(TypeCacheEntry *typentry); @@ -793,10 +803,10 @@ load_typcache_tupdesc(TypeCacheEntry *typentry) typentry->tupDesc->tdrefcount++; /* - * In future, we could take some pains to not increment the seqno if the - * tupdesc didn't really change; but for now it's not worth it. + * In future, we could take some pains to not change tupDesc_identifier if + * the tupdesc didn't really change; but for now it's not worth it. */ - typentry->tupDescSeqNo++; + typentry->tupDesc_identifier = ++tupledesc_id_counter; relation_close(rel, AccessShareLock); } @@ -1496,7 +1506,8 @@ cache_range_element_properties(TypeCacheEntry *typentry) } /* - * Make sure that RecordCacheArray is large enough to store 'typmod'. + * Make sure that RecordCacheArray and RecordIdentifierArray are large enough + * to store 'typmod'. */ static void ensure_record_cache_typmod_slot_exists(int32 typmod) @@ -1505,6 +1516,8 @@ ensure_record_cache_typmod_slot_exists(int32 typmod) { RecordCacheArray = (TupleDesc *) MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(TupleDesc)); + RecordIdentifierArray = (uint64 *) + MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(uint64)); RecordCacheArrayLen = 64; } @@ -1519,6 +1532,10 @@ ensure_record_cache_typmod_slot_exists(int32 typmod) newlen * sizeof(TupleDesc)); memset(RecordCacheArray + RecordCacheArrayLen, 0, (newlen - RecordCacheArrayLen) * sizeof(TupleDesc)); + RecordIdentifierArray = (uint64 *) repalloc(RecordIdentifierArray, + newlen * sizeof(uint64)); + memset(RecordIdentifierArray + RecordCacheArrayLen, 0, + (newlen - RecordCacheArrayLen) * sizeof(uint64)); RecordCacheArrayLen = newlen; } } @@ -1581,11 +1598,17 @@ lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError) /* * Our local array can now point directly to the TupleDesc - * in shared memory. + * in shared memory, which is non-reference-counted. */ RecordCacheArray[typmod] = tupdesc; Assert(tupdesc->tdrefcount == -1); + /* + * We don't share tupdesc identifiers across processes, so + * assign one locally. 
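To illustrate how these identifiers are meant to be consumed (a hedged sketch, not part of the patch): a caller caches one uint64 alongside its tupdesc-derived state and rebuilds that state only when the identifier moves. maybe_refresh and rebuild_dependent_state are hypothetical names; INVALID_TUPLEDESC_IDENTIFIER comes from typcache.h.

    /* illustrative only: cheap detection of tuple-descriptor changes */
    static uint64 cached_tupdesc_id = INVALID_TUPLEDESC_IDENTIFIER;

    static void
    maybe_refresh(Oid typid, int32 typmod)
    {
        uint64      cur_id = assign_record_type_identifier(typid, typmod);

        if (cur_id != cached_tupdesc_id)
        {
            rebuild_dependent_state(typid, typmod); /* hypothetical */
            cached_tupdesc_id = cur_id;
        }
    }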
+ */ + RecordIdentifierArray[typmod] = ++tupledesc_id_counter; + dshash_release_lock(CurrentSession->shared_typmod_table, entry); @@ -1790,12 +1813,61 @@ assign_record_type_typmod(TupleDesc tupDesc) RecordCacheArray[entDesc->tdtypmod] = entDesc; recentry->tupdesc = entDesc; + /* Assign a unique tupdesc identifier, too. */ + RecordIdentifierArray[entDesc->tdtypmod] = ++tupledesc_id_counter; + /* Update the caller's tuple descriptor. */ tupDesc->tdtypmod = entDesc->tdtypmod; MemoryContextSwitchTo(oldcxt); } +/* + * assign_record_type_identifier + * + * Get an identifier, which will be unique over the lifespan of this backend + * process, for the current tuple descriptor of the specified composite type. + * For named composite types, the value is guaranteed to change if the type's + * definition does. For registered RECORD types, the value will not change + * once assigned, since the registered type won't either. If an anonymous + * RECORD type is specified, we return a new identifier on each call. + */ +uint64 +assign_record_type_identifier(Oid type_id, int32 typmod) +{ + if (type_id != RECORDOID) + { + /* + * It's a named composite type, so use the regular typcache. + */ + TypeCacheEntry *typentry; + + typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC); + if (typentry->tupDesc == NULL) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("type %s is not composite", + format_type_be(type_id)))); + Assert(typentry->tupDesc_identifier != 0); + return typentry->tupDesc_identifier; + } + else + { + /* + * It's a transient record type, so look in our record-type table. + */ + if (typmod >= 0 && typmod < RecordCacheArrayLen && + RecordCacheArray[typmod] != NULL) + { + Assert(RecordIdentifierArray[typmod] != 0); + return RecordIdentifierArray[typmod]; + } + + /* For anonymous or unrecognized record type, generate a new ID */ + return ++tupledesc_id_counter; + } +} + /* * Return the amout of shmem required to hold a SharedRecordTypmodRegistry. * This exists only to avoid exposing private innards of diff --git a/src/include/utils/expandedrecord.h b/src/include/utils/expandedrecord.h new file mode 100644 index 0000000000..a95c9cce22 --- /dev/null +++ b/src/include/utils/expandedrecord.h @@ -0,0 +1,227 @@ +/*------------------------------------------------------------------------- + * + * expandedrecord.h + * Declarations for composite expanded objects. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/utils/expandedrecord.h + * + *------------------------------------------------------------------------- + */ +#ifndef EXPANDEDRECORD_H +#define EXPANDEDRECORD_H + +#include "access/htup.h" +#include "access/tupdesc.h" +#include "fmgr.h" +#include "utils/expandeddatum.h" + + +/* + * An expanded record is contained within a private memory context (as + * all expanded objects must be) and has a control structure as below. + * + * The expanded record might contain a regular "flat" tuple if that was the + * original input and we've not modified it. Otherwise, the contents are + * represented by Datum/isnull arrays plus type information. We could also + * have both forms, if we've deconstructed the original tuple for access + * purposes but not yet changed it. For pass-by-reference field types, the + * Datums would point into the flat tuple in this situation. 
Once we start + * modifying tuple fields, new pass-by-ref fields are separately palloc'd + * within the memory context. + * + * It's possible to build an expanded record that references a "flat" tuple + * stored externally, if the caller can guarantee that that tuple will not + * change for the lifetime of the expanded record. (This frammish is mainly + * meant to avoid unnecessary data copying in trigger functions.) + */ +#define ER_MAGIC 1384727874 /* ID for debugging crosschecks */ + +typedef struct ExpandedRecordHeader +{ + /* Standard header for expanded objects */ + ExpandedObjectHeader hdr; + + /* Magic value identifying an expanded record (for debugging only) */ + int er_magic; + + /* Assorted flag bits */ + int flags; +#define ER_FLAG_FVALUE_VALID 0x0001 /* fvalue is up to date? */ +#define ER_FLAG_FVALUE_ALLOCED 0x0002 /* fvalue is local storage? */ +#define ER_FLAG_DVALUES_VALID 0x0004 /* dvalues/dnulls are up to date? */ +#define ER_FLAG_DVALUES_ALLOCED 0x0008 /* any field values local storage? */ +#define ER_FLAG_HAVE_EXTERNAL 0x0010 /* any field values are external? */ +#define ER_FLAG_TUPDESC_ALLOCED 0x0020 /* tupdesc is local storage? */ +#define ER_FLAG_IS_DOMAIN 0x0040 /* er_decltypeid is domain? */ +#define ER_FLAG_IS_DUMMY 0x0080 /* this header is dummy (see below) */ +/* flag bits that are not to be cleared when replacing tuple data: */ +#define ER_FLAGS_NON_DATA \ + (ER_FLAG_TUPDESC_ALLOCED | ER_FLAG_IS_DOMAIN | ER_FLAG_IS_DUMMY) + + /* Declared type of the record variable (could be a domain type) */ + Oid er_decltypeid; + + /* + * Actual composite type/typmod; never a domain (if ER_FLAG_IS_DOMAIN, + * these identify the composite base type). These will match + * er_tupdesc->tdtypeid/tdtypmod, as well as the header fields of + * composite datums made from or stored in this expanded record. + */ + Oid er_typeid; /* type OID of the composite type */ + int32 er_typmod; /* typmod of the composite type */ + + /* + * Tuple descriptor, if we have one, else NULL. This may point to a + * reference-counted tupdesc originally belonging to the typcache, in + * which case we use a memory context reset callback to release the + * refcount. It can also be locally allocated in this object's private + * context (in which case ER_FLAG_TUPDESC_ALLOCED is set). + */ + TupleDesc er_tupdesc; + + /* + * Unique-within-process identifier for the tupdesc (see typcache.h). This + * field will never be equal to INVALID_TUPLEDESC_IDENTIFIER. + */ + uint64 er_tupdesc_id; + + /* + * If we have a Datum-array representation of the record, it's kept here; + * else ER_FLAG_DVALUES_VALID is not set, and dvalues/dnulls may be NULL + * if they've not yet been allocated. If allocated, the dvalues and + * dnulls arrays are palloc'd within the object private context, and are + * of length matching er_tupdesc->natts. For pass-by-ref field types, + * dvalues entries might point either into the fstartptr..fendptr area, or + * to separately palloc'd chunks. + */ + Datum *dvalues; /* array of Datums */ + bool *dnulls; /* array of is-null flags for Datums */ + int nfields; /* length of above arrays */ + + /* + * flat_size is the current space requirement for the flat equivalent of + * the expanded record, if known; otherwise it's 0. We store this to make + * consecutive calls of get_flat_size cheap. If flat_size is not 0, the + * component values data_len, hoff, and hasnull must be valid too. 
+ */ + Size flat_size; + + Size data_len; /* data len within flat_size */ + int hoff; /* header offset */ + bool hasnull; /* null bitmap needed? */ + + /* + * fvalue points to the flat representation if we have one, else it is + * NULL. If the flat representation is valid (up to date) then + * ER_FLAG_FVALUE_VALID is set. Even if we've outdated the flat + * representation due to changes of user fields, it can still be used to + * fetch system column values. If we have a flat representation then + * fstartptr/fendptr point to the start and end+1 of its data area; this + * is so that we can tell which Datum pointers point into the flat + * representation rather than being pointers to separately palloc'd data. + */ + HeapTuple fvalue; /* might or might not be private storage */ + char *fstartptr; /* start of its data area */ + char *fendptr; /* end+1 of its data area */ + + /* Working state for domain checking, used if ER_FLAG_IS_DOMAIN is set */ + MemoryContext er_domain_check_cxt; /* short-term memory context */ + struct ExpandedRecordHeader *er_dummy_header; /* dummy record header */ + void *er_domaininfo; /* cache space for domain_check() */ + + /* Callback info (it's active if er_mcb.arg is not NULL) */ + MemoryContextCallback er_mcb; +} ExpandedRecordHeader; + +/* fmgr macros for expanded record objects */ +#define PG_GETARG_EXPANDED_RECORD(n) DatumGetExpandedRecord(PG_GETARG_DATUM(n)) +#define ExpandedRecordGetDatum(erh) EOHPGetRWDatum(&(erh)->hdr) +#define ExpandedRecordGetRODatum(erh) EOHPGetRODatum(&(erh)->hdr) +#define PG_RETURN_EXPANDED_RECORD(x) PG_RETURN_DATUM(ExpandedRecordGetDatum(x)) + +/* assorted other macros */ +#define ExpandedRecordIsEmpty(erh) \ + (((erh)->flags & (ER_FLAG_DVALUES_VALID | ER_FLAG_FVALUE_VALID)) == 0) +#define ExpandedRecordIsDomain(erh) \ + (((erh)->flags & ER_FLAG_IS_DOMAIN) != 0) + +/* this can substitute for TransferExpandedObject() when we already have erh */ +#define TransferExpandedRecord(erh, cxt) \ + MemoryContextSetParent((erh)->hdr.eoh_context, cxt) + +/* information returned by expanded_record_lookup_field() */ +typedef struct ExpandedRecordFieldInfo +{ + int fnumber; /* field's attr number in record */ + Oid ftypeid; /* field's type/typmod info */ + int32 ftypmod; + Oid fcollation; /* field's collation if any */ +} ExpandedRecordFieldInfo; + +/* + * prototypes for functions defined in expandedrecord.c + */ +extern ExpandedRecordHeader *make_expanded_record_from_typeid(Oid type_id, int32 typmod, + MemoryContext parentcontext); +extern ExpandedRecordHeader *make_expanded_record_from_tupdesc(TupleDesc tupdesc, + MemoryContext parentcontext); +extern ExpandedRecordHeader *make_expanded_record_from_exprecord(ExpandedRecordHeader *olderh, + MemoryContext parentcontext); +extern void expanded_record_set_tuple(ExpandedRecordHeader *erh, + HeapTuple tuple, bool copy); +extern Datum make_expanded_record_from_datum(Datum recorddatum, + MemoryContext parentcontext); +extern TupleDesc expanded_record_fetch_tupdesc(ExpandedRecordHeader *erh); +extern HeapTuple expanded_record_get_tuple(ExpandedRecordHeader *erh); +extern ExpandedRecordHeader *DatumGetExpandedRecord(Datum d); +extern void deconstruct_expanded_record(ExpandedRecordHeader *erh); +extern bool expanded_record_lookup_field(ExpandedRecordHeader *erh, + const char *fieldname, + ExpandedRecordFieldInfo *finfo); +extern Datum expanded_record_fetch_field(ExpandedRecordHeader *erh, int fnumber, + bool *isnull); +extern void expanded_record_set_field_internal(ExpandedRecordHeader *erh, + int fnumber, 
+ Datum newValue, bool isnull, + bool check_constraints); +extern void expanded_record_set_fields(ExpandedRecordHeader *erh, + const Datum *newValues, const bool *isnulls); + +/* outside code should never call expanded_record_set_field_internal as such */ +#define expanded_record_set_field(erh, fnumber, newValue, isnull) \ + expanded_record_set_field_internal(erh, fnumber, newValue, isnull, true) + +/* + * Inline-able fast cases. The expanded_record_fetch_xxx functions above + * handle the general cases. + */ + +/* Get the tupdesc for the expanded record's actual type */ +static inline TupleDesc +expanded_record_get_tupdesc(ExpandedRecordHeader *erh) +{ + if (likely(erh->er_tupdesc != NULL)) + return erh->er_tupdesc; + else + return expanded_record_fetch_tupdesc(erh); +} + +/* Get value of record field */ +static inline Datum +expanded_record_get_field(ExpandedRecordHeader *erh, int fnumber, + bool *isnull) +{ + if ((erh->flags & ER_FLAG_DVALUES_VALID) && + likely(fnumber > 0 && fnumber <= erh->nfields)) + { + *isnull = erh->dnulls[fnumber - 1]; + return erh->dvalues[fnumber - 1]; + } + else + return expanded_record_fetch_field(erh, fnumber, isnull); +} + +#endif /* EXPANDEDRECORD_H */ diff --git a/src/include/utils/typcache.h b/src/include/utils/typcache.h index f25448d316..217d064da5 100644 --- a/src/include/utils/typcache.h +++ b/src/include/utils/typcache.h @@ -76,11 +76,14 @@ typedef struct TypeCacheEntry /* * Tuple descriptor if it's a composite type (row type). NULL if not * composite or information hasn't yet been requested. (NOTE: this is a - * reference-counted tupledesc.) To simplify caching dependent info, - * tupDescSeqNo is incremented each time tupDesc is rebuilt in a session. + * reference-counted tupledesc.) + * + * To simplify caching dependent info, tupDesc_identifier is an identifier + * for this tupledesc that is unique for the life of the process, and + * changes anytime the tupledesc does. Zero if not yet determined. */ TupleDesc tupDesc; - int64 tupDescSeqNo; + uint64 tupDesc_identifier; /* * Fields computed when TYPECACHE_RANGE_INFO is requested. Zeroes if not @@ -138,6 +141,9 @@ typedef struct TypeCacheEntry #define TYPECACHE_HASH_EXTENDED_PROC 0x4000 #define TYPECACHE_HASH_EXTENDED_PROC_FINFO 0x8000 +/* This value will not equal any valid tupledesc identifier, nor 0 */ +#define INVALID_TUPLEDESC_IDENTIFIER ((uint64) 1) + /* * Callers wishing to maintain a long-lived reference to a domain's constraint * set must store it in one of these. 
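Returning to the expanded-record API declared above, a hedged usage sketch (illustrative only, error handling omitted): mytypeoid stands for the OID of some composite type whose first two columns are assumed to be of type int4.

    /* illustrative only: build a record field-by-field, then use it */
    ExpandedRecordHeader *erh;
    Datum       d;
    bool        isnull;

    erh = make_expanded_record_from_typeid(mytypeoid, -1, CurrentMemoryContext);

    /* fields are numbered from 1, matching tupdesc attribute numbers */
    expanded_record_set_field(erh, 1, Int32GetDatum(7), false);
    expanded_record_set_field(erh, 2, Int32GetDatum(11), false);

    d = expanded_record_get_field(erh, 1, &isnull);     /* inline fast path */

    /* pass the whole record around as a read-only composite Datum */
    d = ExpandedRecordGetRODatum(erh);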
Use InitDomainConstraintRef() and @@ -179,6 +185,8 @@ extern TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, extern void assign_record_type_typmod(TupleDesc tupDesc); +extern uint64 assign_record_type_identifier(Oid type_id, int32 typmod); + extern int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2); extern size_t SharedRecordTypmodRegistryEstimate(void); diff --git a/src/pl/plpgsql/src/Makefile b/src/pl/plpgsql/src/Makefile index 91e1ada7ad..2190eab616 100644 --- a/src/pl/plpgsql/src/Makefile +++ b/src/pl/plpgsql/src/Makefile @@ -26,7 +26,7 @@ DATA = plpgsql.control plpgsql--1.0.sql plpgsql--unpackaged--1.0.sql REGRESS_OPTS = --dbname=$(PL_TESTDB) -REGRESS = plpgsql_call plpgsql_control plpgsql_transaction +REGRESS = plpgsql_call plpgsql_control plpgsql_record plpgsql_transaction all: all-lib diff --git a/src/pl/plpgsql/src/expected/plpgsql_record.out b/src/pl/plpgsql/src/expected/plpgsql_record.out new file mode 100644 index 0000000000..3f7cab2088 --- /dev/null +++ b/src/pl/plpgsql/src/expected/plpgsql_record.out @@ -0,0 +1,662 @@ +-- +-- Tests for PL/pgSQL handling of composite (record) variables +-- +create type two_int4s as (f1 int4, f2 int4); +create type two_int8s as (q1 int8, q2 int8); +-- base-case return of a composite type +create function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1,1)::two_int8s; end $$; +select retc(42); + retc +-------- + (42,1) +(1 row) + +-- ok to return a matching record type +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1::int8, 1::int8); end $$; +select retc(42); + retc +-------- + (42,1) +(1 row) + +-- we don't currently support implicit casting +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1,1); end $$; +select retc(42); +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 1. +CONTEXT: PL/pgSQL function retc(integer) while casting return value to function's return type +-- nor extra columns +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1::int8, 1::int8, 42); end $$; +select retc(42); +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (3) does not match expected column count (2). +CONTEXT: PL/pgSQL function retc(integer) while casting return value to function's return type +-- same cases with an intermediate "record" variable +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1::int8, 1::int8); return r; end $$; +select retc(42); + retc +-------- + (42,1) +(1 row) + +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1,1); return r; end $$; +select retc(42); +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 1. +CONTEXT: PL/pgSQL function retc(integer) while casting return value to function's return type +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1::int8, 1::int8, 42); return r; end $$; +select retc(42); +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (3) does not match expected column count (2). 
+CONTEXT: PL/pgSQL function retc(integer) while casting return value to function's return type +-- but, for mostly historical reasons, we do convert when assigning +-- to a named-composite-type variable +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r two_int8s; begin r := row($1::int8, 1::int8, 42); return r; end $$; +select retc(42); + retc +-------- + (42,1) +(1 row) + +do $$ declare c two_int8s; +begin c := row(1,2); raise notice 'c = %', c; end$$; +NOTICE: c = (1,2) +do $$ declare c two_int8s; +begin for c in select 1,2 loop raise notice 'c = %', c; end loop; end$$; +NOTICE: c = (1,2) +do $$ declare c4 two_int4s; c8 two_int8s; +begin + c8 := row(1,2); + c4 := c8; + c8 := c4; + raise notice 'c4 = %', c4; + raise notice 'c8 = %', c8; +end$$; +NOTICE: c4 = (1,2) +NOTICE: c8 = (1,2) +-- check passing composite result to another function +create function getq1(two_int8s) returns int8 language plpgsql as $$ +declare r two_int8s; begin r := $1; return r.q1; end $$; +select getq1(retc(344)); + getq1 +------- + 344 +(1 row) + +select getq1(row(1,2)); + getq1 +------- + 1 +(1 row) + +do $$ +declare r1 two_int8s; r2 record; x int8; +begin + r1 := retc(345); + perform getq1(r1); + x := getq1(r1); + raise notice 'x = %', x; + r2 := retc(346); + perform getq1(r2); + x := getq1(r2); + raise notice 'x = %', x; +end$$; +NOTICE: x = 345 +NOTICE: x = 346 +-- check assignments of composites +do $$ +declare r1 two_int8s; r2 two_int8s; r3 record; r4 record; +begin + r1 := row(1,2); + raise notice 'r1 = %', r1; + r1 := r1; -- shouldn't do anything + raise notice 'r1 = %', r1; + r2 := r1; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r2.q2 = r1.q1 + 3; -- check that r2 has distinct storage + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r1 := null; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r1 := row(7,11)::two_int8s; + r2 := r1; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r3 := row(1,2); + r4 := r3; + raise notice 'r3 = %', r3; + raise notice 'r4 = %', r4; + r4.f1 := r4.f1 + 3; -- check that r4 has distinct storage + raise notice 'r3 = %', r3; + raise notice 'r4 = %', r4; + r1 := r3; + raise notice 'r1 = %', r1; + r4 := r1; + raise notice 'r4 = %', r4; + r4.q2 := r4.q2 + 1; -- r4's field names have changed + raise notice 'r4 = %', r4; +end$$; +NOTICE: r1 = (1,2) +NOTICE: r1 = (1,2) +NOTICE: r1 = (1,2) +NOTICE: r2 = (1,2) +NOTICE: r1 = (1,2) +NOTICE: r2 = (1,4) +NOTICE: r1 = +NOTICE: r2 = (1,4) +NOTICE: r1 = (7,11) +NOTICE: r2 = (7,11) +NOTICE: r3 = (1,2) +NOTICE: r4 = (1,2) +NOTICE: r3 = (1,2) +NOTICE: r4 = (4,2) +NOTICE: r1 = (1,2) +NOTICE: r4 = (1,2) +NOTICE: r4 = (1,3) +-- fields of named-type vars read as null if uninitialized +do $$ +declare r1 two_int8s; +begin + raise notice 'r1 = %', r1; + raise notice 'r1.q1 = %', r1.q1; + raise notice 'r1.q2 = %', r1.q2; + raise notice 'r1 = %', r1; +end$$; +NOTICE: r1 = +NOTICE: r1.q1 = +NOTICE: r1.q2 = +NOTICE: r1 = +do $$ +declare r1 two_int8s; +begin + raise notice 'r1.q1 = %', r1.q1; + raise notice 'r1.q2 = %', r1.q2; + raise notice 'r1 = %', r1; + raise notice 'r1.nosuchfield = %', r1.nosuchfield; +end$$; +NOTICE: r1.q1 = +NOTICE: r1.q2 = +NOTICE: r1 = +ERROR: record "r1" has no field "nosuchfield" +CONTEXT: SQL statement "SELECT r1.nosuchfield" +PL/pgSQL function inline_code_block line 7 at RAISE +-- records, not so much +do $$ +declare r1 record; +begin + raise notice 'r1 = %', r1; + raise notice 'r1.f1 = %', r1.f1; + raise notice 'r1.f2 = %', r1.f2; + raise 
notice 'r1 = %', r1; +end$$; +NOTICE: r1 = +ERROR: record "r1" is not assigned yet +DETAIL: The tuple structure of a not-yet-assigned record is indeterminate. +CONTEXT: SQL statement "SELECT r1.f1" +PL/pgSQL function inline_code_block line 5 at RAISE +-- but OK if you assign first +do $$ +declare r1 record; +begin + raise notice 'r1 = %', r1; + r1 := row(1,2); + raise notice 'r1.f1 = %', r1.f1; + raise notice 'r1.f2 = %', r1.f2; + raise notice 'r1 = %', r1; + raise notice 'r1.nosuchfield = %', r1.nosuchfield; +end$$; +NOTICE: r1 = +NOTICE: r1.f1 = 1 +NOTICE: r1.f2 = 2 +NOTICE: r1 = (1,2) +ERROR: record "r1" has no field "nosuchfield" +CONTEXT: SQL statement "SELECT r1.nosuchfield" +PL/pgSQL function inline_code_block line 9 at RAISE +-- check repeated assignments to composite fields +create table some_table (id int, data text); +do $$ +declare r some_table; +begin + r := (23, 'skidoo'); + for i in 1 .. 10 loop + r.id := r.id + i; + r.data := r.data || ' ' || i; + end loop; + raise notice 'r = %', r; +end$$; +NOTICE: r = (78,"skidoo 1 2 3 4 5 6 7 8 9 10") +-- check behavior of function declared to return "record" +create function returnsrecord(int) returns record language plpgsql as +$$ begin return row($1,$1+1); end $$; +select returnsrecord(42); + returnsrecord +--------------- + (42,43) +(1 row) + +select * from returnsrecord(42) as r(x int, y int); + x | y +----+---- + 42 | 43 +(1 row) + +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (2) does not match expected column count (3). +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +select * from returnsrecord(42) as r(x int, y bigint); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 2. +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +-- same with an intermediate record variable +create or replace function returnsrecord(int) returns record language plpgsql as +$$ declare r record; begin r := row($1,$1+1); return r; end $$; +select returnsrecord(42); + returnsrecord +--------------- + (42,43) +(1 row) + +select * from returnsrecord(42) as r(x int, y int); + x | y +----+---- + 42 | 43 +(1 row) + +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (2) does not match expected column count (3). +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +select * from returnsrecord(42) as r(x int, y bigint); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 2. 
+CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +-- should work the same with a missing column in the actual result value +create table has_hole(f1 int, f2 int, f3 int); +alter table has_hole drop column f2; +create or replace function returnsrecord(int) returns record language plpgsql as +$$ begin return row($1,$1+1)::has_hole; end $$; +select returnsrecord(42); + returnsrecord +--------------- + (42,43) +(1 row) + +select * from returnsrecord(42) as r(x int, y int); + x | y +----+---- + 42 | 43 +(1 row) + +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (2) does not match expected column count (3). +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +select * from returnsrecord(42) as r(x int, y bigint); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 2. +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +-- same with an intermediate record variable +create or replace function returnsrecord(int) returns record language plpgsql as +$$ declare r record; begin r := row($1,$1+1)::has_hole; return r; end $$; +select returnsrecord(42); + returnsrecord +--------------- + (42,43) +(1 row) + +select * from returnsrecord(42) as r(x int, y int); + x | y +----+---- + 42 | 43 +(1 row) + +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (2) does not match expected column count (3). +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +select * from returnsrecord(42) as r(x int, y bigint); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 2. +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +-- check access to a field of an argument declared "record" +create function getf1(x record) returns int language plpgsql as +$$ begin return x.f1; end $$; +select getf1(1); +ERROR: function getf1(integer) does not exist +LINE 1: select getf1(1); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
+select getf1(row(1,2)); + getf1 +------- + 1 +(1 row) + +select getf1(row(1,2)::two_int8s); +ERROR: record "x" has no field "f1" +CONTEXT: PL/pgSQL function getf1(record) line 1 at RETURN +select getf1(row(1,2)); + getf1 +------- + 1 +(1 row) + +-- check behavior when assignment to FOR-loop variable requires coercion +do $$ +declare r two_int8s; +begin + for r in select i, i+1 from generate_series(1,4) i + loop + raise notice 'r = %', r; + end loop; +end$$; +NOTICE: r = (1,2) +NOTICE: r = (2,3) +NOTICE: r = (3,4) +NOTICE: r = (4,5) +-- check behavior when returning setof composite +create function returnssetofholes() returns setof has_hole language plpgsql as +$$ +declare r record; + h has_hole; +begin + return next h; + r := (1,2); + h := (3,4); + return next r; + return next h; + return next row(5,6); + return next row(7,8)::has_hole; +end$$; +select returnssetofholes(); + returnssetofholes +------------------- + (,) + (1,2) + (3,4) + (5,6) + (7,8) +(5 rows) + +create or replace function returnssetofholes() returns setof has_hole language plpgsql as +$$ +declare r record; +begin + return next r; -- fails, not assigned yet +end$$; +select returnssetofholes(); +ERROR: record "r" is not assigned yet +DETAIL: The tuple structure of a not-yet-assigned record is indeterminate. +CONTEXT: PL/pgSQL function returnssetofholes() line 4 at RETURN NEXT +create or replace function returnssetofholes() returns setof has_hole language plpgsql as +$$ +begin + return next row(1,2,3); -- fails +end$$; +select returnssetofholes(); +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (3) does not match expected column count (2). +CONTEXT: PL/pgSQL function returnssetofholes() line 3 at RETURN NEXT +-- check behavior with changes of a named rowtype +create table mutable(f1 int, f2 text); +create function sillyaddone(int) returns int language plpgsql as +$$ declare r mutable; begin r.f1 := $1; return r.f1 + 1; end $$; +select sillyaddone(42); + sillyaddone +------------- + 43 +(1 row) + +alter table mutable drop column f1; +alter table mutable add column f1 float8; +-- currently, this fails due to cached plan for "r.f1 + 1" expression +select sillyaddone(42); +ERROR: type of parameter 4 (double precision) does not match that when preparing the plan (integer) +CONTEXT: PL/pgSQL function sillyaddone(integer) line 1 at RETURN +\c - +-- but it's OK after a reconnect +select sillyaddone(42); + sillyaddone +------------- + 43 +(1 row) + +alter table mutable drop column f1; +select sillyaddone(42); -- fail +ERROR: record "r" has no field "f1" +CONTEXT: PL/pgSQL function sillyaddone(integer) line 1 at assignment +create function getf3(x mutable) returns int language plpgsql as +$$ begin return x.f3; end $$; +select getf3(null::mutable); -- doesn't work yet +ERROR: record "x" has no field "f3" +CONTEXT: SQL statement "SELECT x.f3" +PL/pgSQL function getf3(mutable) line 1 at RETURN +alter table mutable add column f3 int; +select getf3(null::mutable); -- now it works + getf3 +------- + +(1 row) + +alter table mutable drop column f3; +select getf3(null::mutable); -- fails again +ERROR: record "x" has no field "f3" +CONTEXT: PL/pgSQL function getf3(mutable) line 1 at RETURN +-- check access to system columns in a record variable +create function sillytrig() returns trigger language plpgsql as +$$begin + raise notice 'old.ctid = %', old.ctid; + raise notice 'old.tableoid = %', old.tableoid::regclass; + return new; +end$$; +create trigger mutable_trig before update on mutable 
for each row +execute procedure sillytrig(); +insert into mutable values ('foo'), ('bar'); +update mutable set f2 = f2 || ' baz'; +NOTICE: old.ctid = (0,1) +NOTICE: old.tableoid = mutable +NOTICE: old.ctid = (0,2) +NOTICE: old.tableoid = mutable +table mutable; + f2 +--------- + foo baz + bar baz +(2 rows) + +-- check returning a composite datum from a trigger +create or replace function sillytrig() returns trigger language plpgsql as +$$begin + return row(new.*); +end$$; +update mutable set f2 = f2 || ' baz'; +table mutable; + f2 +------------- + foo baz baz + bar baz baz +(2 rows) + +create or replace function sillytrig() returns trigger language plpgsql as +$$declare r record; +begin + r := row(new.*); + return r; +end$$; +update mutable set f2 = f2 || ' baz'; +table mutable; + f2 +----------------- + foo baz baz baz + bar baz baz baz +(2 rows) + +-- +-- Domains of composite +-- +create domain ordered_int8s as two_int8s check((value).q1 <= (value).q2); +create function read_ordered_int8s(p ordered_int8s) returns int8 as $$ +begin return p.q1 + p.q2; end +$$ language plpgsql; +select read_ordered_int8s(row(1, 2)); + read_ordered_int8s +-------------------- + 3 +(1 row) + +select read_ordered_int8s(row(2, 1)); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +create function build_ordered_int8s(i int8, j int8) returns ordered_int8s as $$ +begin return row(i,j); end +$$ language plpgsql; +select build_ordered_int8s(1,2); + build_ordered_int8s +--------------------- + (1,2) +(1 row) + +select build_ordered_int8s(2,1); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function build_ordered_int8s(bigint,bigint) while casting return value to function's return type +create function build_ordered_int8s_2(i int8, j int8) returns ordered_int8s as $$ +declare r record; begin r := row(i,j); return r; end +$$ language plpgsql; +select build_ordered_int8s_2(1,2); + build_ordered_int8s_2 +----------------------- + (1,2) +(1 row) + +select build_ordered_int8s_2(2,1); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function build_ordered_int8s_2(bigint,bigint) while casting return value to function's return type +create function build_ordered_int8s_3(i int8, j int8) returns ordered_int8s as $$ +declare r two_int8s; begin r := row(i,j); return r; end +$$ language plpgsql; +select build_ordered_int8s_3(1,2); + build_ordered_int8s_3 +----------------------- + (1,2) +(1 row) + +select build_ordered_int8s_3(2,1); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function build_ordered_int8s_3(bigint,bigint) while casting return value to function's return type +create function build_ordered_int8s_4(i int8, j int8) returns ordered_int8s as $$ +declare r ordered_int8s; begin r := row(i,j); return r; end +$$ language plpgsql; +select build_ordered_int8s_4(1,2); + build_ordered_int8s_4 +----------------------- + (1,2) +(1 row) + +select build_ordered_int8s_4(2,1); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function build_ordered_int8s_4(bigint,bigint) line 2 at assignment +create function build_ordered_int8s_a(i int8, j int8) returns ordered_int8s[] as $$ +begin return array[row(i,j), row(i,j+1)]; end +$$ language plpgsql; +select build_ordered_int8s_a(1,2); + build_ordered_int8s_a +----------------------- + 
{"(1,2)","(1,3)"} +(1 row) + +select build_ordered_int8s_a(2,1); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function build_ordered_int8s_a(bigint,bigint) while casting return value to function's return type +-- check field assignment +do $$ +declare r ordered_int8s; +begin + r.q1 := null; + r.q2 := 43; + r.q1 := 42; + r.q2 := 41; -- fail +end$$; +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment +-- check whole-row assignment +do $$ +declare r ordered_int8s; +begin + r := null; + r := row(null,null); + r := row(1,2); + r := row(2,1); -- fail +end$$; +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment +-- check assignment in for-loop +do $$ +declare r ordered_int8s; +begin + for r in values (1,2),(3,4),(6,5) loop + raise notice 'r = %', r; + end loop; +end$$; +NOTICE: r = (1,2) +NOTICE: r = (3,4) +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function inline_code_block line 4 at FOR over SELECT rows +-- check behavior with toastable fields, too +create type two_texts as (f1 text, f2 text); +create domain ordered_texts as two_texts check((value).f1 <= (value).f2); +create table sometable (id int, a text, b text); +-- b should be compressed, but in-line +insert into sometable values (1, 'a', repeat('ffoob',1000)); +-- this b should be out-of-line +insert into sometable values (2, 'a', repeat('ffoob',100000)); +-- this pair should fail the domain check +insert into sometable values (3, 'z', repeat('ffoob',100000)); +do $$ +declare d ordered_texts; +begin + for d in select a, b from sometable loop + raise notice 'succeeded at "%"', d.f1; + end loop; +end$$; +NOTICE: succeeded at "a" +NOTICE: succeeded at "a" +ERROR: value for domain ordered_texts violates check constraint "ordered_texts_check" +CONTEXT: PL/pgSQL function inline_code_block line 4 at FOR over SELECT rows +do $$ +declare r record; d ordered_texts; +begin + for r in select * from sometable loop + raise notice 'processing row %', r.id; + d := row(r.a, r.b); + end loop; +end$$; +NOTICE: processing row 1 +NOTICE: processing row 2 +NOTICE: processing row 3 +ERROR: value for domain ordered_texts violates check constraint "ordered_texts_check" +CONTEXT: PL/pgSQL function inline_code_block line 6 at assignment +do $$ +declare r record; d ordered_texts; +begin + for r in select * from sometable loop + raise notice 'processing row %', r.id; + d := null; + d.f1 := r.a; + d.f2 := r.b; + end loop; +end$$; +NOTICE: processing row 1 +NOTICE: processing row 2 +NOTICE: processing row 3 +ERROR: value for domain ordered_texts violates check constraint "ordered_texts_check" +CONTEXT: PL/pgSQL function inline_code_block line 8 at assignment diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index 43de3f752c..09ecaec635 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -32,6 +32,7 @@ #include "utils/regproc.h" #include "utils/rel.h" #include "utils/syscache.h" +#include "utils/typcache.h" #include "plpgsql.h" @@ -104,7 +105,6 @@ static Node *plpgsql_param_ref(ParseState *pstate, ParamRef *pref); static Node *resolve_column_ref(ParseState *pstate, PLpgSQL_expr *expr, ColumnRef *cref, bool error_if_no_field); static Node *make_datum_param(PLpgSQL_expr *expr, int dno, int 
location); -static PLpgSQL_row *build_row_from_class(Oid classOid); static PLpgSQL_row *build_row_from_vars(PLpgSQL_variable **vars, int numvars); static PLpgSQL_type *build_datatype(HeapTuple typeTup, int32 typmod, Oid collation); static void plpgsql_start_datums(void); @@ -425,8 +425,7 @@ do_compile(FunctionCallInfo fcinfo, /* Disallow pseudotype argument */ /* (note we already replaced polymorphic types) */ /* (build_variable would do this, but wrong message) */ - if (argdtype->ttype != PLPGSQL_TTYPE_SCALAR && - argdtype->ttype != PLPGSQL_TTYPE_ROW) + if (argdtype->ttype == PLPGSQL_TTYPE_PSEUDO) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("PL/pgSQL functions cannot accept type %s", @@ -447,8 +446,8 @@ do_compile(FunctionCallInfo fcinfo, } else { - Assert(argvariable->dtype == PLPGSQL_DTYPE_ROW); - argitemtype = PLPGSQL_NSTYPE_ROW; + Assert(argvariable->dtype == PLPGSQL_DTYPE_REC); + argitemtype = PLPGSQL_NSTYPE_REC; } /* Remember arguments in appropriate arrays */ @@ -557,29 +556,25 @@ do_compile(FunctionCallInfo fcinfo, format_type_be(rettypeid)))); } - if (typeStruct->typrelid != InvalidOid || - rettypeid == RECORDOID) - function->fn_retistuple = true; - else - { - function->fn_retbyval = typeStruct->typbyval; - function->fn_rettyplen = typeStruct->typlen; + function->fn_retistuple = type_is_rowtype(rettypeid); + function->fn_retbyval = typeStruct->typbyval; + function->fn_rettyplen = typeStruct->typlen; - /* - * install $0 reference, but only for polymorphic return - * types, and not when the return is specified through an - * output parameter. - */ - if (IsPolymorphicType(procStruct->prorettype) && - num_out_args == 0) - { - (void) plpgsql_build_variable("$0", 0, - build_datatype(typeTup, - -1, - function->fn_input_collation), - true); - } + /* + * install $0 reference, but only for polymorphic return + * types, and not when the return is specified through an + * output parameter. 
+ */ + if (IsPolymorphicType(procStruct->prorettype) && + num_out_args == 0) + { + (void) plpgsql_build_variable("$0", 0, + build_datatype(typeTup, + -1, + function->fn_input_collation), + true); } + ReleaseSysCache(typeTup); } break; @@ -599,11 +594,11 @@ do_compile(FunctionCallInfo fcinfo, errhint("The arguments of the trigger can be accessed through TG_NARGS and TG_ARGV instead."))); /* Add the record for referencing NEW ROW */ - rec = plpgsql_build_record("new", 0, true); + rec = plpgsql_build_record("new", 0, RECORDOID, true); function->new_varno = rec->dno; /* Add the record for referencing OLD ROW */ - rec = plpgsql_build_record("old", 0, true); + rec = plpgsql_build_record("old", 0, RECORDOID, true); function->old_varno = rec->dno; /* Add the variable tg_name */ @@ -1240,19 +1235,22 @@ resolve_column_ref(ParseState *pstate, PLpgSQL_expr *expr, if (nnames == nnames_field) { /* colname could be a field in this record */ + PLpgSQL_rec *rec = (PLpgSQL_rec *) estate->datums[nse->itemno]; int i; /* search for a datum referencing this field */ - for (i = 0; i < estate->ndatums; i++) + i = rec->firstfield; + while (i >= 0) { PLpgSQL_recfield *fld = (PLpgSQL_recfield *) estate->datums[i]; - if (fld->dtype == PLPGSQL_DTYPE_RECFIELD && - fld->recparentno == nse->itemno && - strcmp(fld->fieldname, colname) == 0) + Assert(fld->dtype == PLPGSQL_DTYPE_RECFIELD && + fld->recparentno == nse->itemno); + if (strcmp(fld->fieldname, colname) == 0) { return make_datum_param(expr, i, cref->location); } + i = fld->nextfield; } /* @@ -1270,34 +1268,6 @@ resolve_column_ref(ParseState *pstate, PLpgSQL_expr *expr, parser_errposition(pstate, cref->location))); } break; - case PLPGSQL_NSTYPE_ROW: - if (nnames == nnames_wholerow) - return make_datum_param(expr, nse->itemno, cref->location); - if (nnames == nnames_field) - { - /* colname could be a field in this row */ - PLpgSQL_row *row = (PLpgSQL_row *) estate->datums[nse->itemno]; - int i; - - for (i = 0; i < row->nfields; i++) - { - if (row->fieldnames[i] && - strcmp(row->fieldnames[i], colname) == 0) - { - return make_datum_param(expr, row->varnos[i], - cref->location); - } - } - /* Not found, so throw error or return NULL */ - if (error_if_no_field) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - (nnames_field == 1) ? name1 : name2, - colname), - parser_errposition(pstate, cref->location))); - } - break; default: elog(ERROR, "unrecognized plpgsql itemtype: %d", nse->itemtype); } @@ -1385,7 +1355,6 @@ plpgsql_parse_word(char *word1, const char *yytxt, switch (ns->itemtype) { case PLPGSQL_NSTYPE_VAR: - case PLPGSQL_NSTYPE_ROW: case PLPGSQL_NSTYPE_REC: wdatum->datum = plpgsql_Datums[ns->itemno]; wdatum->ident = word1; @@ -1461,14 +1430,11 @@ plpgsql_parse_dblword(char *word1, char *word2, * datum whether it is or not --- any error will be * detected later. */ + PLpgSQL_rec *rec; PLpgSQL_recfield *new; - new = palloc(sizeof(PLpgSQL_recfield)); - new->dtype = PLPGSQL_DTYPE_RECFIELD; - new->fieldname = pstrdup(word2); - new->recparentno = ns->itemno; - - plpgsql_adddatum((PLpgSQL_datum *) new); + rec = (PLpgSQL_rec *) (plpgsql_Datums[ns->itemno]); + new = plpgsql_build_recfield(rec, word2); wdatum->datum = (PLpgSQL_datum *) new; } @@ -1482,43 +1448,6 @@ plpgsql_parse_dblword(char *word1, char *word2, wdatum->idents = idents; return true; - case PLPGSQL_NSTYPE_ROW: - if (nnames == 1) - { - /* - * First word is a row name, so second word could be a - * field in this row. Again, no error now if it - * isn't. 
- */ - PLpgSQL_row *row; - int i; - - row = (PLpgSQL_row *) (plpgsql_Datums[ns->itemno]); - for (i = 0; i < row->nfields; i++) - { - if (row->fieldnames[i] && - strcmp(row->fieldnames[i], word2) == 0) - { - wdatum->datum = plpgsql_Datums[row->varnos[i]]; - wdatum->ident = NULL; - wdatum->quoted = false; /* not used */ - wdatum->idents = idents; - return true; - } - } - /* fall through to return CWORD */ - } - else - { - /* Block-qualified reference to row variable. */ - wdatum->datum = plpgsql_Datums[ns->itemno]; - wdatum->ident = NULL; - wdatum->quoted = false; /* not used */ - wdatum->idents = idents; - return true; - } - break; - default: break; } @@ -1572,14 +1501,11 @@ plpgsql_parse_tripword(char *word1, char *word2, char *word3, * words 1/2 are a record name, so third word could be * a field in this record. */ + PLpgSQL_rec *rec; PLpgSQL_recfield *new; - new = palloc(sizeof(PLpgSQL_recfield)); - new->dtype = PLPGSQL_DTYPE_RECFIELD; - new->fieldname = pstrdup(word3); - new->recparentno = ns->itemno; - - plpgsql_adddatum((PLpgSQL_datum *) new); + rec = (PLpgSQL_rec *) (plpgsql_Datums[ns->itemno]); + new = plpgsql_build_recfield(rec, word3); wdatum->datum = (PLpgSQL_datum *) new; wdatum->ident = NULL; @@ -1588,32 +1514,6 @@ plpgsql_parse_tripword(char *word1, char *word2, char *word3, return true; } - case PLPGSQL_NSTYPE_ROW: - { - /* - * words 1/2 are a row name, so third word could be a - * field in this row. - */ - PLpgSQL_row *row; - int i; - - row = (PLpgSQL_row *) (plpgsql_Datums[ns->itemno]); - for (i = 0; i < row->nfields; i++) - { - if (row->fieldnames[i] && - strcmp(row->fieldnames[i], word3) == 0) - { - wdatum->datum = plpgsql_Datums[row->varnos[i]]; - wdatum->ident = NULL; - wdatum->quoted = false; /* not used */ - wdatum->idents = idents; - return true; - } - } - /* fall through to return CWORD */ - break; - } - default: break; } @@ -1864,8 +1764,8 @@ plpgsql_parse_cwordrowtype(List *idents) * plpgsql_build_variable - build a datum-array entry of a given * datatype * - * The returned struct may be a PLpgSQL_var, PLpgSQL_row, or - * PLpgSQL_rec depending on the given datatype, and is allocated via + * The returned struct may be a PLpgSQL_var or PLpgSQL_rec + * depending on the given datatype, and is allocated via * palloc. The struct is automatically added to the current datum * array, and optionally to the current namespace. 
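A hedged illustration of the new classification (not patch content; mycompositeoid is a placeholder for some composite type's OID): declaring a variable of a named composite type now yields a PLpgSQL_rec carrying the type's OID, rather than a PLpgSQL_row with one sub-variable per column.

    /* illustrative only */
    PLpgSQL_type     *dtype;
    PLpgSQL_variable *var;

    dtype = plpgsql_build_datatype(mycompositeoid, -1, InvalidOid);
    var = plpgsql_build_variable("v", 0, dtype, true);

    Assert(var->dtype == PLPGSQL_DTYPE_REC);
    Assert(((PLpgSQL_rec *) var)->rectypeid == mycompositeoid);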
*/ @@ -1902,31 +1802,13 @@ plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type *dtype, result = (PLpgSQL_variable *) var; break; } - case PLPGSQL_TTYPE_ROW: - { - /* Composite type -- build a row variable */ - PLpgSQL_row *row; - - row = build_row_from_class(dtype->typrelid); - - row->dtype = PLPGSQL_DTYPE_ROW; - row->refname = pstrdup(refname); - row->lineno = lineno; - - plpgsql_adddatum((PLpgSQL_datum *) row); - if (add2namespace) - plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW, - row->dno, - refname); - result = (PLpgSQL_variable *) row; - break; - } case PLPGSQL_TTYPE_REC: { - /* "record" type -- build a record variable */ + /* Composite type -- build a record variable */ PLpgSQL_rec *rec; - rec = plpgsql_build_record(refname, lineno, add2namespace); + rec = plpgsql_build_record(refname, lineno, dtype->typoid, + add2namespace); result = (PLpgSQL_variable *) rec; break; } @@ -1950,7 +1832,8 @@ plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type *dtype, * Build empty named record variable, and optionally add it to namespace */ PLpgSQL_rec * -plpgsql_build_record(const char *refname, int lineno, bool add2namespace) +plpgsql_build_record(const char *refname, int lineno, Oid rectypeid, + bool add2namespace) { PLpgSQL_rec *rec; @@ -1958,10 +1841,9 @@ plpgsql_build_record(const char *refname, int lineno, bool add2namespace) rec->dtype = PLPGSQL_DTYPE_REC; rec->refname = pstrdup(refname); rec->lineno = lineno; - rec->tup = NULL; - rec->tupdesc = NULL; - rec->freetup = false; - rec->freetupdesc = false; + rec->rectypeid = rectypeid; + rec->firstfield = -1; + rec->erh = NULL; plpgsql_adddatum((PLpgSQL_datum *) rec); if (add2namespace) plpgsql_ns_additem(PLPGSQL_NSTYPE_REC, rec->dno, rec->refname); @@ -1969,104 +1851,9 @@ plpgsql_build_record(const char *refname, int lineno, bool add2namespace) return rec; } -/* - * Build a row-variable data structure given the pg_class OID. - */ -static PLpgSQL_row * -build_row_from_class(Oid classOid) -{ - PLpgSQL_row *row; - Relation rel; - Form_pg_class classStruct; - const char *relname; - int i; - - /* - * Open the relation to get info. - */ - rel = relation_open(classOid, AccessShareLock); - classStruct = RelationGetForm(rel); - relname = RelationGetRelationName(rel); - - /* - * Accept relation, sequence, view, materialized view, composite type, or - * foreign table. - */ - if (classStruct->relkind != RELKIND_RELATION && - classStruct->relkind != RELKIND_SEQUENCE && - classStruct->relkind != RELKIND_VIEW && - classStruct->relkind != RELKIND_MATVIEW && - classStruct->relkind != RELKIND_COMPOSITE_TYPE && - classStruct->relkind != RELKIND_FOREIGN_TABLE && - classStruct->relkind != RELKIND_PARTITIONED_TABLE) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("relation \"%s\" is not a table", relname))); - - /* - * Create a row datum entry and all the required variables that it will - * point to. 
- */ - row = palloc0(sizeof(PLpgSQL_row)); - row->dtype = PLPGSQL_DTYPE_ROW; - row->rowtupdesc = CreateTupleDescCopy(RelationGetDescr(rel)); - row->nfields = classStruct->relnatts; - row->fieldnames = palloc(sizeof(char *) * row->nfields); - row->varnos = palloc(sizeof(int) * row->nfields); - - for (i = 0; i < row->nfields; i++) - { - Form_pg_attribute attrStruct; - - /* - * Get the attribute and check for dropped column - */ - attrStruct = TupleDescAttr(row->rowtupdesc, i); - - if (!attrStruct->attisdropped) - { - char *attname; - char refname[(NAMEDATALEN * 2) + 100]; - PLpgSQL_variable *var; - - attname = NameStr(attrStruct->attname); - snprintf(refname, sizeof(refname), "%s.%s", relname, attname); - - /* - * Create the internal variable for the field - * - * We know if the table definitions contain a default value or if - * the field is declared in the table as NOT NULL. But it's - * possible to create a table field as NOT NULL without a default - * value and that would lead to problems later when initializing - * the variables due to entering a block at execution time. Thus - * we ignore this information for now. - */ - var = plpgsql_build_variable(refname, 0, - plpgsql_build_datatype(attrStruct->atttypid, - attrStruct->atttypmod, - attrStruct->attcollation), - false); - - /* Add the variable to the row */ - row->fieldnames[i] = attname; - row->varnos[i] = var->dno; - } - else - { - /* Leave a hole in the row structure for the dropped col */ - row->fieldnames[i] = NULL; - row->varnos[i] = -1; - } - } - - relation_close(rel, AccessShareLock); - - return row; -} - /* * Build a row-variable data structure given the component variables. + * Include a rowtupdesc, since we will need to materialize the row result. */ static PLpgSQL_row * build_row_from_vars(PLpgSQL_variable **vars, int numvars) @@ -2084,9 +1871,9 @@ build_row_from_vars(PLpgSQL_variable **vars, int numvars) for (i = 0; i < numvars; i++) { PLpgSQL_variable *var = vars[i]; - Oid typoid = RECORDOID; - int32 typmod = -1; - Oid typcoll = InvalidOid; + Oid typoid; + int32 typmod; + Oid typcoll; switch (var->dtype) { @@ -2097,19 +1884,17 @@ build_row_from_vars(PLpgSQL_variable **vars, int numvars) break; case PLPGSQL_DTYPE_REC: - break; - - case PLPGSQL_DTYPE_ROW: - if (((PLpgSQL_row *) var)->rowtupdesc) - { - typoid = ((PLpgSQL_row *) var)->rowtupdesc->tdtypeid; - typmod = ((PLpgSQL_row *) var)->rowtupdesc->tdtypmod; - /* composite types have no collation */ - } + typoid = ((PLpgSQL_rec *) var)->rectypeid; + typmod = -1; /* don't know typmod, if it's used at all */ + typcoll = InvalidOid; /* composite types have no collation */ break; default: elog(ERROR, "unrecognized dtype: %d", var->dtype); + typoid = InvalidOid; /* keep compiler quiet */ + typmod = 0; + typcoll = InvalidOid; + break; } row->fieldnames[i] = var->refname; @@ -2125,6 +1910,46 @@ build_row_from_vars(PLpgSQL_variable **vars, int numvars) return row; } +/* + * Build a RECFIELD datum for the named field of the specified record variable + * + * If there's already such a datum, just return it; we don't need duplicates. 
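As a reading aid (a hedged sketch, not part of the patch), the per-record field chain being built here can be walked as below; "datums" stands for whichever datum array applies (plpgsql_Datums at compile time, estate->datums at run time), and rec is a PLpgSQL_rec.

    /* illustrative only: visit every RECFIELD belonging to one record */
    int         i;

    for (i = rec->firstfield; i >= 0;
         i = ((PLpgSQL_recfield *) datums[i])->nextfield)
    {
        PLpgSQL_recfield *fld = (PLpgSQL_recfield *) datums[i];

        Assert(fld->dtype == PLPGSQL_DTYPE_RECFIELD &&
               fld->recparentno == rec->dno);
        /* fld->fieldname is the field of interest here */
    }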
+ */ +PLpgSQL_recfield * +plpgsql_build_recfield(PLpgSQL_rec *rec, const char *fldname) +{ + PLpgSQL_recfield *recfield; + int i; + + /* search for an existing datum referencing this field */ + i = rec->firstfield; + while (i >= 0) + { + PLpgSQL_recfield *fld = (PLpgSQL_recfield *) plpgsql_Datums[i]; + + Assert(fld->dtype == PLPGSQL_DTYPE_RECFIELD && + fld->recparentno == rec->dno); + if (strcmp(fld->fieldname, fldname) == 0) + return fld; + i = fld->nextfield; + } + + /* nope, so make a new one */ + recfield = palloc0(sizeof(PLpgSQL_recfield)); + recfield->dtype = PLPGSQL_DTYPE_RECFIELD; + recfield->fieldname = pstrdup(fldname); + recfield->recparentno = rec->dno; + recfield->rectupledescid = INVALID_TUPLEDESC_IDENTIFIER; + + plpgsql_adddatum((PLpgSQL_datum *) recfield); + + /* now we can link it into the parent's chain */ + recfield->nextfield = rec->firstfield; + rec->firstfield = recfield->dno; + + return recfield; +} + /* * plpgsql_build_datatype * Build PLpgSQL_type struct given type OID, typmod, and collation. @@ -2171,14 +1996,18 @@ build_datatype(HeapTuple typeTup, int32 typmod, Oid collation) switch (typeStruct->typtype) { case TYPTYPE_BASE: - case TYPTYPE_DOMAIN: case TYPTYPE_ENUM: case TYPTYPE_RANGE: typ->ttype = PLPGSQL_TTYPE_SCALAR; break; case TYPTYPE_COMPOSITE: - Assert(OidIsValid(typeStruct->typrelid)); - typ->ttype = PLPGSQL_TTYPE_ROW; + typ->ttype = PLPGSQL_TTYPE_REC; + break; + case TYPTYPE_DOMAIN: + if (type_is_rowtype(typeStruct->typbasetype)) + typ->ttype = PLPGSQL_TTYPE_REC; + else + typ->ttype = PLPGSQL_TTYPE_SCALAR; break; case TYPTYPE_PSEUDO: if (typ->typoid == RECORDOID) @@ -2194,7 +2023,6 @@ build_datatype(HeapTuple typeTup, int32 typmod, Oid collation) typ->typlen = typeStruct->typlen; typ->typbyval = typeStruct->typbyval; typ->typtype = typeStruct->typtype; - typ->typrelid = typeStruct->typrelid; typ->collation = typeStruct->typcollation; if (OidIsValid(collation) && OidIsValid(typ->collation)) typ->collation = collation; diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index 4478c5332e..7612902e8f 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -232,6 +232,8 @@ static HTAB *shared_cast_hash = NULL; /************************************************************ * Local function forward declarations ************************************************************/ +static void coerce_function_result_tuple(PLpgSQL_execstate *estate, + TupleDesc tupdesc); static void plpgsql_exec_error_callback(void *arg); static PLpgSQL_datum *copy_plpgsql_datum(PLpgSQL_datum *datum); static MemoryContext get_stmt_mcontext(PLpgSQL_execstate *estate); @@ -291,9 +293,9 @@ static int exec_stmt_dynexecute(PLpgSQL_execstate *estate, static int exec_stmt_dynfors(PLpgSQL_execstate *estate, PLpgSQL_stmt_dynfors *stmt); static int exec_stmt_commit(PLpgSQL_execstate *estate, - PLpgSQL_stmt_commit *stmt); + PLpgSQL_stmt_commit *stmt); static int exec_stmt_rollback(PLpgSQL_execstate *estate, - PLpgSQL_stmt_rollback *stmt); + PLpgSQL_stmt_rollback *stmt); static void plpgsql_estate_setup(PLpgSQL_execstate *estate, PLpgSQL_function *func, @@ -349,7 +351,7 @@ static ParamListInfo setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr); static ParamExternData *plpgsql_param_fetch(ParamListInfo params, int paramid, bool speculative, - ParamExternData *prm); + ParamExternData *workspace); static void plpgsql_param_compile(ParamListInfo params, Param *param, ExprState *state, Datum *resv, bool *resnull); @@ -357,19 +359,35 @@ static void 
plpgsql_param_eval_var(ExprState *state, ExprEvalStep *op, ExprContext *econtext); static void plpgsql_param_eval_var_ro(ExprState *state, ExprEvalStep *op, ExprContext *econtext); -static void plpgsql_param_eval_non_var(ExprState *state, ExprEvalStep *op, +static void plpgsql_param_eval_recfield(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); +static void plpgsql_param_eval_generic(ExprState *state, ExprEvalStep *op, ExprContext *econtext); +static void plpgsql_param_eval_generic_ro(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); static void exec_move_row(PLpgSQL_execstate *estate, PLpgSQL_variable *target, HeapTuple tup, TupleDesc tupdesc); +static ExpandedRecordHeader *make_expanded_record_for_rec(PLpgSQL_execstate *estate, + PLpgSQL_rec *rec, + TupleDesc srctupdesc, + ExpandedRecordHeader *srcerh); +static void exec_move_row_from_fields(PLpgSQL_execstate *estate, + PLpgSQL_variable *target, + ExpandedRecordHeader *newerh, + Datum *values, bool *nulls, + TupleDesc tupdesc); +static bool compatible_tupdescs(TupleDesc src_tupdesc, TupleDesc dst_tupdesc); static HeapTuple make_tuple_from_row(PLpgSQL_execstate *estate, PLpgSQL_row *row, TupleDesc tupdesc); -static HeapTuple get_tuple_from_datum(Datum value); -static TupleDesc get_tupdesc_from_datum(Datum value); +static TupleDesc deconstruct_composite_datum(Datum value, + HeapTupleData *tmptup); static void exec_move_row_from_datum(PLpgSQL_execstate *estate, PLpgSQL_variable *target, Datum value); +static void instantiate_empty_record_variable(PLpgSQL_execstate *estate, + PLpgSQL_rec *rec); static char *convert_value_to_string(PLpgSQL_execstate *estate, Datum value, Oid valtype); static Datum exec_cast_value(PLpgSQL_execstate *estate, @@ -387,6 +405,8 @@ static void assign_simple_var(PLpgSQL_execstate *estate, PLpgSQL_var *var, Datum newvalue, bool isnull, bool freeable); static void assign_text_var(PLpgSQL_execstate *estate, PLpgSQL_var *var, const char *str); +static void assign_record_var(PLpgSQL_execstate *estate, PLpgSQL_rec *rec, + ExpandedRecordHeader *erh); static PreparedParamsData *exec_eval_using_params(PLpgSQL_execstate *estate, List *params); static Portal exec_dynquery_with_params(PLpgSQL_execstate *estate, @@ -482,7 +502,7 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, /* take ownership of R/W object */ assign_simple_var(&estate, var, TransferExpandedObject(var->value, - CurrentMemoryContext), + estate.datum_context), false, true); } @@ -495,7 +515,7 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, /* flat array, so force to expanded form */ assign_simple_var(&estate, var, expand_array(var->value, - CurrentMemoryContext, + estate.datum_context, NULL), false, true); @@ -504,21 +524,21 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, } break; - case PLPGSQL_DTYPE_ROW: + case PLPGSQL_DTYPE_REC: { - PLpgSQL_row *row = (PLpgSQL_row *) estate.datums[n]; + PLpgSQL_rec *rec = (PLpgSQL_rec *) estate.datums[n]; if (!fcinfo->argnull[i]) { /* Assign row value from composite datum */ exec_move_row_from_datum(&estate, - (PLpgSQL_variable *) row, + (PLpgSQL_variable *) rec, fcinfo->arg[i]); } else { /* If arg is null, treat it as an empty row */ - exec_move_row(&estate, (PLpgSQL_variable *) row, + exec_move_row(&estate, (PLpgSQL_variable *) rec, NULL, NULL); } /* clean up after exec_move_row() */ @@ -582,15 +602,12 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, /* If we produced any tuples, send back the result 
*/ if (estate.tuple_store) { - rsi->setResult = estate.tuple_store; - if (estate.rettupdesc) - { - MemoryContext oldcxt; + MemoryContext oldcxt; - oldcxt = MemoryContextSwitchTo(estate.tuple_store_cxt); - rsi->setDesc = CreateTupleDescCopy(estate.rettupdesc); - MemoryContextSwitchTo(oldcxt); - } + rsi->setResult = estate.tuple_store; + oldcxt = MemoryContextSwitchTo(estate.tuple_store_cxt); + rsi->setDesc = CreateTupleDescCopy(estate.tuple_store_desc); + MemoryContextSwitchTo(oldcxt); } estate.retval = (Datum) 0; fcinfo->isnull = true; @@ -598,62 +615,80 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, else if (!estate.retisnull) { if (!func->fn_rettype) - { ereport(ERROR, (errmsg("cannot return a value from a procedure"))); - } + /* + * Cast result value to function's declared result type, and copy it + * out to the upper executor memory context. We must treat tuple + * results specially in order to deal with cases like rowtypes + * involving dropped columns. + */ if (estate.retistuple) { - /* - * We have to check that the returned tuple actually matches the - * expected result type. XXX would be better to cache the tupdesc - * instead of repeating get_call_result_type() - */ - HeapTuple rettup = (HeapTuple) DatumGetPointer(estate.retval); - TupleDesc tupdesc; - TupleConversionMap *tupmap; - - switch (get_call_result_type(fcinfo, NULL, &tupdesc)) + /* Don't need coercion if rowtype is known to match */ + if (func->fn_rettype == estate.rettype && + func->fn_rettype != RECORDOID) { - case TYPEFUNC_COMPOSITE: - /* got the expected result rowtype, now check it */ - tupmap = convert_tuples_by_position(estate.rettupdesc, - tupdesc, - gettext_noop("returned record type does not match expected record type")); - /* it might need conversion */ - if (tupmap) - rettup = do_convert_tuple(rettup, tupmap); - /* no need to free map, we're about to return anyway */ - break; - case TYPEFUNC_RECORD: - - /* - * Failed to determine actual type of RECORD. We could - * raise an error here, but what this means in practice is - * that the caller is expecting any old generic rowtype, - * so we don't really need to be restrictive. Pass back - * the generated result type, instead. - */ - tupdesc = estate.rettupdesc; - if (tupdesc == NULL) /* shouldn't happen */ - elog(ERROR, "return type must be a row type"); - break; - default: - /* shouldn't get here if retistuple is true ... */ - elog(ERROR, "return type must be a row type"); - break; + /* + * Copy the tuple result into upper executor memory context. + * However, if we have a R/W expanded datum, we can just + * transfer its ownership out to the upper context. + */ + estate.retval = SPI_datumTransfer(estate.retval, + false, + -1); } + else + { + /* + * Need to look up the expected result type. XXX would be + * better to cache the tupdesc instead of repeating + * get_call_result_type(), but the only easy place to save it + * is in the PLpgSQL_function struct, and that's too + * long-lived: composite types could change during the + * existence of a PLpgSQL_function. + */ + Oid resultTypeId; + TupleDesc tupdesc; - /* - * Copy tuple to upper executor memory, as a tuple Datum. Make - * sure it is labeled with the caller-supplied tuple type. 
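When the declared result type matches the value's runtime type (and is not RECORD), the code above hands the composite result back with SPI_datumTransfer() rather than flattening and re-forming it; for a read/write expanded datum that is just a change of ownership, not a physical copy. A minimal standalone sketch of the transfer-versus-copy idea follows; the Context/Value types and helper names are invented for illustration and are not PostgreSQL APIs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* toy "memory context" and value: ownership is a pointer, not a second copy */
typedef struct Context { const char *name; } Context;
typedef struct Value { Context *owner; size_t len; char *data; } Value;

/* deep copy: duplicate the payload into the destination context */
static Value *value_copy(const Value *src, Context *dst)
{
    Value *v = malloc(sizeof(Value));

    v->owner = dst;
    v->len = src->len;
    v->data = malloc(src->len);
    memcpy(v->data, src->data, src->len);
    return v;
}

/* transfer: no payload copy, just change which context owns the value */
static Value *value_transfer(Value *src, Context *dst)
{
    src->owner = dst;
    return src;
}

int main(int argc, char **argv)
{
    Context upper = {"upper executor"};
    Context fn = {"function"};
    Value result = {&fn, 6, "hello"};
    Value *ret;

    (void) argv;
    /* take the cheap path unless the caller forces a physical copy */
    if (argc < 2)
        ret = value_transfer(&result, &upper);
    else
        ret = value_copy(&result, &upper);
    printf("\"%s\" is now owned by the %s context\n", ret->data, ret->owner->name);
    return 0;
}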
- */ - estate.retval = PointerGetDatum(SPI_returntuple(rettup, tupdesc)); + switch (get_call_result_type(fcinfo, &resultTypeId, &tupdesc)) + { + case TYPEFUNC_COMPOSITE: + /* got the expected result rowtype, now coerce it */ + coerce_function_result_tuple(&estate, tupdesc); + break; + case TYPEFUNC_COMPOSITE_DOMAIN: + /* got the expected result rowtype, now coerce it */ + coerce_function_result_tuple(&estate, tupdesc); + /* and check domain constraints */ + /* XXX allowing caching here would be good, too */ + domain_check(estate.retval, false, resultTypeId, + NULL, NULL); + break; + case TYPEFUNC_RECORD: + + /* + * Failed to determine actual type of RECORD. We + * could raise an error here, but what this means in + * practice is that the caller is expecting any old + * generic rowtype, so we don't really need to be + * restrictive. Pass back the generated result as-is. + */ + estate.retval = SPI_datumTransfer(estate.retval, + false, + -1); + break; + default: + /* shouldn't get here if retistuple is true ... */ + elog(ERROR, "return type must be a row type"); + break; + } + } } else { - /* Cast value to proper type */ + /* Scalar case: use exec_cast_value */ estate.retval = exec_cast_value(&estate, estate.retval, &fcinfo->isnull, @@ -699,6 +734,94 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, return estate.retval; } +/* + * Helper for plpgsql_exec_function: coerce composite result to the specified + * tuple descriptor, and copy it out to upper executor memory. This is split + * out mostly for cosmetic reasons --- the logic would be very deeply nested + * otherwise. + * + * estate->retval is updated in-place. + */ +static void +coerce_function_result_tuple(PLpgSQL_execstate *estate, TupleDesc tupdesc) +{ + HeapTuple rettup; + TupleDesc retdesc; + TupleConversionMap *tupmap; + + /* We assume exec_stmt_return verified that result is composite */ + Assert(type_is_rowtype(estate->rettype)); + + /* We can special-case expanded records for speed */ + if (VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(estate->retval))) + { + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) DatumGetEOHP(estate->retval); + + Assert(erh->er_magic == ER_MAGIC); + + /* Extract record's TupleDesc */ + retdesc = expanded_record_get_tupdesc(erh); + + /* check rowtype compatibility */ + tupmap = convert_tuples_by_position(retdesc, + tupdesc, + gettext_noop("returned record type does not match expected record type")); + + /* it might need conversion */ + if (tupmap) + { + rettup = expanded_record_get_tuple(erh); + Assert(rettup); + rettup = do_convert_tuple(rettup, tupmap); + + /* + * Copy tuple to upper executor memory, as a tuple Datum. Make + * sure it is labeled with the caller-supplied tuple type. + */ + estate->retval = PointerGetDatum(SPI_returntuple(rettup, tupdesc)); + /* no need to free map, we're about to return anyway */ + } + else + { + /* + * We need only copy result into upper executor memory context. + * However, if we have a R/W expanded datum, we can just transfer + * its ownership out to the upper executor context. 
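In both branches of coerce_function_result_tuple(), convert_tuples_by_position() returns NULL when the source row already has the destination layout, which is what lets the do_convert_tuple() step be skipped entirely. A rough standalone sketch of that "build a map only when one is needed" pattern, using simplified row descriptors (none of these names are PostgreSQL APIs):

#include <stdio.h>
#include <stdlib.h>

typedef struct RowDesc { int natts; const int *atttypid; } RowDesc;

/* return a source-to-destination position map, or NULL if none is needed */
static int *build_position_map(const RowDesc *src, const RowDesc *dst)
{
    /* the sketch assumes equal column counts; the real code raises an error
     * when the two row types cannot be reconciled */
    int *map = malloc(dst->natts * sizeof(int));
    int identical = 1;

    for (int i = 0; i < dst->natts; i++)
    {
        map[i] = i;         /* positional: i-th source column feeds i-th dest */
        if (src->atttypid[i] != dst->atttypid[i])
            identical = 0;  /* type mismatch: a real conversion is required */
    }
    if (identical)
    {
        free(map);          /* matching layouts: caller can skip conversion */
        return NULL;
    }
    return map;
}

int main(void)
{
    const int src_types[] = {23, 25, 16};   /* e.g. int4, text, bool */
    const int dst_types[] = {23, 25, 16};
    RowDesc src = {3, src_types};
    RowDesc dst = {3, dst_types};
    int *map = build_position_map(&src, &dst);

    if (map == NULL)
        printf("identical row layouts: use the tuple as-is\n");
    else
    {
        printf("conversion map built\n");
        free(map);
    }
    return 0;
}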
+ */ + estate->retval = SPI_datumTransfer(estate->retval, + false, + -1); + } + } + else + { + /* Convert composite datum to a HeapTuple and TupleDesc */ + HeapTupleData tmptup; + + retdesc = deconstruct_composite_datum(estate->retval, &tmptup); + rettup = &tmptup; + + /* check rowtype compatibility */ + tupmap = convert_tuples_by_position(retdesc, + tupdesc, + gettext_noop("returned record type does not match expected record type")); + + /* it might need conversion */ + if (tupmap) + rettup = do_convert_tuple(rettup, tupmap); + + /* + * Copy tuple to upper executor memory, as a tuple Datum. Make sure + * it is labeled with the caller-supplied tuple type. + */ + estate->retval = PointerGetDatum(SPI_returntuple(rettup, tupdesc)); + + /* no need to free map, we're about to return anyway */ + + ReleaseTupleDesc(retdesc); + } +} + /* ---------- * plpgsql_exec_trigger Called by the call handler for @@ -713,6 +836,7 @@ plpgsql_exec_trigger(PLpgSQL_function *func, ErrorContextCallback plerrcontext; int i; int rc; + TupleDesc tupdesc; PLpgSQL_var *var; PLpgSQL_rec *rec_new, *rec_old; @@ -747,37 +871,34 @@ plpgsql_exec_trigger(PLpgSQL_function *func, * might have a test like "if (TG_OP = 'INSERT' and NEW.foo = 'xyz')", * which should parse regardless of the current trigger type. */ + tupdesc = RelationGetDescr(trigdata->tg_relation); + rec_new = (PLpgSQL_rec *) (estate.datums[func->new_varno]); - rec_new->freetup = false; - rec_new->tupdesc = trigdata->tg_relation->rd_att; - rec_new->freetupdesc = false; rec_old = (PLpgSQL_rec *) (estate.datums[func->old_varno]); - rec_old->freetup = false; - rec_old->tupdesc = trigdata->tg_relation->rd_att; - rec_old->freetupdesc = false; + + rec_new->erh = make_expanded_record_from_tupdesc(tupdesc, + estate.datum_context); + rec_old->erh = make_expanded_record_from_exprecord(rec_new->erh, + estate.datum_context); if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) { /* * Per-statement triggers don't use OLD/NEW variables */ - rec_new->tup = NULL; - rec_old->tup = NULL; } else if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event)) { - rec_new->tup = trigdata->tg_trigtuple; - rec_old->tup = NULL; + expanded_record_set_tuple(rec_new->erh, trigdata->tg_trigtuple, false); } else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) { - rec_new->tup = trigdata->tg_newtuple; - rec_old->tup = trigdata->tg_trigtuple; + expanded_record_set_tuple(rec_new->erh, trigdata->tg_newtuple, false); + expanded_record_set_tuple(rec_old->erh, trigdata->tg_trigtuple, false); } else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) { - rec_new->tup = NULL; - rec_old->tup = trigdata->tg_trigtuple; + expanded_record_set_tuple(rec_old->erh, trigdata->tg_trigtuple, false); } else elog(ERROR, "unrecognized trigger action: not INSERT, DELETE, or UPDATE"); @@ -936,20 +1057,68 @@ plpgsql_exec_trigger(PLpgSQL_function *func, rettup = NULL; else { + TupleDesc retdesc; TupleConversionMap *tupmap; - rettup = (HeapTuple) DatumGetPointer(estate.retval); - /* check rowtype compatibility */ - tupmap = convert_tuples_by_position(estate.rettupdesc, - trigdata->tg_relation->rd_att, - gettext_noop("returned row structure does not match the structure of the triggering table")); - /* it might need conversion */ - if (tupmap) - rettup = do_convert_tuple(rettup, tupmap); - /* no need to free map, we're about to return anyway */ + /* We assume exec_stmt_return verified that result is composite */ + Assert(type_is_rowtype(estate.rettype)); - /* Copy tuple to upper executor memory */ - rettup = SPI_copytuple(rettup); + /* We can 
special-case expanded records for speed */ + if (VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(estate.retval))) + { + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) DatumGetEOHP(estate.retval); + + Assert(erh->er_magic == ER_MAGIC); + + /* Extract HeapTuple and TupleDesc */ + rettup = expanded_record_get_tuple(erh); + Assert(rettup); + retdesc = expanded_record_get_tupdesc(erh); + + if (retdesc != RelationGetDescr(trigdata->tg_relation)) + { + /* check rowtype compatibility */ + tupmap = convert_tuples_by_position(retdesc, + RelationGetDescr(trigdata->tg_relation), + gettext_noop("returned row structure does not match the structure of the triggering table")); + /* it might need conversion */ + if (tupmap) + rettup = do_convert_tuple(rettup, tupmap); + /* no need to free map, we're about to return anyway */ + } + + /* + * Copy tuple to upper executor memory. But if user just did + * "return new" or "return old" without changing anything, there's + * no need to copy; we can return the original tuple (which will + * save a few cycles in trigger.c as well as here). + */ + if (rettup != trigdata->tg_newtuple && + rettup != trigdata->tg_trigtuple) + rettup = SPI_copytuple(rettup); + } + else + { + /* Convert composite datum to a HeapTuple and TupleDesc */ + HeapTupleData tmptup; + + retdesc = deconstruct_composite_datum(estate.retval, &tmptup); + rettup = &tmptup; + + /* check rowtype compatibility */ + tupmap = convert_tuples_by_position(retdesc, + RelationGetDescr(trigdata->tg_relation), + gettext_noop("returned row structure does not match the structure of the triggering table")); + /* it might need conversion */ + if (tupmap) + rettup = do_convert_tuple(rettup, tupmap); + + ReleaseTupleDesc(retdesc); + /* no need to free map, we're about to return anyway */ + + /* Copy tuple to upper executor memory */ + rettup = SPI_copytuple(rettup); + } } /* @@ -1146,11 +1315,8 @@ copy_plpgsql_datum(PLpgSQL_datum *datum) PLpgSQL_rec *new = palloc(sizeof(PLpgSQL_rec)); memcpy(new, datum, sizeof(PLpgSQL_rec)); - /* should be preset to null/non-freeable */ - Assert(new->tup == NULL); - Assert(new->tupdesc == NULL); - Assert(!new->freetup); - Assert(!new->freetupdesc); + /* should be preset to empty */ + Assert(new->erh == NULL); result = (PLpgSQL_datum *) new; } @@ -1162,8 +1328,8 @@ copy_plpgsql_datum(PLpgSQL_datum *datum) /* * These datum records are read-only at runtime, so no need to - * copy them (well, ARRAYELEM contains some cached type data, but - * we'd just as soon centralize the caching anyway) + * copy them (well, RECFIELD and ARRAYELEM contain cached data, + * but we'd just as soon centralize the caching anyway) */ result = datum; break; @@ -1334,18 +1500,9 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) { PLpgSQL_rec *rec = (PLpgSQL_rec *) datum; - if (rec->freetup) - { - heap_freetuple(rec->tup); - rec->freetup = false; - } - if (rec->freetupdesc) - { - FreeTupleDesc(rec->tupdesc); - rec->freetupdesc = false; - } - rec->tup = NULL; - rec->tupdesc = NULL; + if (rec->erh) + DeleteExpandedObject(ExpandedRecordGetDatum(rec->erh)); + rec->erh = NULL; } break; @@ -1401,16 +1558,12 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) /* * If the block ended with RETURN, we may need to copy the return - * value out of the subtransaction eval_context. This is - * currently only needed for scalar result types --- rowtype - * values will always exist in the function's main memory context, - * cf. exec_stmt_return(). 
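The trigger-return path above avoids SPI_copytuple() when the function body simply returned NEW or OLD untouched: if the tuple pointer coming back is still the one the trigger manager supplied, the original can be passed through as-is. A tiny standalone illustration of that pointer-identity short circuit (the Tuple type and helpers here are invented for the example):

#include <stdio.h>
#include <stdlib.h>

typedef struct Tuple { int id; const char *payload; } Tuple;

static Tuple *copy_tuple(const Tuple *t)
{
    Tuple *c = malloc(sizeof(Tuple));

    *c = *t;
    return c;
}

/* copy into long-lived storage only if the result is not one of the inputs */
static Tuple *prepare_result(Tuple *result, Tuple *trig_new, Tuple *trig_old)
{
    if (result == trig_new || result == trig_old)
        return result;          /* unchanged NEW/OLD: no copy needed */
    return copy_tuple(result);  /* anything else lives in short-lived memory */
}

int main(void)
{
    Tuple newtup = {1, "new row"};
    Tuple oldtup = {2, "old row"};

    /* "RETURN NEW" without modification: the same pointer comes back */
    Tuple *r1 = prepare_result(&newtup, &newtup, &oldtup);
    printf("unchanged NEW returned %s copy\n", r1 == &newtup ? "without" : "with");

    /* a freshly built row must be copied before being handed upward */
    Tuple scratch = {3, "modified row"};
    Tuple *r2 = prepare_result(&scratch, &newtup, &oldtup);
    printf("modified row returned %s copy\n", r2 == &scratch ? "without" : "with");
    free(r2);
    return 0;
}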
We can avoid a physical copy if the - * value happens to be a R/W expanded object. + * value out of the subtransaction eval_context. We can avoid a + * physical copy if the value happens to be a R/W expanded object. */ if (rc == PLPGSQL_RC_RETURN && !estate->retisset && - !estate->retisnull && - estate->rettupdesc == NULL) + !estate->retisnull) { int16 resTypLen; bool resTypByVal; @@ -2574,12 +2727,8 @@ exec_stmt_exit(PLpgSQL_execstate *estate, PLpgSQL_stmt_exit *stmt) * exec_stmt_return Evaluate an expression and start * returning from the function. * - * Note: in the retistuple code paths, the returned tuple is always in the - * function's main context, whereas for non-tuple data types the result may - * be in the eval_mcontext. The former case is not a memory leak since we're - * about to exit the function anyway. (If you want to change it, note that - * exec_stmt_block() knows about this behavior.) The latter case means that - * we must not do exec_eval_cleanup while unwinding the control stack. + * Note: The result may be in the eval_mcontext. Therefore, we must not + * do exec_eval_cleanup while unwinding the control stack. * ---------- */ static int @@ -2593,9 +2742,8 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) if (estate->retisset) return PLPGSQL_RC_RETURN; - /* initialize for null result (possibly a tuple) */ + /* initialize for null result */ estate->retval = (Datum) 0; - estate->rettupdesc = NULL; estate->retisnull = true; estate->rettype = InvalidOid; @@ -2626,10 +2774,12 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) estate->rettype = var->datatype->typoid; /* - * Cope with retistuple case. A PLpgSQL_var could not be - * of composite type, so we needn't make any effort to - * convert. However, for consistency with the expression - * code path, don't throw error if the result is NULL. + * A PLpgSQL_var could not be of composite type, so + * conversion must fail if retistuple. We throw a custom + * error mainly for consistency with historical behavior. + * For the same reason, we don't throw error if the result + * is NULL. (Note that plpgsql_exec_trigger assumes that + * any non-null result has been verified to be composite.) 
*/ if (estate->retistuple && !estate->retisnull) ereport(ERROR, @@ -2641,23 +2791,13 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) case PLPGSQL_DTYPE_REC: { PLpgSQL_rec *rec = (PLpgSQL_rec *) retvar; - int32 rettypmod; - if (HeapTupleIsValid(rec->tup)) + /* If record is empty, we return NULL not a row of nulls */ + if (rec->erh && !ExpandedRecordIsEmpty(rec->erh)) { - if (estate->retistuple) - { - estate->retval = PointerGetDatum(rec->tup); - estate->rettupdesc = rec->tupdesc; - estate->retisnull = false; - } - else - exec_eval_datum(estate, - retvar, - &estate->rettype, - &rettypmod, - &estate->retval, - &estate->retisnull); + estate->retval = ExpandedRecordGetDatum(rec->erh); + estate->retisnull = false; + estate->rettype = rec->rectypeid; } } break; @@ -2667,26 +2807,13 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) PLpgSQL_row *row = (PLpgSQL_row *) retvar; int32 rettypmod; - if (estate->retistuple) - { - HeapTuple tup; - - if (!row->rowtupdesc) /* should not happen */ - elog(ERROR, "row variable has no tupdesc"); - tup = make_tuple_from_row(estate, row, row->rowtupdesc); - if (tup == NULL) /* should not happen */ - elog(ERROR, "row not compatible with its own tupdesc"); - estate->retval = PointerGetDatum(tup); - estate->rettupdesc = row->rowtupdesc; - estate->retisnull = false; - } - else - exec_eval_datum(estate, - retvar, - &estate->rettype, - &rettypmod, - &estate->retval, - &estate->retisnull); + /* We get here if there are multiple OUT parameters */ + exec_eval_datum(estate, + (PLpgSQL_datum *) row, + &estate->rettype, + &rettypmod, + &estate->retval, + &estate->retisnull); } break; @@ -2706,23 +2833,15 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) &(estate->rettype), &rettypmod); - if (estate->retistuple && !estate->retisnull) - { - /* Convert composite datum to a HeapTuple and TupleDesc */ - HeapTuple tuple; - TupleDesc tupdesc; - - /* Source must be of RECORD or composite type */ - if (!type_is_rowtype(estate->rettype)) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("cannot return non-composite value from function returning composite type"))); - tuple = get_tuple_from_datum(estate->retval); - tupdesc = get_tupdesc_from_datum(estate->retval); - estate->retval = PointerGetDatum(tuple); - estate->rettupdesc = CreateTupleDescCopy(tupdesc); - ReleaseTupleDesc(tupdesc); - } + /* + * As in the DTYPE_VAR case above, throw a custom error if a non-null, + * non-composite value is returned in a function returning tuple. 
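The DTYPE_REC branch above draws a distinction the old row representation could not: a record variable that was never assigned, or whose expanded record is still "empty", is returned as a genuine SQL NULL, while only a variable that actually holds a tuple (even a tuple whose every field is NULL) produces a row. A small standalone model of that three-way state, with invented types:

#include <stdio.h>
#include <stdbool.h>

/* a record value can be: not instantiated, instantiated but empty, or hold a row */
typedef enum { REC_UNSET, REC_EMPTY, REC_HAS_ROW } RecState;

typedef struct RecVar
{
    RecState state;
    bool     fld_null[2];   /* per-field null flags, meaningful only for REC_HAS_ROW */
    int      fld_val[2];
} RecVar;

/* RETURN semantics: an unset or empty record yields SQL NULL, not ROW(NULL,NULL) */
static bool record_return_is_null(const RecVar *r)
{
    return (r->state != REC_HAS_ROW);
}

int main(void)
{
    RecVar unset = {REC_UNSET, {false, false}, {0, 0}};
    RecVar all_nulls = {REC_HAS_ROW, {true, true}, {0, 0}};

    printf("unset record   -> %s\n",
           record_return_is_null(&unset) ? "NULL" : "a row");
    printf("ROW(NULL,NULL) -> %s\n",
           record_return_is_null(&all_nulls) ? "NULL" : "a row of null fields");
    return 0;
}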
+ */ + if (estate->retistuple && !estate->retisnull && + !type_is_rowtype(estate->rettype)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("cannot return non-composite value from function returning composite type"))); return PLPGSQL_RC_RETURN; } @@ -2765,8 +2884,8 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, if (estate->tuple_store == NULL) exec_init_tuple_store(estate); - /* rettupdesc will be filled by exec_init_tuple_store */ - tupdesc = estate->rettupdesc; + /* tuple_store_desc will be filled by exec_init_tuple_store */ + tupdesc = estate->tuple_store_desc; natts = tupdesc->natts; /* @@ -2819,22 +2938,22 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, case PLPGSQL_DTYPE_REC: { PLpgSQL_rec *rec = (PLpgSQL_rec *) retvar; + TupleDesc rec_tupdesc; TupleConversionMap *tupmap; - if (!HeapTupleIsValid(rec->tup)) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned" - " record is indeterminate."))); + /* If rec is null, try to convert it to a row of nulls */ + if (rec->erh == NULL) + instantiate_empty_record_variable(estate, rec); + if (ExpandedRecordIsEmpty(rec->erh)) + deconstruct_expanded_record(rec->erh); /* Use eval_mcontext for tuple conversion work */ oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); - tupmap = convert_tuples_by_position(rec->tupdesc, + rec_tupdesc = expanded_record_get_tupdesc(rec->erh); + tupmap = convert_tuples_by_position(rec_tupdesc, tupdesc, gettext_noop("wrong record type supplied in RETURN NEXT")); - tuple = rec->tup; + tuple = expanded_record_get_tuple(rec->erh); if (tupmap) tuple = do_convert_tuple(tuple, tupmap); tuplestore_puttuple(estate->tuple_store, tuple); @@ -2846,10 +2965,12 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, { PLpgSQL_row *row = (PLpgSQL_row *) retvar; + /* We get here if there are multiple OUT parameters */ + /* Use eval_mcontext for tuple conversion work */ oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); tuple = make_tuple_from_row(estate, row, tupdesc); - if (tuple == NULL) + if (tuple == NULL) /* should not happen */ ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("wrong record type supplied in RETURN NEXT"))); @@ -2881,6 +3002,7 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, /* Expression should be of RECORD or composite type */ if (!isNull) { + HeapTupleData tmptup; TupleDesc retvaldesc; TupleConversionMap *tupmap; @@ -2891,8 +3013,8 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, /* Use eval_mcontext for tuple conversion work */ oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); - tuple = get_tuple_from_datum(retval); - retvaldesc = get_tupdesc_from_datum(retval); + retvaldesc = deconstruct_composite_datum(retval, &tmptup); + tuple = &tmptup; tupmap = convert_tuples_by_position(retvaldesc, tupdesc, gettext_noop("returned record type does not match expected record type")); if (tupmap) @@ -2992,7 +3114,7 @@ exec_stmt_return_query(PLpgSQL_execstate *estate, oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); tupmap = convert_tuples_by_position(portal->tupDesc, - estate->rettupdesc, + estate->tuple_store_desc, gettext_noop("structure of query does not match function result type")); while (true) @@ -3069,7 +3191,7 @@ exec_init_tuple_store(PLpgSQL_execstate *estate) CurrentResourceOwner = oldowner; MemoryContextSwitchTo(oldcxt); - estate->rettupdesc = rsi->expectedDesc; + estate->tuple_store_desc = 
rsi->expectedDesc; } #define SET_RAISE_OPTION_TEXT(opt, name) \ @@ -3363,11 +3485,11 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, estate->readonly_func = func->fn_readonly; - estate->rettupdesc = NULL; estate->exitlabel = NULL; estate->cur_error = NULL; estate->tuple_store = NULL; + estate->tuple_store_desc = NULL; if (rsi) { estate->tuple_store_cxt = rsi->econtext->ecxt_per_query_memory; @@ -3384,6 +3506,7 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, estate->ndatums = func->ndatums; estate->datums = palloc(sizeof(PLpgSQL_datum *) * estate->ndatums); /* caller is expected to fill the datums array */ + estate->datum_context = CurrentMemoryContext; /* initialize our ParamListInfo with appropriate hook functions */ estate->paramLI = (ParamListInfo) @@ -4449,7 +4572,7 @@ exec_assign_value(PLpgSQL_execstate *estate, { /* array and not already R/W, so apply expand_array */ newvalue = expand_array(newvalue, - CurrentMemoryContext, + estate->datum_context, NULL); } else @@ -4534,64 +4657,58 @@ exec_assign_value(PLpgSQL_execstate *estate, */ PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) target; PLpgSQL_rec *rec; - int fno; - HeapTuple newtup; - int colnums[1]; - Datum values[1]; - bool nulls[1]; - Oid atttype; - int32 atttypmod; + ExpandedRecordHeader *erh; rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); + erh = rec->erh; /* - * Check that there is already a tuple in the record. We need - * that because records don't have any predefined field - * structure. + * If record variable is NULL, instantiate it if it has a + * named composite type, else complain. (This won't change + * the logical state of the record, but if we successfully + * assign below, the unassigned fields will all become NULLs.) */ - if (!HeapTupleIsValid(rec->tup)) + if (erh == NULL) + { + instantiate_empty_record_variable(estate, rec); + erh = rec->erh; + } + + /* + * Look up the field's properties if we have not already, or + * if the tuple descriptor ID changed since last time. + */ + if (unlikely(recfield->rectupledescid != erh->er_tupdesc_id)) + { + if (!expanded_record_lookup_field(erh, + recfield->fieldname, + &recfield->finfo)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", + rec->refname, recfield->fieldname))); + recfield->rectupledescid = erh->er_tupdesc_id; + } + + /* We don't support assignments to system columns. */ + if (recfield->finfo.fnumber <= 0) ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot assign to system column \"%s\"", + recfield->fieldname))); - /* - * Get the number of the record field to change. Disallow - * system columns because the code below won't cope. - */ - fno = SPI_fnumber(rec->tupdesc, recfield->fieldname); - if (fno <= 0) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - rec->refname, recfield->fieldname))); - colnums[0] = fno; - - /* - * Now insert the new value, being careful to cast it to the - * right type. 
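The assignment path above, like the evaluation paths further on, caches the looked-up field number, type, and typmod in the PLpgSQL_recfield and stamps the cache with the tuple descriptor identifier that was current at lookup time; the by-name search is repeated only when the record's er_tupdesc_id has changed, which is how DDL on the underlying composite type is detected cheaply. A standalone model of that identifier-stamped lookup cache (structure and function names are invented for the sketch):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define INVALID_DESC_ID 0       /* plays the role of INVALID_TUPLEDESC_IDENTIFIER */

typedef struct Desc
{
    uint64_t    id;             /* process-wide unique per distinct descriptor */
    int         natts;
    const char *names[4];
} Desc;

typedef struct FieldRef
{
    const char *fieldname;
    uint64_t    cached_desc_id; /* descriptor id the cached attnum is valid for */
    int         cached_attnum;
} FieldRef;

/* re-run the by-name search only when the descriptor changed since last time */
static int lookup_field(FieldRef *ref, const Desc *desc)
{
    if (ref->cached_desc_id != desc->id)
    {
        ref->cached_attnum = -1;            /* -1 means "no such field" */
        for (int i = 0; i < desc->natts; i++)
            if (strcmp(desc->names[i], ref->fieldname) == 0)
                ref->cached_attnum = i;
        ref->cached_desc_id = desc->id;
    }
    return ref->cached_attnum;
}

int main(void)
{
    Desc d1 = {1, 2, {"a", "b"}};
    Desc d2 = {2, 3, {"x", "a", "b"}};      /* e.g. after ALTER TYPE ... ADD ATTRIBUTE */
    FieldRef ref = {"b", INVALID_DESC_ID, -1};

    printf("attnum in d1: %d\n", lookup_field(&ref, &d1));  /* search happens */
    printf("attnum in d1: %d\n", lookup_field(&ref, &d1));  /* cache hit */
    printf("attnum in d2: %d\n", lookup_field(&ref, &d2));  /* id changed: re-search */
    return 0;
}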
- */ - atttype = TupleDescAttr(rec->tupdesc, fno - 1)->atttypid; - atttypmod = TupleDescAttr(rec->tupdesc, fno - 1)->atttypmod; - values[0] = exec_cast_value(estate, - value, - &isNull, - valtype, - valtypmod, - atttype, - atttypmod); - nulls[0] = isNull; - - newtup = heap_modify_tuple_by_cols(rec->tup, rec->tupdesc, - 1, colnums, values, nulls); - - if (rec->freetup) - heap_freetuple(rec->tup); - - rec->tup = newtup; - rec->freetup = true; + /* Cast the new value to the right type, if needed. */ + value = exec_cast_value(estate, + value, + &isNull, + valtype, + valtypmod, + recfield->finfo.ftypeid, + recfield->finfo.ftypmod); + /* And assign it. */ + expanded_record_set_field(erh, recfield->finfo.fnumber, + value, isNull); break; } @@ -4837,6 +4954,7 @@ exec_eval_datum(PLpgSQL_execstate *estate, PLpgSQL_row *row = (PLpgSQL_row *) datum; HeapTuple tup; + /* We get here if there are multiple OUT parameters */ if (!row->rowtupdesc) /* should not happen */ elog(ERROR, "row variable has no tupdesc"); /* Make sure we have a valid type/typmod setting */ @@ -4857,22 +4975,41 @@ exec_eval_datum(PLpgSQL_execstate *estate, { PLpgSQL_rec *rec = (PLpgSQL_rec *) datum; - if (!HeapTupleIsValid(rec->tup)) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - Assert(rec->tupdesc != NULL); - /* Make sure we have a valid type/typmod setting */ - BlessTupleDesc(rec->tupdesc); - - oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); - *typeid = rec->tupdesc->tdtypeid; - *typetypmod = rec->tupdesc->tdtypmod; - *value = heap_copy_tuple_as_datum(rec->tup, rec->tupdesc); - *isnull = false; - MemoryContextSwitchTo(oldcontext); + if (rec->erh == NULL) + { + /* Treat uninstantiated record as a simple NULL */ + *value = (Datum) 0; + *isnull = true; + /* Report variable's declared type */ + *typeid = rec->rectypeid; + *typetypmod = -1; + } + else + { + if (ExpandedRecordIsEmpty(rec->erh)) + { + /* Empty record is also a NULL */ + *value = (Datum) 0; + *isnull = true; + } + else + { + *value = ExpandedRecordGetDatum(rec->erh); + *isnull = false; + } + if (rec->rectypeid != RECORDOID) + { + /* Report variable's declared type, if not RECORD */ + *typeid = rec->rectypeid; + *typetypmod = -1; + } + else + { + /* Report record's actual type if declared RECORD */ + *typeid = rec->erh->er_typeid; + *typetypmod = rec->erh->er_typmod; + } + } break; } @@ -4880,31 +5017,46 @@ exec_eval_datum(PLpgSQL_execstate *estate, { PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) datum; PLpgSQL_rec *rec; - int fno; + ExpandedRecordHeader *erh; rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); - if (!HeapTupleIsValid(rec->tup)) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - fno = SPI_fnumber(rec->tupdesc, recfield->fieldname); - if (fno == SPI_ERROR_NOATTRIBUTE) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - rec->refname, recfield->fieldname))); - *typeid = SPI_gettypeid(rec->tupdesc, fno); - if (fno > 0) - { - Form_pg_attribute attr = TupleDescAttr(rec->tupdesc, fno - 1); + erh = rec->erh; - *typetypmod = attr->atttypmod; + /* + * If record variable is NULL, instantiate it if it has a + * named composite type, else complain. 
(This won't change + * the logical state of the record: it's still NULL.) + */ + if (erh == NULL) + { + instantiate_empty_record_variable(estate, rec); + erh = rec->erh; } - else - *typetypmod = -1; - *value = SPI_getbinval(rec->tup, rec->tupdesc, fno, isnull); + + /* + * Look up the field's properties if we have not already, or + * if the tuple descriptor ID changed since last time. + */ + if (unlikely(recfield->rectupledescid != erh->er_tupdesc_id)) + { + if (!expanded_record_lookup_field(erh, + recfield->fieldname, + &recfield->finfo)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", + rec->refname, recfield->fieldname))); + recfield->rectupledescid = erh->er_tupdesc_id; + } + + /* Report type data. */ + *typeid = recfield->finfo.ftypeid; + *typetypmod = recfield->finfo.ftypmod; + + /* And fetch the field value. */ + *value = expanded_record_get_field(erh, + recfield->finfo.fnumber, + isnull); break; } @@ -4916,10 +5068,8 @@ exec_eval_datum(PLpgSQL_execstate *estate, /* * plpgsql_exec_get_datum_type Get datatype of a PLpgSQL_datum * - * This is the same logic as in exec_eval_datum, except that it can handle - * some cases where exec_eval_datum has to fail; specifically, we may have - * a tupdesc but no row value for a record variable. (This currently can - * happen only for a trigger's NEW/OLD records.) + * This is the same logic as in exec_eval_datum, but we skip acquiring + * the actual value of the variable. Also, needn't support DTYPE_ROW. */ Oid plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, @@ -4937,31 +5087,20 @@ plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, break; } - case PLPGSQL_DTYPE_ROW: - { - PLpgSQL_row *row = (PLpgSQL_row *) datum; - - if (!row->rowtupdesc) /* should not happen */ - elog(ERROR, "row variable has no tupdesc"); - /* Make sure we have a valid type/typmod setting */ - BlessTupleDesc(row->rowtupdesc); - typeid = row->rowtupdesc->tdtypeid; - break; - } - case PLPGSQL_DTYPE_REC: { PLpgSQL_rec *rec = (PLpgSQL_rec *) datum; - if (rec->tupdesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - /* Make sure we have a valid type/typmod setting */ - BlessTupleDesc(rec->tupdesc); - typeid = rec->tupdesc->tdtypeid; + if (rec->erh == NULL || rec->rectypeid != RECORDOID) + { + /* Report variable's declared type */ + typeid = rec->rectypeid; + } + else + { + /* Report record's actual type if declared RECORD */ + typeid = rec->erh->er_typeid; + } break; } @@ -4969,22 +5108,34 @@ plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, { PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) datum; PLpgSQL_rec *rec; - int fno; rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); - if (rec->tupdesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - fno = SPI_fnumber(rec->tupdesc, recfield->fieldname); - if (fno == SPI_ERROR_NOATTRIBUTE) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - rec->refname, recfield->fieldname))); - typeid = SPI_gettypeid(rec->tupdesc, fno); + + /* + * If record variable is NULL, instantiate it if it has a + * named composite type, else complain. 
(This won't change + * the logical state of the record: it's still NULL.) + */ + if (rec->erh == NULL) + instantiate_empty_record_variable(estate, rec); + + /* + * Look up the field's properties if we have not already, or + * if the tuple descriptor ID changed since last time. + */ + if (unlikely(recfield->rectupledescid != rec->erh->er_tupdesc_id)) + { + if (!expanded_record_lookup_field(rec->erh, + recfield->fieldname, + &recfield->finfo)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", + rec->refname, recfield->fieldname))); + recfield->rectupledescid = rec->erh->er_tupdesc_id; + } + + typeid = recfield->finfo.ftypeid; break; } @@ -5001,7 +5152,8 @@ plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, * plpgsql_exec_get_datum_type_info Get datatype etc of a PLpgSQL_datum * * An extended version of plpgsql_exec_get_datum_type, which also retrieves the - * typmod and collation of the datum. + * typmod and collation of the datum. Note however that we don't report the + * possibly-mutable typmod of RECORD values, but say -1 always. */ void plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate, @@ -5020,37 +5172,23 @@ plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate, break; } - case PLPGSQL_DTYPE_ROW: - { - PLpgSQL_row *row = (PLpgSQL_row *) datum; - - if (!row->rowtupdesc) /* should not happen */ - elog(ERROR, "row variable has no tupdesc"); - /* Make sure we have a valid type/typmod setting */ - BlessTupleDesc(row->rowtupdesc); - *typeid = row->rowtupdesc->tdtypeid; - /* do NOT return the mutable typmod of a RECORD variable */ - *typmod = -1; - /* composite types are never collatable */ - *collation = InvalidOid; - break; - } - case PLPGSQL_DTYPE_REC: { PLpgSQL_rec *rec = (PLpgSQL_rec *) datum; - if (rec->tupdesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - /* Make sure we have a valid type/typmod setting */ - BlessTupleDesc(rec->tupdesc); - *typeid = rec->tupdesc->tdtypeid; - /* do NOT return the mutable typmod of a RECORD variable */ - *typmod = -1; + if (rec->erh == NULL || rec->rectypeid != RECORDOID) + { + /* Report variable's declared type */ + *typeid = rec->rectypeid; + *typmod = -1; + } + else + { + /* Report record's actual type if declared RECORD */ + *typeid = rec->erh->er_typeid; + /* do NOT return the mutable typmod of a RECORD variable */ + *typmod = -1; + } /* composite types are never collatable */ *collation = InvalidOid; break; @@ -5060,38 +5198,36 @@ plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate, { PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) datum; PLpgSQL_rec *rec; - int fno; rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); - if (rec->tupdesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - fno = SPI_fnumber(rec->tupdesc, recfield->fieldname); - if (fno == SPI_ERROR_NOATTRIBUTE) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - rec->refname, recfield->fieldname))); - *typeid = SPI_gettypeid(rec->tupdesc, fno); - if (fno > 0) - { - Form_pg_attribute attr = TupleDescAttr(rec->tupdesc, fno - 1); - *typmod = attr->atttypmod; - } - else - *typmod = -1; - if (fno 
> 0) - { - Form_pg_attribute attr = TupleDescAttr(rec->tupdesc, fno - 1); + /* + * If record variable is NULL, instantiate it if it has a + * named composite type, else complain. (This won't change + * the logical state of the record: it's still NULL.) + */ + if (rec->erh == NULL) + instantiate_empty_record_variable(estate, rec); - *collation = attr->attcollation; + /* + * Look up the field's properties if we have not already, or + * if the tuple descriptor ID changed since last time. + */ + if (unlikely(recfield->rectupledescid != rec->erh->er_tupdesc_id)) + { + if (!expanded_record_lookup_field(rec->erh, + recfield->fieldname, + &recfield->finfo)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", + rec->refname, recfield->fieldname))); + recfield->rectupledescid = rec->erh->er_tupdesc_id; } - else /* no system column types have collation */ - *collation = InvalidOid; + + *typeid = recfield->finfo.ftypeid; + *typmod = recfield->finfo.ftypmod; + *collation = recfield->finfo.fcollation; break; } @@ -5315,6 +5451,8 @@ exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt, SPITupleTable *tuptab; bool found = false; int rc = PLPGSQL_RC_OK; + uint64 previous_id = INVALID_TUPLEDESC_IDENTIFIER; + bool tupdescs_match = true; uint64 n; /* Fetch loop variable's datum entry */ @@ -5357,9 +5495,56 @@ exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt, for (i = 0; i < n; i++) { /* - * Assign the tuple to the target + * Assign the tuple to the target. Here, because we know that all + * loop iterations should be assigning the same tupdesc, we can + * optimize away repeated creations of expanded records with + * identical tupdescs. Testing for changes of er_tupdesc_id is + * reliable even if the loop body contains assignments that + * replace the target's value entirely, because it's assigned from + * a process-global counter. The case where the tupdescs don't + * match could possibly be handled more efficiently than this + * coding does, but it's not clear extra effort is worthwhile. */ - exec_move_row(estate, var, tuptab->vals[i], tuptab->tupdesc); + if (var->dtype == PLPGSQL_DTYPE_REC) + { + PLpgSQL_rec *rec = (PLpgSQL_rec *) var; + + if (rec->erh && + rec->erh->er_tupdesc_id == previous_id && + tupdescs_match) + { + /* Only need to assign a new tuple value */ + expanded_record_set_tuple(rec->erh, tuptab->vals[i], true); + } + else + { + /* + * First time through, or var's tupdesc changed in loop, + * or we have to do it the hard way because type coercion + * is needed. + */ + exec_move_row(estate, var, + tuptab->vals[i], tuptab->tupdesc); + + /* + * Check to see if physical assignment is OK next time. + * Once the tupdesc comparison has failed once, we don't + * bother rechecking in subsequent loop iterations. 
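The loop above remembers the tuple descriptor identifier seen on the previous iteration together with a once-computed compatibility verdict, so subsequent rows can be installed with a bare expanded_record_set_tuple() instead of a full exec_move_row(). A standalone sketch of that per-iteration memoization (all names invented; check_compatible() stands in for the real compatibility test):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef struct Row { uint64_t desc_id; int a; } Row;

static int fast_assigns = 0, slow_assigns = 0;

static void assign_fast(const Row *r) { (void) r; fast_assigns++; }
static void assign_slow(const Row *r) { (void) r; slow_assigns++; }

/* stands in for a compatible_tupdescs()-style check; always true in this toy */
static bool check_compatible(const Row *r) { (void) r; return true; }

int main(void)
{
    Row rows[] = {{7, 1}, {7, 2}, {7, 3}};
    int nrows = (int) (sizeof(rows) / sizeof(rows[0]));

    uint64_t previous_id = 0;       /* no descriptor seen yet */
    bool     descs_match = true;    /* assumed OK until a check fails */

    for (int i = 0; i < nrows; i++)
    {
        if (rows[i].desc_id == previous_id && descs_match)
            assign_fast(&rows[i]);  /* same layout as last iteration */
        else
        {
            assign_slow(&rows[i]);  /* first row, or the layout changed */
            /* once the comparison has failed, don't bother rechecking */
            if (descs_match)
                descs_match = check_compatible(&rows[i]);
            previous_id = rows[i].desc_id;
        }
    }
    printf("fast assignments: %d, slow assignments: %d\n",
           fast_assigns, slow_assigns);
    return 0;
}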
+ */ + if (tupdescs_match) + { + tupdescs_match = + (rec->rectypeid == RECORDOID || + rec->rectypeid == tuptab->tupdesc->tdtypeid || + compatible_tupdescs(tuptab->tupdesc, + expanded_record_get_tupdesc(rec->erh))); + } + previous_id = rec->erh->er_tupdesc_id; + } + } + else + exec_move_row(estate, var, tuptab->vals[i], tuptab->tupdesc); + exec_eval_cleanup(estate); /* @@ -5684,27 +5869,33 @@ plpgsql_param_fetch(ParamListInfo params, break; case PLPGSQL_DTYPE_REC: - { - PLpgSQL_rec *rec = (PLpgSQL_rec *) datum; - - if (!HeapTupleIsValid(rec->tup)) - ok = false; - break; - } + /* always safe (might return NULL, that's fine) */ + break; case PLPGSQL_DTYPE_RECFIELD: { PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) datum; PLpgSQL_rec *rec; - int fno; rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); - if (!HeapTupleIsValid(rec->tup)) + + /* + * If record variable is NULL, don't risk anything. + */ + if (rec->erh == NULL) ok = false; - else + + /* + * Look up the field's properties if we have not already, + * or if the tuple descriptor ID changed since last time. + */ + else if (unlikely(recfield->rectupledescid != rec->erh->er_tupdesc_id)) { - fno = SPI_fnumber(rec->tupdesc, recfield->fieldname); - if (fno == SPI_ERROR_NOATTRIBUTE) + if (expanded_record_lookup_field(rec->erh, + recfield->fieldname, + &recfield->finfo)) + recfield->rectupledescid = rec->erh->er_tupdesc_id; + else ok = false; } break; @@ -5737,10 +5928,17 @@ plpgsql_param_fetch(ParamListInfo params, * If it's a read/write expanded datum, convert reference to read-only, * unless it's safe to pass as read-write. */ - if (datum->dtype == PLPGSQL_DTYPE_VAR && dno != expr->rwparam) - prm->value = MakeExpandedObjectReadOnly(prm->value, - prm->isnull, - ((PLpgSQL_var *) datum)->datatype->typlen); + if (dno != expr->rwparam) + { + if (datum->dtype == PLPGSQL_DTYPE_VAR) + prm->value = MakeExpandedObjectReadOnly(prm->value, + prm->isnull, + ((PLpgSQL_var *) datum)->datatype->typlen); + else if (datum->dtype == PLPGSQL_DTYPE_REC) + prm->value = MakeExpandedObjectReadOnly(prm->value, + prm->isnull, + -1); + } return prm; } @@ -5774,7 +5972,13 @@ plpgsql_param_compile(ParamListInfo params, Param *param, scratch.resvalue = resv; scratch.resnull = resnull; - /* Select appropriate eval function */ + /* + * Select appropriate eval function. It seems worth special-casing + * DTYPE_VAR and DTYPE_RECFIELD for performance. Also, we can determine + * in advance whether MakeExpandedObjectReadOnly() will be required. + * Currently, only VAR and REC datums could contain read/write expanded + * objects. + */ if (datum->dtype == PLPGSQL_DTYPE_VAR) { if (dno != expr->rwparam && @@ -5783,8 +5987,13 @@ plpgsql_param_compile(ParamListInfo params, Param *param, else scratch.d.cparam.paramfunc = plpgsql_param_eval_var; } + else if (datum->dtype == PLPGSQL_DTYPE_RECFIELD) + scratch.d.cparam.paramfunc = plpgsql_param_eval_recfield; + else if (datum->dtype == PLPGSQL_DTYPE_REC && + dno != expr->rwparam) + scratch.d.cparam.paramfunc = plpgsql_param_eval_generic_ro; else - scratch.d.cparam.paramfunc = plpgsql_param_eval_non_var; + scratch.d.cparam.paramfunc = plpgsql_param_eval_generic; /* * Note: it's tempting to use paramarg to store the estate pointer and @@ -5868,12 +6077,85 @@ plpgsql_param_eval_var_ro(ExprState *state, ExprEvalStep *op, } /* - * plpgsql_param_eval_non_var evaluation of EEOP_PARAM_CALLBACK step + * plpgsql_param_eval_recfield evaluation of EEOP_PARAM_CALLBACK step * - * This handles all variable types except DTYPE_VAR. 
+ * This is specialized to the case of DTYPE_RECFIELD variables, for which + * we never need to invoke MakeExpandedObjectReadOnly. */ static void -plpgsql_param_eval_non_var(ExprState *state, ExprEvalStep *op, +plpgsql_param_eval_recfield(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + ParamListInfo params; + PLpgSQL_execstate *estate; + int dno = op->d.cparam.paramid - 1; + PLpgSQL_recfield *recfield; + PLpgSQL_rec *rec; + ExpandedRecordHeader *erh; + + /* fetch back the hook data */ + params = econtext->ecxt_param_list_info; + estate = (PLpgSQL_execstate *) params->paramFetchArg; + Assert(dno >= 0 && dno < estate->ndatums); + + /* now we can access the target datum */ + recfield = (PLpgSQL_recfield *) estate->datums[dno]; + Assert(recfield->dtype == PLPGSQL_DTYPE_RECFIELD); + + /* inline the relevant part of exec_eval_datum */ + rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); + erh = rec->erh; + + /* + * If record variable is NULL, instantiate it if it has a named composite + * type, else complain. (This won't change the logical state of the + * record: it's still NULL.) + */ + if (erh == NULL) + { + instantiate_empty_record_variable(estate, rec); + erh = rec->erh; + } + + /* + * Look up the field's properties if we have not already, or if the tuple + * descriptor ID changed since last time. + */ + if (unlikely(recfield->rectupledescid != erh->er_tupdesc_id)) + { + if (!expanded_record_lookup_field(erh, + recfield->fieldname, + &recfield->finfo)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", + rec->refname, recfield->fieldname))); + recfield->rectupledescid = erh->er_tupdesc_id; + } + + /* OK to fetch the field value. */ + *op->resvalue = expanded_record_get_field(erh, + recfield->finfo.fnumber, + op->resnull); + + /* safety check -- needed for, eg, record fields */ + if (unlikely(recfield->finfo.ftypeid != op->d.cparam.paramtype)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("type of parameter %d (%s) does not match that when preparing the plan (%s)", + op->d.cparam.paramid, + format_type_be(recfield->finfo.ftypeid), + format_type_be(op->d.cparam.paramtype)))); +} + +/* + * plpgsql_param_eval_generic evaluation of EEOP_PARAM_CALLBACK step + * + * This handles all variable types, but assumes we do not need to invoke + * MakeExpandedObjectReadOnly. + */ +static void +plpgsql_param_eval_generic(ExprState *state, ExprEvalStep *op, ExprContext *econtext) { ParamListInfo params; @@ -5890,8 +6172,48 @@ plpgsql_param_eval_non_var(ExprState *state, ExprEvalStep *op, /* now we can access the target datum */ datum = estate->datums[dno]; - Assert(datum->dtype != PLPGSQL_DTYPE_VAR); + /* fetch datum's value */ + exec_eval_datum(estate, datum, + &datumtype, &datumtypmod, + op->resvalue, op->resnull); + + /* safety check -- needed for, eg, record fields */ + if (unlikely(datumtype != op->d.cparam.paramtype)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("type of parameter %d (%s) does not match that when preparing the plan (%s)", + op->d.cparam.paramid, + format_type_be(datumtype), + format_type_be(op->d.cparam.paramtype)))); +} + +/* + * plpgsql_param_eval_generic_ro evaluation of EEOP_PARAM_CALLBACK step + * + * This handles all variable types, but assumes we need to invoke + * MakeExpandedObjectReadOnly (hence, variable must be of a varlena type). 
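plpgsql_param_compile() chooses among these evaluators once, when the expression is compiled, so the per-evaluation code neither re-tests the datum kind nor re-decides whether MakeExpandedObjectReadOnly() is needed. A standalone sketch of picking a function pointer up front and calling through it in the hot path (illustrative names only):

#include <stdio.h>

typedef enum { KIND_VAR, KIND_RECFIELD, KIND_OTHER } DatumKind;

typedef int (*EvalFn) (int input);

/* specialized evaluators: each may assume things the generic one cannot */
static int eval_var(int x)      { return x; }          /* direct fetch */
static int eval_recfield(int x) { return x + 100; }    /* field lookup path */
static int eval_generic(int x)  { return x + 1000; }   /* everything else */

/* "compile time": decide once which evaluator a parameter will use */
static EvalFn compile_param(DatumKind kind)
{
    switch (kind)
    {
        case KIND_VAR:      return eval_var;
        case KIND_RECFIELD: return eval_recfield;
        default:            return eval_generic;
    }
}

int main(void)
{
    EvalFn fn = compile_param(KIND_RECFIELD);
    int total = 0;

    /* "run time": the hot loop just calls through the pointer, no re-testing */
    for (int i = 0; i < 5; i++)
        total += fn(i);
    printf("total = %d\n", total);
    return 0;
}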
+ */ +static void +plpgsql_param_eval_generic_ro(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + ParamListInfo params; + PLpgSQL_execstate *estate; + int dno = op->d.cparam.paramid - 1; + PLpgSQL_datum *datum; + Oid datumtype; + int32 datumtypmod; + + /* fetch back the hook data */ + params = econtext->ecxt_param_list_info; + estate = (PLpgSQL_execstate *) params->paramFetchArg; + Assert(dno >= 0 && dno < estate->ndatums); + + /* now we can access the target datum */ + datum = estate->datums[dno]; + + /* fetch datum's value */ exec_eval_datum(estate, datum, &datumtype, &datumtypmod, op->resvalue, op->resnull); @@ -5905,113 +6227,343 @@ plpgsql_param_eval_non_var(ExprState *state, ExprEvalStep *op, format_type_be(datumtype), format_type_be(op->d.cparam.paramtype)))); - /* - * Currently, if the dtype isn't VAR, the value couldn't be a read/write - * expanded datum. - */ + /* force the value to read-only */ + *op->resvalue = MakeExpandedObjectReadOnly(*op->resvalue, + *op->resnull, + -1); } -/* ---------- +/* * exec_move_row Move one tuple's values into a record or row * - * Since this uses exec_assign_value, caller should eventually call + * tup and tupdesc may both be NULL if we're just assigning an indeterminate + * composite NULL to the target. Alternatively, can have tup be NULL and + * tupdesc not NULL, in which case we assign a row of NULLs to the target. + * + * Since this uses the mcontext for workspace, caller should eventually call * exec_eval_cleanup to prevent long-term memory leaks. - * ---------- */ static void exec_move_row(PLpgSQL_execstate *estate, PLpgSQL_variable *target, HeapTuple tup, TupleDesc tupdesc) { + ExpandedRecordHeader *newerh = NULL; + /* - * Record is simple - just copy the tuple and its descriptor into the - * record variable + * If target is RECORD, we may be able to avoid field-by-field processing. */ if (target->dtype == PLPGSQL_DTYPE_REC) { PLpgSQL_rec *rec = (PLpgSQL_rec *) target; /* - * Copy input first, just in case it is pointing at variable's value + * If we have no source tupdesc, just set the record variable to NULL. + * (If we have a source tupdesc but not a tuple, we'll set the + * variable to a row of nulls, instead. This is odd perhaps, but + * backwards compatible.) */ - if (HeapTupleIsValid(tup)) - tup = heap_copytuple(tup); - else if (tupdesc) + if (tupdesc == NULL) { - /* If we have a tupdesc but no data, form an all-nulls tuple */ - bool *nulls; - - nulls = (bool *) - eval_mcontext_alloc(estate, tupdesc->natts * sizeof(bool)); - memset(nulls, true, tupdesc->natts * sizeof(bool)); - - tup = heap_form_tuple(tupdesc, NULL, nulls); + if (rec->erh) + DeleteExpandedObject(ExpandedRecordGetDatum(rec->erh)); + rec->erh = NULL; + return; } - if (tupdesc) - tupdesc = CreateTupleDescCopy(tupdesc); + /* + * Build a new expanded record with appropriate tupdesc. + */ + newerh = make_expanded_record_for_rec(estate, rec, tupdesc, NULL); - /* Free the old value ... */ - if (rec->freetup) + /* + * If the rowtypes match, or if we have no tuple anyway, we can + * complete the assignment without field-by-field processing. + * + * The tests here are ordered more or less in order of cheapness. We + * can easily detect it will work if the target is declared RECORD or + * has the same typeid as the source. But when assigning from a query + * result, it's common to have a source tupdesc that's labeled RECORD + * but is actually physically compatible with a named-composite-type + * target, so it's worth spending extra cycles to check for that. 
+ */ + if (rec->rectypeid == RECORDOID || + rec->rectypeid == tupdesc->tdtypeid || + !HeapTupleIsValid(tup) || + compatible_tupdescs(tupdesc, expanded_record_get_tupdesc(newerh))) { - heap_freetuple(rec->tup); - rec->freetup = false; - } - if (rec->freetupdesc) - { - FreeTupleDesc(rec->tupdesc); - rec->freetupdesc = false; - } + if (!HeapTupleIsValid(tup)) + { + /* No data, so force the record into all-nulls state */ + deconstruct_expanded_record(newerh); + } + else + { + /* No coercion is needed, so just assign the row value */ + expanded_record_set_tuple(newerh, tup, true); + } - /* ... and install the new */ - if (HeapTupleIsValid(tup)) + /* Complete the assignment */ + assign_record_var(estate, rec, newerh); + + return; + } + } + + /* + * Otherwise, deconstruct the tuple and do field-by-field assignment, + * using exec_move_row_from_fields. + */ + if (tupdesc && HeapTupleIsValid(tup)) + { + int td_natts = tupdesc->natts; + Datum *values; + bool *nulls; + Datum values_local[64]; + bool nulls_local[64]; + + /* + * Need workspace arrays. If td_natts is small enough, use local + * arrays to save doing a palloc. Even if it's not small, we can + * allocate both the Datum and isnull arrays in one palloc chunk. + */ + if (td_natts <= lengthof(values_local)) { - rec->tup = tup; - rec->freetup = true; + values = values_local; + nulls = nulls_local; } else - rec->tup = NULL; - - if (tupdesc) { - rec->tupdesc = tupdesc; - rec->freetupdesc = true; + char *chunk; + + chunk = eval_mcontext_alloc(estate, + td_natts * (sizeof(Datum) + sizeof(bool))); + values = (Datum *) chunk; + nulls = (bool *) (chunk + td_natts * sizeof(Datum)); } + + heap_deform_tuple(tup, tupdesc, values, nulls); + + exec_move_row_from_fields(estate, target, newerh, + values, nulls, tupdesc); + } + else + { + /* + * Assign all-nulls. + */ + exec_move_row_from_fields(estate, target, newerh, + NULL, NULL, NULL); + } +} + +/* + * Build an expanded record object suitable for assignment to "rec". + * + * Caller must supply either a source tuple descriptor or a source expanded + * record (not both). If the record variable has declared type RECORD, + * it'll adopt the source's rowtype. Even if it doesn't, we may be able to + * piggyback on a source expanded record to save a typcache lookup. + * + * Caller must fill the object with data, then do assign_record_var(). + * + * The new record is initially put into the mcontext, so it will be cleaned up + * if we fail before reaching assign_record_var(). + */ +static ExpandedRecordHeader * +make_expanded_record_for_rec(PLpgSQL_execstate *estate, + PLpgSQL_rec *rec, + TupleDesc srctupdesc, + ExpandedRecordHeader *srcerh) +{ + ExpandedRecordHeader *newerh; + MemoryContext mcontext = get_eval_mcontext(estate); + + if (rec->rectypeid != RECORDOID) + { + /* + * New record must be of desired type, but maybe srcerh has already + * done all the same lookups. + */ + if (srcerh && rec->rectypeid == srcerh->er_decltypeid) + newerh = make_expanded_record_from_exprecord(srcerh, + mcontext); else - rec->tupdesc = NULL; + newerh = make_expanded_record_from_typeid(rec->rectypeid, -1, + mcontext); + } + else + { + /* + * We'll adopt the input tupdesc. We can still use + * make_expanded_record_from_exprecord, if srcerh isn't a composite + * domain. (If it is, we effectively adopt its base type.) 
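Here, and again in exec_move_row_from_fields() below, the workspace for deforming a tuple is sized the same way: a small fixed local array avoids any allocation for typical rows, and wider rows get the Datum and null-flag arrays carved out of a single allocated chunk. A standalone sketch of the pattern, with plain malloc standing in for the per-statement context:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

typedef long Datum;     /* stand-in for the real Datum type */

static void deform_row(int natts, Datum *values, bool *nulls)
{
    for (int i = 0; i < natts; i++)
    {
        values[i] = i * 10;
        nulls[i] = (i % 2 == 1);
    }
}

int main(void)
{
    int    natts = 100;             /* try 8 to exercise the no-allocation path */
    Datum  values_local[64];
    bool   nulls_local[64];
    Datum *values;
    bool  *nulls;
    char  *chunk = NULL;

    if (natts <= (int) (sizeof(values_local) / sizeof(values_local[0])))
    {
        values = values_local;      /* small row: use the stack arrays */
        nulls = nulls_local;
    }
    else
    {
        /* one allocation serves both arrays */
        chunk = malloc(natts * (sizeof(Datum) + sizeof(bool)));
        values = (Datum *) chunk;
        nulls = (bool *) (chunk + natts * sizeof(Datum));
    }

    deform_row(natts, values, nulls);
    printf("col 3: value %ld, null %d\n", values[3], (int) nulls[3]);
    free(chunk);                    /* free(NULL) is a no-op */
    return 0;
}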
+ */ + if (srcerh && !ExpandedRecordIsDomain(srcerh)) + newerh = make_expanded_record_from_exprecord(srcerh, + mcontext); + else + { + if (!srctupdesc) + srctupdesc = expanded_record_get_tupdesc(srcerh); + newerh = make_expanded_record_from_tupdesc(srctupdesc, + mcontext); + } + } + + return newerh; +} + +/* + * exec_move_row_from_fields Move arrays of field values into a record or row + * + * When assigning to a record, the caller must have already created a suitable + * new expanded record object, newerh. Pass NULL when assigning to a row. + * + * tupdesc describes the input row, which might have different column + * types and/or different dropped-column positions than the target. + * values/nulls/tupdesc can all be NULL if we just want to assign nulls to + * all fields of the record or row. + * + * Since this uses the mcontext for workspace, caller should eventually call + * exec_eval_cleanup to prevent long-term memory leaks. + */ +static void +exec_move_row_from_fields(PLpgSQL_execstate *estate, + PLpgSQL_variable *target, + ExpandedRecordHeader *newerh, + Datum *values, bool *nulls, + TupleDesc tupdesc) +{ + int td_natts = tupdesc ? tupdesc->natts : 0; + int fnum; + int anum; + + /* Handle RECORD-target case */ + if (target->dtype == PLPGSQL_DTYPE_REC) + { + PLpgSQL_rec *rec = (PLpgSQL_rec *) target; + TupleDesc var_tupdesc; + Datum newvalues_local[64]; + bool newnulls_local[64]; + + Assert(newerh != NULL); /* caller must have built new object */ + + var_tupdesc = expanded_record_get_tupdesc(newerh); + + /* + * Coerce field values if needed. This might involve dealing with + * different sets of dropped columns and/or coercing individual column + * types. That's sort of a pain, but historically plpgsql has allowed + * it, so we preserve the behavior. However, it's worth a quick check + * to see if the tupdescs are identical. (Since expandedrecord.c + * prefers to use refcounted tupdescs from the typcache, expanded + * records with the same rowtype will have pointer-equal tupdescs.) + */ + if (var_tupdesc != tupdesc) + { + int vtd_natts = var_tupdesc->natts; + Datum *newvalues; + bool *newnulls; + + /* + * Need workspace arrays. If vtd_natts is small enough, use local + * arrays to save doing a palloc. Even if it's not small, we can + * allocate both the Datum and isnull arrays in one palloc chunk. + */ + if (vtd_natts <= lengthof(newvalues_local)) + { + newvalues = newvalues_local; + newnulls = newnulls_local; + } + else + { + char *chunk; + + chunk = eval_mcontext_alloc(estate, + vtd_natts * (sizeof(Datum) + sizeof(bool))); + newvalues = (Datum *) chunk; + newnulls = (bool *) (chunk + vtd_natts * sizeof(Datum)); + } + + /* Walk over destination columns */ + anum = 0; + for (fnum = 0; fnum < vtd_natts; fnum++) + { + Form_pg_attribute attr = TupleDescAttr(var_tupdesc, fnum); + Datum value; + bool isnull; + Oid valtype; + int32 valtypmod; + + if (attr->attisdropped) + { + /* expanded_record_set_fields should ignore this column */ + continue; /* skip dropped column in record */ + } + + while (anum < td_natts && + TupleDescAttr(tupdesc, anum)->attisdropped) + anum++; /* skip dropped column in tuple */ + + if (anum < td_natts) + { + value = values[anum]; + isnull = nulls[anum]; + valtype = TupleDescAttr(tupdesc, anum)->atttypid; + valtypmod = TupleDescAttr(tupdesc, anum)->atttypmod; + anum++; + } + else + { + value = (Datum) 0; + isnull = true; + valtype = UNKNOWNOID; + valtypmod = -1; + } + + /* Cast the new value to the right type, if needed. 
*/ + newvalues[fnum] = exec_cast_value(estate, + value, + &isnull, + valtype, + valtypmod, + attr->atttypid, + attr->atttypmod); + newnulls[fnum] = isnull; + } + + values = newvalues; + nulls = newnulls; + } + + /* Insert the coerced field values into the new expanded record */ + expanded_record_set_fields(newerh, values, nulls); + + /* Complete the assignment */ + assign_record_var(estate, rec, newerh); return; } + /* newerh should not have been passed in non-RECORD cases */ + Assert(newerh == NULL); + /* - * Row is a bit more complicated in that we assign the individual - * attributes of the tuple to the variables the row points to. + * For a row, we assign the individual field values to the variables the + * row points to. * - * NOTE: this code used to demand row->nfields == - * HeapTupleHeaderGetNatts(tup->t_data), but that's wrong. The tuple - * might have more fields than we expected if it's from an - * inheritance-child table of the current table, or it might have fewer if - * the table has had columns added by ALTER TABLE. Ignore extra columns - * and assume NULL for missing columns, the same as heap_getattr would do. - * We also have to skip over dropped columns in either the source or - * destination. + * NOTE: both this code and the record code above silently ignore extra + * columns in the source and assume NULL for missing columns. This is + * pretty dubious but it's the historical behavior. * - * If we have no tuple data at all, we'll assign NULL to all columns of + * If we have no input data at all, we'll assign NULL to all columns of * the row variable. */ if (target->dtype == PLPGSQL_DTYPE_ROW) { PLpgSQL_row *row = (PLpgSQL_row *) target; - int td_natts = tupdesc ? tupdesc->natts : 0; - int t_natts; - int fnum; - int anum; - - if (HeapTupleIsValid(tup)) - t_natts = HeapTupleHeaderGetNatts(tup->t_data); - else - t_natts = 0; anum = 0; for (fnum = 0; fnum < row->nfields; fnum++) @@ -6022,9 +6574,6 @@ exec_move_row(PLpgSQL_execstate *estate, Oid valtype; int32 valtypmod; - if (row->varnos[fnum] < 0) - continue; /* skip dropped column in row struct */ - var = (PLpgSQL_var *) (estate->datums[row->varnos[fnum]]); while (anum < td_natts && @@ -6033,13 +6582,8 @@ exec_move_row(PLpgSQL_execstate *estate, if (anum < td_natts) { - if (anum < t_natts) - value = SPI_getbinval(tup, tupdesc, anum + 1, &isnull); - else - { - value = (Datum) 0; - isnull = true; - } + value = values[anum]; + isnull = nulls[anum]; valtype = TupleDescAttr(tupdesc, anum)->atttypid; valtypmod = TupleDescAttr(tupdesc, anum)->atttypmod; anum++; @@ -6062,6 +6606,47 @@ exec_move_row(PLpgSQL_execstate *estate, elog(ERROR, "unsupported target"); } +/* + * compatible_tupdescs: detect whether two tupdescs are physically compatible + * + * TRUE indicates that a tuple satisfying src_tupdesc can be used directly as + * a value for a composite variable using dst_tupdesc. + */ +static bool +compatible_tupdescs(TupleDesc src_tupdesc, TupleDesc dst_tupdesc) +{ + int i; + + /* Possibly we could allow src_tupdesc to have extra columns? 
*/ + if (dst_tupdesc->natts != src_tupdesc->natts) + return false; + + for (i = 0; i < dst_tupdesc->natts; i++) + { + Form_pg_attribute dattr = TupleDescAttr(dst_tupdesc, i); + Form_pg_attribute sattr = TupleDescAttr(src_tupdesc, i); + + if (dattr->attisdropped != sattr->attisdropped) + return false; + if (!dattr->attisdropped) + { + /* Normal columns must match by type and typmod */ + if (dattr->atttypid != sattr->atttypid || + (dattr->atttypmod >= 0 && + dattr->atttypmod != sattr->atttypmod)) + return false; + } + else + { + /* Dropped columns are OK as long as length/alignment match */ + if (dattr->attlen != sattr->attlen || + dattr->attalign != sattr->attalign) + return false; + } + } + return true; +} + /* ---------- * make_tuple_from_row Make a tuple from the values of a row object * @@ -6098,8 +6683,6 @@ make_tuple_from_row(PLpgSQL_execstate *estate, nulls[i] = true; /* leave the column as null */ continue; } - if (row->varnos[i] < 0) /* should not happen */ - elog(ERROR, "dropped rowtype entry for non-dropped column"); exec_eval_datum(estate, estate->datums[row->varnos[i]], &fieldtypeid, &fieldtypmod, @@ -6114,86 +6697,290 @@ make_tuple_from_row(PLpgSQL_execstate *estate, return tuple; } -/* ---------- - * get_tuple_from_datum extract a tuple from a composite Datum +/* + * deconstruct_composite_datum extract tuple+tupdesc from composite Datum * - * Returns a HeapTuple, freshly palloc'd in caller's context. + * The caller must supply a HeapTupleData variable, in which we set up a + * tuple header pointing to the composite datum's body. To make the tuple + * value outlive that variable, caller would need to apply heap_copytuple... + * but current callers only need a short-lived tuple value anyway. * - * Note: it's caller's responsibility to be sure value is of composite type. - * ---------- - */ -static HeapTuple -get_tuple_from_datum(Datum value) -{ - HeapTupleHeader td = DatumGetHeapTupleHeader(value); - HeapTupleData tmptup; - - /* Build a temporary HeapTuple control structure */ - tmptup.t_len = HeapTupleHeaderGetDatumLength(td); - ItemPointerSetInvalid(&(tmptup.t_self)); - tmptup.t_tableOid = InvalidOid; - tmptup.t_data = td; - - /* Build a copy and return it */ - return heap_copytuple(&tmptup); -} - -/* ---------- - * get_tupdesc_from_datum get a tuple descriptor for a composite Datum - * - * Returns a pointer to the TupleDesc of the tuple's rowtype. + * Returns a pointer to the TupleDesc of the datum's rowtype. * Caller is responsible for calling ReleaseTupleDesc when done with it. * * Note: it's caller's responsibility to be sure value is of composite type. - * ---------- + * Also, best to call this in a short-lived context, as it might leak memory. 
*/ static TupleDesc -get_tupdesc_from_datum(Datum value) +deconstruct_composite_datum(Datum value, HeapTupleData *tmptup) { - HeapTupleHeader td = DatumGetHeapTupleHeader(value); + HeapTupleHeader td; Oid tupType; int32 tupTypmod; + /* Get tuple body (note this could involve detoasting) */ + td = DatumGetHeapTupleHeader(value); + + /* Build a temporary HeapTuple control structure */ + tmptup->t_len = HeapTupleHeaderGetDatumLength(td); + ItemPointerSetInvalid(&(tmptup->t_self)); + tmptup->t_tableOid = InvalidOid; + tmptup->t_data = td; + /* Extract rowtype info and find a tupdesc */ tupType = HeapTupleHeaderGetTypeId(td); tupTypmod = HeapTupleHeaderGetTypMod(td); return lookup_rowtype_tupdesc(tupType, tupTypmod); } -/* ---------- +/* * exec_move_row_from_datum Move a composite Datum into a record or row * - * This is equivalent to get_tuple_from_datum() followed by exec_move_row(), - * but we avoid constructing an intermediate physical copy of the tuple. - * ---------- + * This is equivalent to deconstruct_composite_datum() followed by + * exec_move_row(), but we can optimize things if the Datum is an + * expanded-record reference. + * + * Note: it's caller's responsibility to be sure value is of composite type. */ static void exec_move_row_from_datum(PLpgSQL_execstate *estate, PLpgSQL_variable *target, Datum value) { - HeapTupleHeader td = DatumGetHeapTupleHeader(value); - Oid tupType; - int32 tupTypmod; - TupleDesc tupdesc; - HeapTupleData tmptup; + /* Check to see if source is an expanded record */ + if (VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(value))) + { + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) DatumGetEOHP(value); + ExpandedRecordHeader *newerh = NULL; - /* Extract rowtype info and find a tupdesc */ - tupType = HeapTupleHeaderGetTypeId(td); - tupTypmod = HeapTupleHeaderGetTypMod(td); - tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); + Assert(erh->er_magic == ER_MAGIC); - /* Build a temporary HeapTuple control structure */ - tmptup.t_len = HeapTupleHeaderGetDatumLength(td); - ItemPointerSetInvalid(&(tmptup.t_self)); - tmptup.t_tableOid = InvalidOid; - tmptup.t_data = td; + /* These cases apply if the target is record not row... */ + if (target->dtype == PLPGSQL_DTYPE_REC) + { + PLpgSQL_rec *rec = (PLpgSQL_rec *) target; - /* Do the move */ - exec_move_row(estate, target, &tmptup, tupdesc); + /* + * If it's the same record already stored in the variable, do + * nothing. This would happen only in silly cases like "r := r", + * but we need some check to avoid possibly freeing the variable's + * live value below. Note that this applies even if what we have + * is a R/O pointer. + */ + if (erh == rec->erh) + return; - /* Release tupdesc usage count */ - ReleaseTupleDesc(tupdesc); + /* + * If we have a R/W pointer, we're allowed to just commandeer + * ownership of the expanded record. If it's of the right type to + * put into the record variable, do that. (Note we don't accept + * an expanded record of a composite-domain type as a RECORD + * value. We'll treat it as the base composite type instead; + * compare logic in make_expanded_record_for_rec.) 
+ */ + if (VARATT_IS_EXTERNAL_EXPANDED_RW(DatumGetPointer(value)) && + (rec->rectypeid == erh->er_decltypeid || + (rec->rectypeid == RECORDOID && + !ExpandedRecordIsDomain(erh)))) + { + assign_record_var(estate, rec, erh); + return; + } + + /* + * If we already have an expanded record object in the target + * variable, and the source record contains a valid tuple + * representation with the right rowtype, then we can skip making + * a new expanded record and just assign the tuple with + * expanded_record_set_tuple. (We can't do the equivalent if we + * have to do field-by-field assignment, since that wouldn't be + * atomic if there's an error.) We consider that there's a + * rowtype match only if it's the same named composite type or + * same registered rowtype; checking for matches of anonymous + * rowtypes would be more expensive than this is worth. + */ + if (rec->erh && + (erh->flags & ER_FLAG_FVALUE_VALID) && + erh->er_typeid == rec->erh->er_typeid && + (erh->er_typeid != RECORDOID || + (erh->er_typmod == rec->erh->er_typmod && + erh->er_typmod >= 0))) + { + expanded_record_set_tuple(rec->erh, erh->fvalue, true); + return; + } + + /* + * Otherwise we're gonna need a new expanded record object. Make + * it here in hopes of piggybacking on the source object's + * previous typcache lookup. + */ + newerh = make_expanded_record_for_rec(estate, rec, NULL, erh); + + /* + * If the expanded record contains a valid tuple representation, + * and we don't need rowtype conversion, then just copying the + * tuple is probably faster than field-by-field processing. (This + * isn't duplicative of the previous check, since here we will + * catch the case where the record variable was previously empty.) + */ + if ((erh->flags & ER_FLAG_FVALUE_VALID) && + (rec->rectypeid == RECORDOID || + rec->rectypeid == erh->er_typeid)) + { + expanded_record_set_tuple(newerh, erh->fvalue, true); + assign_record_var(estate, rec, newerh); + return; + } + + /* + * Need to special-case empty source record, else code below would + * leak newerh. + */ + if (ExpandedRecordIsEmpty(erh)) + { + /* Set newerh to a row of NULLs */ + deconstruct_expanded_record(newerh); + assign_record_var(estate, rec, newerh); + return; + } + } /* end of record-target-only cases */ + + /* + * If the source expanded record is empty, we should treat that like a + * NULL tuple value. (We're unlikely to see such a case, but we must + * check this; deconstruct_expanded_record would cause a change of + * logical state, which is not OK.) + */ + if (ExpandedRecordIsEmpty(erh)) + { + exec_move_row(estate, target, NULL, + expanded_record_get_tupdesc(erh)); + return; + } + + /* + * Otherwise, ensure that the source record is deconstructed, and + * assign from its field values. + */ + deconstruct_expanded_record(erh); + exec_move_row_from_fields(estate, target, newerh, + erh->dvalues, erh->dnulls, + expanded_record_get_tupdesc(erh)); + } + else + { + /* + * Nope, we've got a plain composite Datum. Deconstruct it; but we + * don't use deconstruct_composite_datum(), because we may be able to + * skip calling lookup_rowtype_tupdesc(). 
+ */ + HeapTupleHeader td; + HeapTupleData tmptup; + Oid tupType; + int32 tupTypmod; + TupleDesc tupdesc; + MemoryContext oldcontext; + + /* Ensure that any detoasted data winds up in the eval_mcontext */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); + /* Get tuple body (note this could involve detoasting) */ + td = DatumGetHeapTupleHeader(value); + MemoryContextSwitchTo(oldcontext); + + /* Build a temporary HeapTuple control structure */ + tmptup.t_len = HeapTupleHeaderGetDatumLength(td); + ItemPointerSetInvalid(&(tmptup.t_self)); + tmptup.t_tableOid = InvalidOid; + tmptup.t_data = td; + + /* Extract rowtype info */ + tupType = HeapTupleHeaderGetTypeId(td); + tupTypmod = HeapTupleHeaderGetTypMod(td); + + /* Now, if the target is record not row, maybe we can optimize ... */ + if (target->dtype == PLPGSQL_DTYPE_REC) + { + PLpgSQL_rec *rec = (PLpgSQL_rec *) target; + + /* + * If we already have an expanded record object in the target + * variable, and the source datum has a matching rowtype, then we + * can skip making a new expanded record and just assign the tuple + * with expanded_record_set_tuple. We consider that there's a + * rowtype match only if it's the same named composite type or + * same registered rowtype. (Checking to reject an anonymous + * rowtype here should be redundant, but let's be safe.) + */ + if (rec->erh && + tupType == rec->erh->er_typeid && + (tupType != RECORDOID || + (tupTypmod == rec->erh->er_typmod && + tupTypmod >= 0))) + { + expanded_record_set_tuple(rec->erh, &tmptup, true); + return; + } + + /* + * If the source datum has a rowtype compatible with the target + * variable, just build a new expanded record and assign the tuple + * into it. Using make_expanded_record_from_typeid() here saves + * one typcache lookup compared to the code below. + */ + if (rec->rectypeid == RECORDOID || rec->rectypeid == tupType) + { + ExpandedRecordHeader *newerh; + MemoryContext mcontext = get_eval_mcontext(estate); + + newerh = make_expanded_record_from_typeid(tupType, tupTypmod, + mcontext); + expanded_record_set_tuple(newerh, &tmptup, true); + assign_record_var(estate, rec, newerh); + return; + } + + /* + * Otherwise, we're going to need conversion, so fall through to + * do it the hard way. + */ + } + + /* + * ROW target, or unoptimizable RECORD target, so we have to expend a + * lookup to obtain the source datum's tupdesc. + */ + tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); + + /* Do the move */ + exec_move_row(estate, target, &tmptup, tupdesc); + + /* Release tupdesc usage count */ + ReleaseTupleDesc(tupdesc); + } +} + +/* + * If we have not created an expanded record to hold the record variable's + * value, do so. The expanded record will be "empty", so this does not + * change the logical state of the record variable: it's still NULL. + * However, now we'll have a tupdesc with which we can e.g. look up fields. 
+ */ +static void +instantiate_empty_record_variable(PLpgSQL_execstate *estate, PLpgSQL_rec *rec) +{ + Assert(rec->erh == NULL); /* else caller error */ + + /* If declared type is RECORD, we can't instantiate */ + if (rec->rectypeid == RECORDOID) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("record \"%s\" is not assigned yet", rec->refname), + errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); + + /* OK, do it */ + rec->erh = make_expanded_record_from_typeid(rec->rectypeid, -1, + estate->datum_context); } /* ---------- @@ -6906,6 +7693,26 @@ assign_text_var(PLpgSQL_execstate *estate, PLpgSQL_var *var, const char *str) assign_simple_var(estate, var, CStringGetTextDatum(str), false, true); } +/* + * assign_record_var --- assign a new value to any REC datum. + */ +static void +assign_record_var(PLpgSQL_execstate *estate, PLpgSQL_rec *rec, + ExpandedRecordHeader *erh) +{ + Assert(rec->dtype == PLPGSQL_DTYPE_REC); + + /* Transfer new record object into datum_context */ + TransferExpandedRecord(erh, estate->datum_context); + + /* Free the old value ... */ + if (rec->erh) + DeleteExpandedObject(ExpandedRecordGetDatum(rec->erh)); + + /* ... and install the new */ + rec->erh = erh; +} + /* * exec_eval_using_params --- evaluate params of USING clause * diff --git a/src/pl/plpgsql/src/pl_funcs.c b/src/pl/plpgsql/src/pl_funcs.c index f0e85fcfcd..b36fab67bc 100644 --- a/src/pl/plpgsql/src/pl_funcs.c +++ b/src/pl/plpgsql/src/pl_funcs.c @@ -1618,15 +1618,16 @@ plpgsql_dumptree(PLpgSQL_function *func) printf("ROW %-16s fields", row->refname); for (i = 0; i < row->nfields; i++) { - if (row->fieldnames[i]) - printf(" %s=var %d", row->fieldnames[i], - row->varnos[i]); + printf(" %s=var %d", row->fieldnames[i], + row->varnos[i]); } printf("\n"); } break; case PLPGSQL_DTYPE_REC: - printf("REC %s\n", ((PLpgSQL_rec *) d)->refname); + printf("REC %-16s typoid %u\n", + ((PLpgSQL_rec *) d)->refname, + ((PLpgSQL_rec *) d)->rectypeid); break; case PLPGSQL_DTYPE_RECFIELD: printf("RECFIELD %-16s of REC %d\n", diff --git a/src/pl/plpgsql/src/pl_gram.y b/src/pl/plpgsql/src/pl_gram.y index 42f6a2e161..97c0d4f98a 100644 --- a/src/pl/plpgsql/src/pl_gram.y +++ b/src/pl/plpgsql/src/pl_gram.y @@ -512,7 +512,7 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull else ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("row or record variable cannot be CONSTANT"), + errmsg("record variable cannot be CONSTANT"), parser_errposition(@2))); } if ($5) @@ -522,7 +522,7 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull else ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("row or record variable cannot be NOT NULL"), + errmsg("record variable cannot be NOT NULL"), parser_errposition(@4))); } @@ -533,7 +533,7 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull else ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("default value for row or record variable is not supported"), + errmsg("default value for record variable is not supported"), parser_errposition(@5))); } } @@ -1333,7 +1333,7 @@ for_control : for_variable K_IN { ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("loop variable of loop over rows must be a record or row variable or list of scalar variables"), + errmsg("loop variable of loop over rows must be a record variable or list of scalar variables"), parser_errposition(@1))); } new->query = expr; @@ -1386,6 
+1386,7 @@ for_control : for_variable K_IN new->var = (PLpgSQL_variable *) plpgsql_build_record($1.name, $1.lineno, + RECORDOID, true); $$ = (PLpgSQL_stmt *) new; @@ -1524,7 +1525,7 @@ for_control : for_variable K_IN { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("loop variable of loop over rows must be a record or row variable or list of scalar variables"), + errmsg("loop variable of loop over rows must be a record variable or list of scalar variables"), parser_errposition(@1))); } @@ -3328,7 +3329,7 @@ check_assignable(PLpgSQL_datum *datum, int location) parser_errposition(location))); break; case PLPGSQL_DTYPE_ROW: - /* always assignable? */ + /* always assignable? Shouldn't we check member vars? */ break; case PLPGSQL_DTYPE_REC: /* always assignable? What about NEW/OLD? */ @@ -3385,7 +3386,7 @@ read_into_target(PLpgSQL_variable **target, bool *strict) if ((tok = yylex()) == ',') ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("record or row variable cannot be part of multiple-item INTO list"), + errmsg("record variable cannot be part of multiple-item INTO list"), parser_errposition(yylloc))); plpgsql_push_back_token(tok); } diff --git a/src/pl/plpgsql/src/pl_handler.c b/src/pl/plpgsql/src/pl_handler.c index c49428d923..f38ef04077 100644 --- a/src/pl/plpgsql/src/pl_handler.c +++ b/src/pl/plpgsql/src/pl_handler.c @@ -443,14 +443,15 @@ plpgsql_validator(PG_FUNCTION_ARGS) } /* Disallow pseudotypes in arguments (either IN or OUT) */ - /* except for polymorphic */ + /* except for RECORD and polymorphic */ numargs = get_func_arg_info(tuple, &argtypes, &argnames, &argmodes); for (i = 0; i < numargs; i++) { if (get_typtype(argtypes[i]) == TYPTYPE_PSEUDO) { - if (!IsPolymorphicType(argtypes[i])) + if (argtypes[i] != RECORDOID && + !IsPolymorphicType(argtypes[i])) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("PL/pgSQL functions cannot accept type %s", diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h index a9b9d91de7..d4eb67b738 100644 --- a/src/pl/plpgsql/src/plpgsql.h +++ b/src/pl/plpgsql/src/plpgsql.h @@ -20,6 +20,8 @@ #include "commands/event_trigger.h" #include "commands/trigger.h" #include "executor/spi.h" +#include "utils/expandedrecord.h" + /********************************************************************** * Definitions @@ -37,10 +39,9 @@ */ typedef enum PLpgSQL_nsitem_type { - PLPGSQL_NSTYPE_LABEL, - PLPGSQL_NSTYPE_VAR, - PLPGSQL_NSTYPE_ROW, - PLPGSQL_NSTYPE_REC + PLPGSQL_NSTYPE_LABEL, /* block label */ + PLPGSQL_NSTYPE_VAR, /* scalar variable */ + PLPGSQL_NSTYPE_REC /* composite variable */ } PLpgSQL_nsitem_type; /* @@ -72,9 +73,8 @@ typedef enum PLpgSQL_datum_type typedef enum PLpgSQL_type_type { PLPGSQL_TTYPE_SCALAR, /* scalar types and domains */ - PLPGSQL_TTYPE_ROW, /* composite types */ - PLPGSQL_TTYPE_REC, /* RECORD pseudotype */ - PLPGSQL_TTYPE_PSEUDO /* other pseudotypes */ + PLPGSQL_TTYPE_REC, /* composite types, including RECORD */ + PLPGSQL_TTYPE_PSEUDO /* pseudotypes */ } PLpgSQL_type_type; /* @@ -183,7 +183,6 @@ typedef struct PLpgSQL_type int16 typlen; /* stuff copied from its pg_type entry */ bool typbyval; char typtype; - Oid typrelid; Oid collation; /* from pg_type, but can be overridden */ bool typisarray; /* is "true" array, or domain over one */ int32 atttypmod; /* typmod (taken from someplace else) */ @@ -274,7 +273,12 @@ typedef struct PLpgSQL_var } PLpgSQL_var; /* - * Row variable + * Row variable - this represents one or more variables that are listed in an + * INTO clause, FOR-loop targetlist, 
cursor argument list, etc. We also use + * a row to represent a function's OUT parameters when there's more than one. + * + * Note that there's no way to name the row as such from PL/pgSQL code, + * so many functions don't need to support these. */ typedef struct PLpgSQL_row { @@ -283,21 +287,20 @@ typedef struct PLpgSQL_row char *refname; int lineno; - /* Note: TupleDesc is only set up for named rowtypes, else it is NULL. */ + /* + * rowtupdesc is only set up if we might need to convert the row into a + * composite datum, which currently only happens for OUT parameters. + * Otherwise it is NULL. + */ TupleDesc rowtupdesc; - /* - * Note: if the underlying rowtype contains a dropped column, the - * corresponding fieldnames[] entry will be NULL, and there is no - * corresponding var (varnos[] will be -1). - */ int nfields; char **fieldnames; int *varnos; } PLpgSQL_row; /* - * Record variable (non-fixed structure) + * Record variable (any composite type, including RECORD) */ typedef struct PLpgSQL_rec { @@ -305,11 +308,11 @@ typedef struct PLpgSQL_rec int dno; char *refname; int lineno; - - HeapTuple tup; - TupleDesc tupdesc; - bool freetup; - bool freetupdesc; + Oid rectypeid; /* declared type of variable */ + /* RECFIELDs for this record are chained together for easy access */ + int firstfield; /* dno of first RECFIELD, or -1 if none */ + /* We always store record variables as "expanded" records */ + ExpandedRecordHeader *erh; } PLpgSQL_rec; /* @@ -319,8 +322,12 @@ typedef struct PLpgSQL_recfield { PLpgSQL_datum_type dtype; int dno; - char *fieldname; + char *fieldname; /* name of field */ int recparentno; /* dno of parent record */ + int nextfield; /* dno of next child, or -1 if none */ + uint64 rectupledescid; /* record's tupledesc ID as of last lookup */ + ExpandedRecordFieldInfo finfo; /* field's attnum and type info */ + /* if rectupledescid == INVALID_TUPLEDESC_IDENTIFIER, finfo isn't valid */ } PLpgSQL_recfield; /* @@ -903,12 +910,12 @@ typedef struct PLpgSQL_execstate bool readonly_func; - TupleDesc rettupdesc; char *exitlabel; /* the "target" label of the current EXIT or * CONTINUE stmt, if any */ ErrorData *cur_error; /* current exception handler's error */ Tuplestorestate *tuple_store; /* SRFs accumulate results here */ + TupleDesc tuple_store_desc; /* descriptor for tuples in tuple_store */ MemoryContext tuple_store_cxt; ResourceOwner tuple_store_owner; ReturnSetInfo *rsi; @@ -917,6 +924,8 @@ typedef struct PLpgSQL_execstate int found_varno; int ndatums; PLpgSQL_datum **datums; + /* context containing variable values (same as func's SPI_proc context) */ + MemoryContext datum_context; /* * paramLI is what we use to pass local variable values to the executor. 
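
[Editor's illustrative sketch, not part of the patch.]  The rectupledescid/finfo pair added to PLpgSQL_recfield above is a cache that has to be revalidated against the record's current tupdesc identifier before each field access, per the struct comments.  A minimal sketch of that revalidation pattern follows; the accessor expanded_record_get_tupdesc_id() and the lookup helper expanded_record_lookup_field() are assumed names for this sketch, not necessarily the exact API exported by expandedrecord.h.

/*
 * Illustrative sketch only: revalidate a RECFIELD's cached attnum/type
 * info against the record variable's current rowtype.  The two helpers
 * marked "assumed" are assumptions of this sketch.
 */
static void
revalidate_recfield(PLpgSQL_recfield *recfield, ExpandedRecordHeader *erh)
{
	uint64		tupdesc_id = expanded_record_get_tupdesc_id(erh);	/* assumed accessor */

	if (recfield->rectupledescid == tupdesc_id)
		return;					/* finfo is still valid for this rowtype */

	/* Rowtype changed since last lookup (or never looked up): refresh */
	if (!expanded_record_lookup_field(erh, recfield->fieldname,
									  &recfield->finfo))	/* assumed helper */
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_COLUMN),
				 errmsg("record has no field \"%s\"", recfield->fieldname)));

	recfield->rectupledescid = tupdesc_id;
}

The same compare-identifier-then-refresh idea appears later in the patch for PL/Python's tupdescid fields, so this is the general consumption pattern for the new tupdesc identifiers.
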
@@ -1088,7 +1097,9 @@ extern PLpgSQL_variable *plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type *dtype, bool add2namespace); extern PLpgSQL_rec *plpgsql_build_record(const char *refname, int lineno, - bool add2namespace); + Oid rectypeid, bool add2namespace); +extern PLpgSQL_recfield *plpgsql_build_recfield(PLpgSQL_rec *rec, + const char *fldname); extern int plpgsql_recognize_err_condition(const char *condname, bool allow_sqlstate); extern PLpgSQL_condition *plpgsql_parse_err_condition(char *condname); diff --git a/src/pl/plpgsql/src/sql/plpgsql_record.sql b/src/pl/plpgsql/src/sql/plpgsql_record.sql new file mode 100644 index 0000000000..069d2643cf --- /dev/null +++ b/src/pl/plpgsql/src/sql/plpgsql_record.sql @@ -0,0 +1,441 @@ +-- +-- Tests for PL/pgSQL handling of composite (record) variables +-- + +create type two_int4s as (f1 int4, f2 int4); +create type two_int8s as (q1 int8, q2 int8); + +-- base-case return of a composite type +create function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1,1)::two_int8s; end $$; +select retc(42); + +-- ok to return a matching record type +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1::int8, 1::int8); end $$; +select retc(42); + +-- we don't currently support implicit casting +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1,1); end $$; +select retc(42); + +-- nor extra columns +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1::int8, 1::int8, 42); end $$; +select retc(42); + +-- same cases with an intermediate "record" variable +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1::int8, 1::int8); return r; end $$; +select retc(42); + +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1,1); return r; end $$; +select retc(42); + +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1::int8, 1::int8, 42); return r; end $$; +select retc(42); + +-- but, for mostly historical reasons, we do convert when assigning +-- to a named-composite-type variable +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r two_int8s; begin r := row($1::int8, 1::int8, 42); return r; end $$; +select retc(42); + +do $$ declare c two_int8s; +begin c := row(1,2); raise notice 'c = %', c; end$$; + +do $$ declare c two_int8s; +begin for c in select 1,2 loop raise notice 'c = %', c; end loop; end$$; + +do $$ declare c4 two_int4s; c8 two_int8s; +begin + c8 := row(1,2); + c4 := c8; + c8 := c4; + raise notice 'c4 = %', c4; + raise notice 'c8 = %', c8; +end$$; + +-- check passing composite result to another function +create function getq1(two_int8s) returns int8 language plpgsql as $$ +declare r two_int8s; begin r := $1; return r.q1; end $$; + +select getq1(retc(344)); +select getq1(row(1,2)); + +do $$ +declare r1 two_int8s; r2 record; x int8; +begin + r1 := retc(345); + perform getq1(r1); + x := getq1(r1); + raise notice 'x = %', x; + r2 := retc(346); + perform getq1(r2); + x := getq1(r2); + raise notice 'x = %', x; +end$$; + +-- check assignments of composites +do $$ +declare r1 two_int8s; r2 two_int8s; r3 record; r4 record; +begin + r1 := row(1,2); + raise notice 'r1 = %', r1; + r1 := r1; -- shouldn't do anything + raise notice 'r1 = %', r1; + r2 := r1; + raise notice 'r1 
= %', r1; + raise notice 'r2 = %', r2; + r2.q2 = r1.q1 + 3; -- check that r2 has distinct storage + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r1 := null; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r1 := row(7,11)::two_int8s; + r2 := r1; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r3 := row(1,2); + r4 := r3; + raise notice 'r3 = %', r3; + raise notice 'r4 = %', r4; + r4.f1 := r4.f1 + 3; -- check that r4 has distinct storage + raise notice 'r3 = %', r3; + raise notice 'r4 = %', r4; + r1 := r3; + raise notice 'r1 = %', r1; + r4 := r1; + raise notice 'r4 = %', r4; + r4.q2 := r4.q2 + 1; -- r4's field names have changed + raise notice 'r4 = %', r4; +end$$; + +-- fields of named-type vars read as null if uninitialized +do $$ +declare r1 two_int8s; +begin + raise notice 'r1 = %', r1; + raise notice 'r1.q1 = %', r1.q1; + raise notice 'r1.q2 = %', r1.q2; + raise notice 'r1 = %', r1; +end$$; + +do $$ +declare r1 two_int8s; +begin + raise notice 'r1.q1 = %', r1.q1; + raise notice 'r1.q2 = %', r1.q2; + raise notice 'r1 = %', r1; + raise notice 'r1.nosuchfield = %', r1.nosuchfield; +end$$; + +-- records, not so much +do $$ +declare r1 record; +begin + raise notice 'r1 = %', r1; + raise notice 'r1.f1 = %', r1.f1; + raise notice 'r1.f2 = %', r1.f2; + raise notice 'r1 = %', r1; +end$$; + +-- but OK if you assign first +do $$ +declare r1 record; +begin + raise notice 'r1 = %', r1; + r1 := row(1,2); + raise notice 'r1.f1 = %', r1.f1; + raise notice 'r1.f2 = %', r1.f2; + raise notice 'r1 = %', r1; + raise notice 'r1.nosuchfield = %', r1.nosuchfield; +end$$; + +-- check repeated assignments to composite fields +create table some_table (id int, data text); + +do $$ +declare r some_table; +begin + r := (23, 'skidoo'); + for i in 1 .. 
10 loop + r.id := r.id + i; + r.data := r.data || ' ' || i; + end loop; + raise notice 'r = %', r; +end$$; + +-- check behavior of function declared to return "record" + +create function returnsrecord(int) returns record language plpgsql as +$$ begin return row($1,$1+1); end $$; + +select returnsrecord(42); +select * from returnsrecord(42) as r(x int, y int); +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +select * from returnsrecord(42) as r(x int, y bigint); -- fail + +-- same with an intermediate record variable +create or replace function returnsrecord(int) returns record language plpgsql as +$$ declare r record; begin r := row($1,$1+1); return r; end $$; + +select returnsrecord(42); +select * from returnsrecord(42) as r(x int, y int); +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +select * from returnsrecord(42) as r(x int, y bigint); -- fail + +-- should work the same with a missing column in the actual result value +create table has_hole(f1 int, f2 int, f3 int); +alter table has_hole drop column f2; + +create or replace function returnsrecord(int) returns record language plpgsql as +$$ begin return row($1,$1+1)::has_hole; end $$; + +select returnsrecord(42); +select * from returnsrecord(42) as r(x int, y int); +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +select * from returnsrecord(42) as r(x int, y bigint); -- fail + +-- same with an intermediate record variable +create or replace function returnsrecord(int) returns record language plpgsql as +$$ declare r record; begin r := row($1,$1+1)::has_hole; return r; end $$; + +select returnsrecord(42); +select * from returnsrecord(42) as r(x int, y int); +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +select * from returnsrecord(42) as r(x int, y bigint); -- fail + +-- check access to a field of an argument declared "record" +create function getf1(x record) returns int language plpgsql as +$$ begin return x.f1; end $$; +select getf1(1); +select getf1(row(1,2)); +select getf1(row(1,2)::two_int8s); +select getf1(row(1,2)); + +-- check behavior when assignment to FOR-loop variable requires coercion +do $$ +declare r two_int8s; +begin + for r in select i, i+1 from generate_series(1,4) i + loop + raise notice 'r = %', r; + end loop; +end$$; + +-- check behavior when returning setof composite +create function returnssetofholes() returns setof has_hole language plpgsql as +$$ +declare r record; + h has_hole; +begin + return next h; + r := (1,2); + h := (3,4); + return next r; + return next h; + return next row(5,6); + return next row(7,8)::has_hole; +end$$; +select returnssetofholes(); + +create or replace function returnssetofholes() returns setof has_hole language plpgsql as +$$ +declare r record; +begin + return next r; -- fails, not assigned yet +end$$; +select returnssetofholes(); + +create or replace function returnssetofholes() returns setof has_hole language plpgsql as +$$ +begin + return next row(1,2,3); -- fails +end$$; +select returnssetofholes(); + +-- check behavior with changes of a named rowtype +create table mutable(f1 int, f2 text); + +create function sillyaddone(int) returns int language plpgsql as +$$ declare r mutable; begin r.f1 := $1; return r.f1 + 1; end $$; +select sillyaddone(42); + +alter table mutable drop column f1; +alter table mutable add column f1 float8; + +-- currently, this fails due to cached plan for "r.f1 + 1" expression +select sillyaddone(42); +\c - +-- but it's OK after a reconnect +select sillyaddone(42); + +alter table 
mutable drop column f1; +select sillyaddone(42); -- fail + +create function getf3(x mutable) returns int language plpgsql as +$$ begin return x.f3; end $$; +select getf3(null::mutable); -- doesn't work yet +alter table mutable add column f3 int; +select getf3(null::mutable); -- now it works +alter table mutable drop column f3; +select getf3(null::mutable); -- fails again + +-- check access to system columns in a record variable + +create function sillytrig() returns trigger language plpgsql as +$$begin + raise notice 'old.ctid = %', old.ctid; + raise notice 'old.tableoid = %', old.tableoid::regclass; + return new; +end$$; + +create trigger mutable_trig before update on mutable for each row +execute procedure sillytrig(); + +insert into mutable values ('foo'), ('bar'); +update mutable set f2 = f2 || ' baz'; +table mutable; + +-- check returning a composite datum from a trigger + +create or replace function sillytrig() returns trigger language plpgsql as +$$begin + return row(new.*); +end$$; + +update mutable set f2 = f2 || ' baz'; +table mutable; + +create or replace function sillytrig() returns trigger language plpgsql as +$$declare r record; +begin + r := row(new.*); + return r; +end$$; + +update mutable set f2 = f2 || ' baz'; +table mutable; + +-- +-- Domains of composite +-- + +create domain ordered_int8s as two_int8s check((value).q1 <= (value).q2); + +create function read_ordered_int8s(p ordered_int8s) returns int8 as $$ +begin return p.q1 + p.q2; end +$$ language plpgsql; + +select read_ordered_int8s(row(1, 2)); +select read_ordered_int8s(row(2, 1)); -- fail + +create function build_ordered_int8s(i int8, j int8) returns ordered_int8s as $$ +begin return row(i,j); end +$$ language plpgsql; + +select build_ordered_int8s(1,2); +select build_ordered_int8s(2,1); -- fail + +create function build_ordered_int8s_2(i int8, j int8) returns ordered_int8s as $$ +declare r record; begin r := row(i,j); return r; end +$$ language plpgsql; + +select build_ordered_int8s_2(1,2); +select build_ordered_int8s_2(2,1); -- fail + +create function build_ordered_int8s_3(i int8, j int8) returns ordered_int8s as $$ +declare r two_int8s; begin r := row(i,j); return r; end +$$ language plpgsql; + +select build_ordered_int8s_3(1,2); +select build_ordered_int8s_3(2,1); -- fail + +create function build_ordered_int8s_4(i int8, j int8) returns ordered_int8s as $$ +declare r ordered_int8s; begin r := row(i,j); return r; end +$$ language plpgsql; + +select build_ordered_int8s_4(1,2); +select build_ordered_int8s_4(2,1); -- fail + +create function build_ordered_int8s_a(i int8, j int8) returns ordered_int8s[] as $$ +begin return array[row(i,j), row(i,j+1)]; end +$$ language plpgsql; + +select build_ordered_int8s_a(1,2); +select build_ordered_int8s_a(2,1); -- fail + +-- check field assignment +do $$ +declare r ordered_int8s; +begin + r.q1 := null; + r.q2 := 43; + r.q1 := 42; + r.q2 := 41; -- fail +end$$; + +-- check whole-row assignment +do $$ +declare r ordered_int8s; +begin + r := null; + r := row(null,null); + r := row(1,2); + r := row(2,1); -- fail +end$$; + +-- check assignment in for-loop +do $$ +declare r ordered_int8s; +begin + for r in values (1,2),(3,4),(6,5) loop + raise notice 'r = %', r; + end loop; +end$$; + +-- check behavior with toastable fields, too + +create type two_texts as (f1 text, f2 text); +create domain ordered_texts as two_texts check((value).f1 <= (value).f2); + +create table sometable (id int, a text, b text); +-- b should be compressed, but in-line +insert into sometable values (1, 'a', 
repeat('ffoob',1000)); +-- this b should be out-of-line +insert into sometable values (2, 'a', repeat('ffoob',100000)); +-- this pair should fail the domain check +insert into sometable values (3, 'z', repeat('ffoob',100000)); + +do $$ +declare d ordered_texts; +begin + for d in select a, b from sometable loop + raise notice 'succeeded at "%"', d.f1; + end loop; +end$$; + +do $$ +declare r record; d ordered_texts; +begin + for r in select * from sometable loop + raise notice 'processing row %', r.id; + d := row(r.a, r.b); + end loop; +end$$; + +do $$ +declare r record; d ordered_texts; +begin + for r in select * from sometable loop + raise notice 'processing row %', r.id; + d := null; + d.f1 := r.a; + d.f2 := r.b; + end loop; +end$$; diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c index 6c6b16f4d7..d6a6a849c3 100644 --- a/src/pl/plpython/plpy_typeio.c +++ b/src/pl/plpython/plpy_typeio.c @@ -384,7 +384,7 @@ PLy_output_setup_func(PLyObToDatum *arg, MemoryContext arg_mcxt, /* We'll set up the per-field data later */ arg->u.tuple.recdesc = NULL; arg->u.tuple.typentry = typentry; - arg->u.tuple.tupdescseq = typentry ? typentry->tupDescSeqNo - 1 : 0; + arg->u.tuple.tupdescid = INVALID_TUPLEDESC_IDENTIFIER; arg->u.tuple.atts = NULL; arg->u.tuple.natts = 0; /* Mark this invalid till needed, too */ @@ -499,7 +499,7 @@ PLy_input_setup_func(PLyDatumToOb *arg, MemoryContext arg_mcxt, /* We'll set up the per-field data later */ arg->u.tuple.recdesc = NULL; arg->u.tuple.typentry = typentry; - arg->u.tuple.tupdescseq = typentry ? typentry->tupDescSeqNo - 1 : 0; + arg->u.tuple.tupdescid = INVALID_TUPLEDESC_IDENTIFIER; arg->u.tuple.atts = NULL; arg->u.tuple.natts = 0; } @@ -969,11 +969,11 @@ PLyObject_ToComposite(PLyObToDatum *arg, PyObject *plrv, /* We should have the descriptor of the type's typcache entry */ Assert(desc == arg->u.tuple.typentry->tupDesc); /* Detect change of descriptor, update cache if needed */ - if (arg->u.tuple.tupdescseq != arg->u.tuple.typentry->tupDescSeqNo) + if (arg->u.tuple.tupdescid != arg->u.tuple.typentry->tupDesc_identifier) { PLy_output_setup_tuple(arg, desc, PLy_current_execution_context()->curr_proc); - arg->u.tuple.tupdescseq = arg->u.tuple.typentry->tupDescSeqNo; + arg->u.tuple.tupdescid = arg->u.tuple.typentry->tupDesc_identifier; } } else diff --git a/src/pl/plpython/plpy_typeio.h b/src/pl/plpython/plpy_typeio.h index 91870c91b0..82bdfae548 100644 --- a/src/pl/plpython/plpy_typeio.h +++ b/src/pl/plpython/plpy_typeio.h @@ -42,7 +42,7 @@ typedef struct PLyTupleToOb TupleDesc recdesc; /* If we're dealing with a named composite type, these fields are set: */ TypeCacheEntry *typentry; /* typcache entry for type */ - int64 tupdescseq; /* last tupdesc seqno seen in typcache */ + uint64 tupdescid; /* last tupdesc identifier seen in typcache */ /* These fields are NULL/0 if not yet set: */ PLyDatumToOb *atts; /* array of per-column conversion info */ int natts; /* length of array */ @@ -107,7 +107,7 @@ typedef struct PLyObToTuple TupleDesc recdesc; /* If we're dealing with a named composite type, these fields are set: */ TypeCacheEntry *typentry; /* typcache entry for type */ - int64 tupdescseq; /* last tupdesc seqno seen in typcache */ + uint64 tupdescid; /* last tupdesc identifier seen in typcache */ /* These fields are NULL/0 if not yet set: */ PLyObToDatum *atts; /* array of per-column conversion info */ int natts; /* length of array */ diff --git a/src/test/regress/expected/plpgsql.out b/src/test/regress/expected/plpgsql.out index 
4f9501db00..0c1da08869 100644 --- a/src/test/regress/expected/plpgsql.out +++ b/src/test/regress/expected/plpgsql.out @@ -4595,23 +4595,32 @@ begin x int; y int := i; r record; + c int8_tbl; begin if i = 1 then x := 42; r := row(i, i+1); + c := row(i, i+1); end if; raise notice 'x = %', x; raise notice 'y = %', y; raise notice 'r = %', r; + raise notice 'c = %', c; end; end loop; end$$; NOTICE: x = 42 NOTICE: y = 1 NOTICE: r = (1,2) +NOTICE: c = (1,2) NOTICE: x = NOTICE: y = 2 -ERROR: record "r" is not assigned yet +NOTICE: r = +NOTICE: c = +NOTICE: x = +NOTICE: y = 3 +NOTICE: r = +NOTICE: c = \set VERBOSITY default -- Check handling of conflicts between plpgsql vars and table columns. set plpgsql.variable_conflict = error; diff --git a/src/test/regress/sql/plpgsql.sql b/src/test/regress/sql/plpgsql.sql index 3914651bf6..6bdcfe7cc5 100644 --- a/src/test/regress/sql/plpgsql.sql +++ b/src/test/regress/sql/plpgsql.sql @@ -3745,14 +3745,17 @@ begin x int; y int := i; r record; + c int8_tbl; begin if i = 1 then x := 42; r := row(i, i+1); + c := row(i, i+1); end if; raise notice 'x = %', x; raise notice 'y = %', y; raise notice 'r = %', r; + raise notice 'c = %', c; end; end loop; end$$;
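
[Editor's illustrative sketch, not part of the patch.]  The expanded-record assignment paths in pl_exec.c earlier in the patch all follow the contract stated in make_expanded_record_for_rec()'s header comment: build the new object, fill it with data, then hand it over with assign_record_var().  The condensed sketch below shows that calling sequence in one place; the wrapper itself is hypothetical, while the functions it calls are the ones introduced or used by the patch.  It deliberately omits the compatibility check that the real exec_move_row() performs before adopting a tuple directly (the real code falls back to field-by-field assignment when rowtypes differ).

/*
 * Hypothetical wrapper, for illustration only: assign a tuple (or NULL)
 * to a record variable using the build/fill/assign contract.
 */
static void
assign_tuple_to_rec(PLpgSQL_execstate *estate, PLpgSQL_rec *rec,
					HeapTuple tup, TupleDesc tupdesc)
{
	ExpandedRecordHeader *newerh;

	/*
	 * Build an empty expanded record of the right rowtype.  It lives in the
	 * eval_mcontext until assignment, so an error before assign_record_var()
	 * does not leak into the variable's long-lived context.
	 */
	newerh = make_expanded_record_for_rec(estate, rec, tupdesc, NULL);

	if (HeapTupleIsValid(tup))
		expanded_record_set_tuple(newerh, tup, true);	/* adopt the row value */
	else
		deconstruct_expanded_record(newerh);	/* force all-nulls state */

	/*
	 * Only now install it as the variable's value; assign_record_var()
	 * transfers the object into estate->datum_context and frees the old one.
	 */
	assign_record_var(estate, rec, newerh);
}
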