diff --git a/doc/src/sgml/arch-dev.sgml b/doc/src/sgml/arch-dev.sgml index 54386ca264..0863920ab2 100644 --- a/doc/src/sgml/arch-dev.sgml +++ b/doc/src/sgml/arch-dev.sgml @@ -1,4 +1,4 @@ - + Overview of PostgreSQL Internals @@ -58,7 +58,7 @@ The rewrite system takes the query tree created by the parser stage and looks for any rules (stored in the - system catalogs) to apply to + system catalogs) to apply to the query tree. It performs the transformations given in the rule bodies. @@ -77,7 +77,7 @@ The planner/optimizer takes - the (rewritten) query tree and creates a + the (rewritten) query tree and creates a query plan that will be the input to the executor. @@ -163,8 +163,8 @@ The parser defined in gram.y and scan.l is - built using the Unix tools yacc - and lex. + built using the Unix tools bison + and flex. @@ -184,8 +184,8 @@ ASCII text) for valid syntax. If the syntax is correct a parse tree is built up and handed back; otherwise an error is returned. The parser and lexer are - implemented using the well-known Unix tools yacc - and lex. + implemented using the well-known Unix tools bison + and flex. @@ -208,13 +208,13 @@ The file scan.l is transformed to the C source file scan.c using the program - lex and gram.y is + flex and gram.y is transformed to gram.c using - yacc. After these transformations + bison. After these transformations have taken place a normal C compiler can be used to create the parser. Never make any changes to the generated C files as they - will be overwritten the next time lex - or yacc is called. + will be overwritten the next time flex + or bison is called. @@ -227,12 +227,12 @@ - A detailed description of yacc or + A detailed description of bison or the grammar rules given in gram.y would be beyond the scope of this paper. There are many books and - documents dealing with lex and - yacc. You should be familiar with - yacc before you start to study the + documents dealing with flex and + bison. You should be familiar with + bison before you start to study the grammar given in gram.y otherwise you won't understand what happens there. @@ -299,7 +299,7 @@ called whenever an individual row had been accessed. This implementation was removed in 1995 when the last official release of the Berkeley Postgres project was - transformed into Postgres95. + transformed into Postgres95. @@ -479,7 +479,7 @@ Executor - The executor takes the plan handed back by the + The executor takes the plan created by the planner/optimizer and recursively processes it to extract the required set of rows. This is essentially a demand-pull pipeline mechanism. Each time a plan node is called, it must deliver one more row, or @@ -488,7 +488,7 @@ To provide a concrete example, assume that the top - node is a MergeJoin node. + node is a MergeJoin node. Before any merge can be done two rows have to be fetched (one from each subplan). So the executor recursively calls itself to process the subplans (it starts with the subplan attached to @@ -533,17 +533,21 @@ DELETE. For SELECT, the top-level executor code only needs to send each row returned by the query plan tree off to the client. For INSERT, each returned row is inserted - into the target table specified for the INSERT. (A simple + into the target table specified for the INSERT. This is + done in a special top-level plan node called ModifyTable. + (A simple INSERT ... VALUES command creates a trivial plan tree consisting of a single Result node, which computes just one - result row. But INSERT ... 
SELECT can demand the full power + result row, and ModifyTable above it to perform the insertion. + But INSERT ... SELECT can demand the full power of the executor mechanism.) For UPDATE, the planner arranges that each computed row includes all the updated column values, plus the TID (tuple ID, or row ID) of the original target row; - the executor top level uses this information to create a new updated row - and mark the old row deleted. For DELETE, the only column - that is actually returned by the plan is the TID, and the executor top - level simply uses the TID to visit each target row and mark it deleted. + this data is fed into a ModifyTable node, which uses the + information to create a new updated row and mark the old row deleted. + For DELETE, the only column that is actually returned by the + plan is the TID, and the ModifyTable node simply uses the TID + to visit each target row and mark it deleted. diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index ef3d9beb06..048d12f97a 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994-5, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.190 2009/08/22 02:06:32 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.191 2009/10/10 01:43:45 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -581,6 +581,7 @@ ExplainNode(Plan *plan, PlanState *planstate, const char *pname; /* node type name for text output */ const char *sname; /* node type name for non-text output */ const char *strategy = NULL; + const char *operation = NULL; int save_indent = es->indent; bool haschildren; @@ -591,6 +592,24 @@ ExplainNode(Plan *plan, PlanState *planstate, case T_Result: pname = sname = "Result"; break; + case T_ModifyTable: + sname = "ModifyTable"; + switch (((ModifyTable *) plan)->operation) + { + case CMD_INSERT: + pname = operation = "Insert"; + break; + case CMD_UPDATE: + pname = operation = "Update"; + break; + case CMD_DELETE: + pname = operation = "Delete"; + break; + default: + pname = "???"; + break; + } + break; case T_Append: pname = sname = "Append"; break; @@ -736,6 +755,8 @@ ExplainNode(Plan *plan, PlanState *planstate, ExplainPropertyText("Node Type", sname, es); if (strategy) ExplainPropertyText("Strategy", strategy, es); + if (operation) + ExplainPropertyText("Operation", operation, es); if (relationship) ExplainPropertyText("Parent Relationship", relationship, es); if (plan_name) @@ -1023,6 +1044,7 @@ ExplainNode(Plan *plan, PlanState *planstate, haschildren = plan->initPlan || outerPlan(plan) || innerPlan(plan) || + IsA(plan, ModifyTable) || IsA(plan, Append) || IsA(plan, BitmapAnd) || IsA(plan, BitmapOr) || @@ -1059,6 +1081,11 @@ ExplainNode(Plan *plan, PlanState *planstate, /* special child plans */ switch (nodeTag(plan)) { + case T_ModifyTable: + ExplainMemberNodes(((ModifyTable *) plan)->plans, + ((ModifyTableState *) planstate)->mt_plans, + outer_plan, es); + break; case T_Append: ExplainMemberNodes(((Append *) plan)->appendplans, ((AppendState *) planstate)->appendplans, @@ -1408,7 +1435,8 @@ ExplainScanTarget(Scan *plan, ExplainState *es) } /* - * Explain the constituent plans of an Append, BitmapAnd, or BitmapOr node. + * Explain the constituent plans of a ModifyTable, Append, BitmapAnd, + * or BitmapOr node. 
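The demand-pull protocol described in the documentation above is easy to picture with a toy pipeline outside the backend: each node, when asked, either hands back one more row or reports that it is done, and upper nodes obtain their input by recursively asking their children. The sketch below is illustration only; the node types, field names, and the even-number filter are all invented, and none of it is backend code.

/*
 * Toy demand-pull pipeline, for illustration only: each node's next()
 * either produces one more "row" (an int here) or reports exhaustion,
 * mirroring the calling convention described for executor plan nodes.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct ToyNode ToyNode;
struct ToyNode
{
	/* returns true and sets *row, or returns false when exhausted */
	bool		(*next) (ToyNode *self, int *row);
	ToyNode    *child;			/* subplan, if any */
	const int  *data;			/* for the leaf "scan" node */
	int			ndata;
	int			pos;
};

/* leaf node: hands back the next element of a constant array */
static bool
scan_next(ToyNode *self, int *row)
{
	if (self->pos >= self->ndata)
		return false;
	*row = self->data[self->pos++];
	return true;
}

/* upper node: pulls from its child on demand, keeps only even values */
static bool
filter_next(ToyNode *self, int *row)
{
	int			val;

	while (self->child->next(self->child, &val))
	{
		if (val % 2 == 0)
		{
			*row = val;
			return true;
		}
	}
	return false;				/* child exhausted */
}

int
main(void)
{
	static const int values[] = {1, 2, 3, 4, 5, 6};
	ToyNode		scan = {scan_next, NULL, values, 6, 0};
	ToyNode		filter = {filter_next, &scan, NULL, 0, 0};
	int			row;

	/* the caller just keeps asking the top node for one more row */
	while (filter.next(&filter, &row))
		printf("got row %d\n", row);
	return 0;
}

A MergeJoin node behaves the same way, except that it pulls one row from each of its two children and compares them before deciding what to emit.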
* * Ordinarily we don't pass down outer_plan to our child nodes, but in these * cases we must, since the node could be an "inner indexscan" in which case diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index c1f55cfcf9..47cf0c470d 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.252 2009/08/04 16:08:36 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.253 2009/10/10 01:43:45 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -55,6 +55,7 @@ int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN; static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid); static void InsertTrigger(TriggerDesc *trigdesc, Trigger *trigger, int indx); static HeapTuple GetTupleForTrigger(EState *estate, + PlanState *subplanstate, ResultRelInfo *relinfo, ItemPointer tid, TupleTableSlot **newSlot); @@ -1793,7 +1794,8 @@ ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo) } bool -ExecBRDeleteTriggers(EState *estate, ResultRelInfo *relinfo, +ExecBRDeleteTriggers(EState *estate, PlanState *subplanstate, + ResultRelInfo *relinfo, ItemPointer tupleid) { TriggerDesc *trigdesc = relinfo->ri_TrigDesc; @@ -1806,7 +1808,8 @@ ExecBRDeleteTriggers(EState *estate, ResultRelInfo *relinfo, TupleTableSlot *newSlot; int i; - trigtuple = GetTupleForTrigger(estate, relinfo, tupleid, &newSlot); + trigtuple = GetTupleForTrigger(estate, subplanstate, relinfo, tupleid, + &newSlot); if (trigtuple == NULL) return false; @@ -1862,7 +1865,7 @@ ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo, if (trigdesc && trigdesc->n_after_row[TRIGGER_EVENT_DELETE] > 0) { - HeapTuple trigtuple = GetTupleForTrigger(estate, relinfo, + HeapTuple trigtuple = GetTupleForTrigger(estate, NULL, relinfo, tupleid, NULL); AfterTriggerSaveEvent(relinfo, TRIGGER_EVENT_DELETE, @@ -1941,7 +1944,8 @@ ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo) } HeapTuple -ExecBRUpdateTriggers(EState *estate, ResultRelInfo *relinfo, +ExecBRUpdateTriggers(EState *estate, PlanState *subplanstate, + ResultRelInfo *relinfo, ItemPointer tupleid, HeapTuple newtuple) { TriggerDesc *trigdesc = relinfo->ri_TrigDesc; @@ -1954,16 +1958,18 @@ ExecBRUpdateTriggers(EState *estate, ResultRelInfo *relinfo, TupleTableSlot *newSlot; int i; - trigtuple = GetTupleForTrigger(estate, relinfo, tupleid, &newSlot); + trigtuple = GetTupleForTrigger(estate, subplanstate, relinfo, tupleid, + &newSlot); if (trigtuple == NULL) return NULL; /* * In READ COMMITTED isolation level it's possible that newtuple was - * changed due to concurrent update. + * changed due to concurrent update. In that case we have a raw subplan + * output tuple and need to run it through the junk filter. 
*/ if (newSlot != NULL) - intuple = newtuple = ExecRemoveJunk(estate->es_junkFilter, newSlot); + intuple = newtuple = ExecRemoveJunk(relinfo->ri_junkFilter, newSlot); LocTriggerData.type = T_TriggerData; LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE | @@ -2014,7 +2020,7 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo, if (trigdesc && trigdesc->n_after_row[TRIGGER_EVENT_UPDATE] > 0) { - HeapTuple trigtuple = GetTupleForTrigger(estate, relinfo, + HeapTuple trigtuple = GetTupleForTrigger(estate, NULL, relinfo, tupleid, NULL); AfterTriggerSaveEvent(relinfo, TRIGGER_EVENT_UPDATE, @@ -2094,7 +2100,9 @@ ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo) static HeapTuple -GetTupleForTrigger(EState *estate, ResultRelInfo *relinfo, +GetTupleForTrigger(EState *estate, + PlanState *subplanstate, + ResultRelInfo *relinfo, ItemPointer tid, TupleTableSlot **newSlot) { @@ -2111,6 +2119,9 @@ GetTupleForTrigger(EState *estate, ResultRelInfo *relinfo, *newSlot = NULL; + /* caller must pass a subplanstate if EvalPlanQual is possible */ + Assert(subplanstate != NULL); + /* * lock tuple for update */ @@ -2143,6 +2154,7 @@ ltrmark:; epqslot = EvalPlanQual(estate, relinfo->ri_RangeTableIndex, + subplanstate, &update_ctid, update_xmax); if (!TupIsNull(epqslot)) diff --git a/src/backend/executor/Makefile b/src/backend/executor/Makefile index 63c8610778..cdd71befeb 100644 --- a/src/backend/executor/Makefile +++ b/src/backend/executor/Makefile @@ -4,7 +4,7 @@ # Makefile for executor # # IDENTIFICATION -# $PostgreSQL: pgsql/src/backend/executor/Makefile,v 1.29 2008/12/28 18:53:55 tgl Exp $ +# $PostgreSQL: pgsql/src/backend/executor/Makefile,v 1.30 2009/10/10 01:43:45 tgl Exp $ # #------------------------------------------------------------------------- @@ -18,6 +18,7 @@ OBJS = execAmi.o execCurrent.o execGrouping.o execJunk.o execMain.o \ nodeBitmapAnd.o nodeBitmapOr.o \ nodeBitmapHeapscan.o nodeBitmapIndexscan.o nodeHash.o \ nodeHashjoin.o nodeIndexscan.o nodeMaterial.o nodeMergejoin.o \ + nodeModifyTable.o \ nodeNestloop.o nodeFunctionscan.o nodeRecursiveunion.o nodeResult.o \ nodeSeqscan.o nodeSetOp.o nodeSort.o nodeUnique.o \ nodeValuesscan.o nodeCtescan.o nodeWorktablescan.o \ diff --git a/src/backend/executor/README b/src/backend/executor/README index 467d6272d1..2416ac4d42 100644 --- a/src/backend/executor/README +++ b/src/backend/executor/README @@ -1,4 +1,4 @@ -$PostgreSQL: pgsql/src/backend/executor/README,v 1.8 2009/01/09 15:46:10 tgl Exp $ +$PostgreSQL: pgsql/src/backend/executor/README,v 1.9 2009/10/10 01:43:45 tgl Exp $ The Postgres Executor ===================== @@ -25,16 +25,17 @@ There is a moderately intelligent scheme to avoid rescanning nodes unnecessarily (for example, Sort does not rescan its input if no parameters of the input have changed, since it can just reread its stored sorted data). -The plan tree concept implements SELECT directly: it is only necessary to -deliver the top-level result tuples to the client, or insert them into -another table in the case of INSERT ... SELECT. (INSERT ... VALUES is -handled similarly, but the plan tree is just a Result node with no source -tables.) For UPDATE, the plan tree selects the tuples that need to be -updated (WHERE condition) and delivers a new calculated tuple value for each -such tuple, plus a "junk" (hidden) tuple CTID identifying the target tuple. -The executor's top level then uses this information to update the correct -tuple. DELETE is similar to UPDATE except that only a CTID need be -delivered by the plan tree. 
+For a SELECT, it is only necessary to deliver the top-level result tuples +to the client. For INSERT/UPDATE/DELETE, the actual table modification +operations happen in a top-level ModifyTable plan node. If the query +includes a RETURNING clause, the ModifyTable node delivers the computed +RETURNING rows as output, otherwise it returns nothing. Handling INSERT +is pretty straightforward: the tuples returned from the plan tree below +ModifyTable are inserted into the correct result relation. For UPDATE, +the plan tree returns the computed tuples to be updated, plus a "junk" +(hidden) CTID column identifying which table row is to be replaced by each +one. For DELETE, the plan tree need only deliver a CTID column, and the +ModifyTable node visits each of those rows and marks the row deleted. XXX a great deal more documentation needs to be written here... diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index 51923c436a..6c2de387ef 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.104 2009/09/12 22:12:03 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.105 2009/10/10 01:43:45 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -30,6 +30,7 @@ #include "executor/nodeLimit.h" #include "executor/nodeMaterial.h" #include "executor/nodeMergejoin.h" +#include "executor/nodeModifyTable.h" #include "executor/nodeNestloop.h" #include "executor/nodeRecursiveunion.h" #include "executor/nodeResult.h" @@ -127,6 +128,10 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt) ExecReScanResult((ResultState *) node, exprCtxt); break; + case T_ModifyTableState: + ExecReScanModifyTable((ModifyTableState *) node, exprCtxt); + break; + case T_AppendState: ExecReScanAppend((AppendState *) node, exprCtxt); break; diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c index a4c77f0ad3..4d8693d9cb 100644 --- a/src/backend/executor/execJunk.c +++ b/src/backend/executor/execJunk.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.58 2009/01/01 17:23:41 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.59 2009/10/10 01:43:45 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -34,9 +34,7 @@ * called 'resjunk'. If the value of this field is true then the * corresponding attribute is a "junk" attribute. * - * When we initialize a plan we call ExecInitJunkFilter to create - * and store the appropriate information in the es_junkFilter attribute of - * EState. + * When we initialize a plan we call ExecInitJunkFilter to create a filter. * * We then execute the plan, treating the resjunk attributes like any others. * @@ -44,7 +42,7 @@ * ExecFindJunkAttribute/ExecGetJunkAttribute to retrieve the values of the * junk attributes we are interested in, and ExecFilterJunk or ExecRemoveJunk * to remove all the junk attributes from a tuple. This new "clean" tuple is - * then printed, replaced, deleted or inserted. + * then printed, inserted, or updated. 
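The junk-attribute mechanism the comment above describes can also be shown in miniature: the plan's output carries hidden columns (such as the CTID identifying the target row) alongside the ordinary ones, the consumer looks up the hidden value it needs, and the row is then stripped down to its visible columns. Everything in the sketch is invented for illustration — the column names and the string-valued "row" — and it only mimics what ExecFindJunkAttribute and ExecFilterJunk do; it is not the slot and descriptor machinery the backend actually uses.

/*
 * Illustration only: a "row" with ordinary columns plus one hidden
 * (junk) column.  The consumer first fetches the junk value it cares
 * about (a fake row identifier standing in for ctid), then builds a
 * clean row containing only the non-junk columns.
 */
#include <stdio.h>
#include <string.h>

#define NCOLS 3

static const char *colnames[NCOLS] = {"id", "name", "ctid"};
static const int resjunk[NCOLS] = {0, 0, 1};	/* last column is junk */

/* find a column by name, much as junk attrs are found by resname */
static int
find_column(const char *name)
{
	for (int i = 0; i < NCOLS; i++)
		if (strcmp(colnames[i], name) == 0)
			return i;
	return -1;
}

/* copy only the non-junk columns into the clean row */
static int
filter_junk(const char *src[NCOLS], const char *clean[NCOLS])
{
	int			n = 0;

	for (int i = 0; i < NCOLS; i++)
		if (!resjunk[i])
			clean[n++] = src[i];
	return n;
}

int
main(void)
{
	const char *row[NCOLS] = {"42", "fred", "(0,7)"};
	const char *clean[NCOLS];
	int			ctidcol = find_column("ctid");
	int			nclean = filter_junk(row, clean);

	printf("target row is at %s\n", row[ctidcol]);	/* junk value used internally */
	for (int i = 0; i < nclean; i++)
		printf("clean column %d = %s\n", i, clean[i]);
	return 0;
}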
* *------------------------------------------------------------------------- */ diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 82b05dc4d1..7c788ea6df 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -26,13 +26,12 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.331 2009/10/08 22:34:57 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.332 2009/10/10 01:43:47 tgl Exp $ * *------------------------------------------------------------------------- */ #include "postgres.h" -#include "access/heapam.h" #include "access/reloptions.h" #include "access/sysattr.h" #include "access/transam.h" @@ -44,17 +43,13 @@ #include "commands/trigger.h" #include "executor/execdebug.h" #include "executor/instrument.h" -#include "executor/nodeSubplan.h" #include "miscadmin.h" -#include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "parser/parse_clause.h" #include "parser/parsetree.h" #include "storage/bufmgr.h" #include "storage/lmgr.h" -#include "storage/smgr.h" #include "utils/acl.h" -#include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/snapmgr.h" @@ -77,35 +72,20 @@ typedef struct evalPlanQual /* decls for local routines only used within this module */ static void InitPlan(QueryDesc *queryDesc, int eflags); -static void ExecCheckPlanOutput(Relation resultRel, List *targetList); static void ExecEndPlan(PlanState *planstate, EState *estate); static void ExecutePlan(EState *estate, PlanState *planstate, CmdType operation, + bool sendTuples, long numberTuples, ScanDirection direction, DestReceiver *dest); -static void ExecSelect(TupleTableSlot *slot, - DestReceiver *dest, EState *estate); -static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid, - TupleTableSlot *planSlot, - DestReceiver *dest, EState *estate); -static void ExecDelete(ItemPointer tupleid, - TupleTableSlot *planSlot, - DestReceiver *dest, EState *estate); -static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid, - TupleTableSlot *planSlot, - DestReceiver *dest, EState *estate); -static void ExecProcessReturning(ProjectionInfo *projectReturning, - TupleTableSlot *tupleSlot, - TupleTableSlot *planSlot, - DestReceiver *dest); static TupleTableSlot *EvalPlanQualNext(EState *estate); static void EndEvalPlanQual(EState *estate); static void ExecCheckRTPerms(List *rangeTable); static void ExecCheckRTEPerms(RangeTblEntry *rte); static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt); static void EvalPlanQualStart(evalPlanQual *epq, EState *estate, - evalPlanQual *priorepq); + Plan *planTree, evalPlanQual *priorepq); static void EvalPlanQualStop(evalPlanQual *epq); static void OpenIntoRel(QueryDesc *queryDesc); static void CloseIntoRel(QueryDesc *queryDesc); @@ -297,7 +277,7 @@ standard_ExecutorRun(QueryDesc *queryDesc, estate->es_lastoid = InvalidOid; sendTuples = (operation == CMD_SELECT || - queryDesc->plannedstmt->returningLists); + queryDesc->plannedstmt->hasReturning); if (sendTuples) (*dest->rStartup) (dest, operation, queryDesc->tupDesc); @@ -309,6 +289,7 @@ standard_ExecutorRun(QueryDesc *queryDesc, ExecutePlan(estate, queryDesc->planstate, operation, + sendTuples, count, direction, dest); @@ -668,7 +649,10 @@ InitPlan(QueryDesc *queryDesc, int eflags) estate->es_range_table = rangeTable; /* - * initialize result relation stuff + * initialize result relation stuff, and open/lock the result rels. 
+ * + * We must do this before initializing the plan tree, else we might + * try to do a lock upgrade if a result rel is also a source rel. */ if (plannedstmt->resultRelations) { @@ -697,8 +681,8 @@ InitPlan(QueryDesc *queryDesc, int eflags) } estate->es_result_relations = resultRelInfos; estate->es_num_result_relations = numResultRelations; - /* Initialize to first or only result rel */ - estate->es_result_relation_info = resultRelInfos; + /* es_result_relation_info is NULL except when within ModifyTable */ + estate->es_result_relation_info = NULL; } else { @@ -755,12 +739,10 @@ InitPlan(QueryDesc *queryDesc, int eflags) } /* - * Initialize the executor's tuple table. Also, if it's not a SELECT, - * set up a tuple table slot for use for trigger output tuples. + * Initialize the executor's tuple table to empty. */ estate->es_tupleTable = NIL; - if (operation != CMD_SELECT) - estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); + estate->es_trig_tuple_slot = NULL; /* mark EvalPlanQual not active */ estate->es_plannedstmt = plannedstmt; @@ -814,214 +796,70 @@ InitPlan(QueryDesc *queryDesc, int eflags) tupType = ExecGetResultType(planstate); /* - * Initialize the junk filter if needed. SELECT and INSERT queries need a - * filter if there are any junk attrs in the tlist. UPDATE and DELETE - * always need a filter, since there's always a junk 'ctid' attribute - * present --- no need to look first. - * - * This section of code is also a convenient place to verify that the - * output of an INSERT or UPDATE matches the target table(s). + * Initialize the junk filter if needed. SELECT queries need a + * filter if there are any junk attrs in the top-level tlist. */ + if (operation == CMD_SELECT) { bool junk_filter_needed = false; ListCell *tlist; - switch (operation) + foreach(tlist, plan->targetlist) { - case CMD_SELECT: - case CMD_INSERT: - foreach(tlist, plan->targetlist) - { - TargetEntry *tle = (TargetEntry *) lfirst(tlist); + TargetEntry *tle = (TargetEntry *) lfirst(tlist); - if (tle->resjunk) - { - junk_filter_needed = true; - break; - } - } - break; - case CMD_UPDATE: - case CMD_DELETE: + if (tle->resjunk) + { junk_filter_needed = true; break; - default: - break; + } } if (junk_filter_needed) { - /* - * If there are multiple result relations, each one needs its own - * junk filter. Note this is only possible for UPDATE/DELETE, so - * we can't be fooled by some needing a filter and some not. - */ - if (list_length(plannedstmt->resultRelations) > 1) + JunkFilter *j; + + j = ExecInitJunkFilter(planstate->plan->targetlist, + tupType->tdhasoid, + ExecInitExtraTupleSlot(estate)); + estate->es_junkFilter = j; + + /* Want to return the cleaned tuple type */ + tupType = j->jf_cleanTupType; + + /* For SELECT FOR UPDATE/SHARE, find the junk attrs now */ + foreach(l, estate->es_rowMarks) { - PlanState **appendplans; - int as_nplans; - ResultRelInfo *resultRelInfo; + ExecRowMark *erm = (ExecRowMark *) lfirst(l); + char resname[32]; - /* Top plan had better be an Append here. 
*/ - Assert(IsA(plan, Append)); - Assert(((Append *) plan)->isTarget); - Assert(IsA(planstate, AppendState)); - appendplans = ((AppendState *) planstate)->appendplans; - as_nplans = ((AppendState *) planstate)->as_nplans; - Assert(as_nplans == estate->es_num_result_relations); - resultRelInfo = estate->es_result_relations; - for (i = 0; i < as_nplans; i++) + /* always need the ctid */ + snprintf(resname, sizeof(resname), "ctid%u", + erm->prti); + erm->ctidAttNo = ExecFindJunkAttribute(j, resname); + if (!AttributeNumberIsValid(erm->ctidAttNo)) + elog(ERROR, "could not find junk \"%s\" column", + resname); + /* if child relation, need tableoid too */ + if (erm->rti != erm->prti) { - PlanState *subplan = appendplans[i]; - JunkFilter *j; - - if (operation == CMD_UPDATE) - ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, - subplan->plan->targetlist); - - j = ExecInitJunkFilter(subplan->plan->targetlist, - resultRelInfo->ri_RelationDesc->rd_att->tdhasoid, - ExecInitExtraTupleSlot(estate)); - - /* - * Since it must be UPDATE/DELETE, there had better be a - * "ctid" junk attribute in the tlist ... but ctid could - * be at a different resno for each result relation. We - * look up the ctid resnos now and save them in the - * junkfilters. - */ - j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); - if (!AttributeNumberIsValid(j->jf_junkAttNo)) - elog(ERROR, "could not find junk ctid column"); - resultRelInfo->ri_junkFilter = j; - resultRelInfo++; - } - - /* - * Set active junkfilter too; at this point ExecInitAppend has - * already selected an active result relation... - */ - estate->es_junkFilter = - estate->es_result_relation_info->ri_junkFilter; - - /* - * We currently can't support rowmarks in this case, because - * the associated junk CTIDs might have different resnos in - * different subplans. 
- */ - if (estate->es_rowMarks) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE is not supported within a query with multiple result relations"))); - } - else - { - /* Normal case with just one JunkFilter */ - JunkFilter *j; - - if (operation == CMD_INSERT || operation == CMD_UPDATE) - ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc, - planstate->plan->targetlist); - - j = ExecInitJunkFilter(planstate->plan->targetlist, - tupType->tdhasoid, - ExecInitExtraTupleSlot(estate)); - estate->es_junkFilter = j; - if (estate->es_result_relation_info) - estate->es_result_relation_info->ri_junkFilter = j; - - if (operation == CMD_SELECT) - { - /* For SELECT, want to return the cleaned tuple type */ - tupType = j->jf_cleanTupType; - } - else if (operation == CMD_UPDATE || operation == CMD_DELETE) - { - /* For UPDATE/DELETE, find the ctid junk attr now */ - j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); - if (!AttributeNumberIsValid(j->jf_junkAttNo)) - elog(ERROR, "could not find junk ctid column"); - } - - /* For SELECT FOR UPDATE/SHARE, find the junk attrs now */ - foreach(l, estate->es_rowMarks) - { - ExecRowMark *erm = (ExecRowMark *) lfirst(l); - char resname[32]; - - /* always need the ctid */ - snprintf(resname, sizeof(resname), "ctid%u", + snprintf(resname, sizeof(resname), "tableoid%u", erm->prti); - erm->ctidAttNo = ExecFindJunkAttribute(j, resname); - if (!AttributeNumberIsValid(erm->ctidAttNo)) + erm->toidAttNo = ExecFindJunkAttribute(j, resname); + if (!AttributeNumberIsValid(erm->toidAttNo)) elog(ERROR, "could not find junk \"%s\" column", resname); - /* if child relation, need tableoid too */ - if (erm->rti != erm->prti) - { - snprintf(resname, sizeof(resname), "tableoid%u", - erm->prti); - erm->toidAttNo = ExecFindJunkAttribute(j, resname); - if (!AttributeNumberIsValid(erm->toidAttNo)) - elog(ERROR, "could not find junk \"%s\" column", - resname); - } } } } else { - if (operation == CMD_INSERT) - ExecCheckPlanOutput(estate->es_result_relation_info->ri_RelationDesc, - planstate->plan->targetlist); - estate->es_junkFilter = NULL; if (estate->es_rowMarks) elog(ERROR, "SELECT FOR UPDATE/SHARE, but no junk columns"); } } - /* - * Initialize RETURNING projections if needed. - */ - if (plannedstmt->returningLists) - { - TupleTableSlot *slot; - ExprContext *econtext; - ResultRelInfo *resultRelInfo; - - /* - * We set QueryDesc.tupDesc to be the RETURNING rowtype in this case. - * We assume all the sublists will generate the same output tupdesc. - */ - tupType = ExecTypeFromTL((List *) linitial(plannedstmt->returningLists), - false); - - /* Set up a slot for the output of the RETURNING projection(s) */ - slot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(slot, tupType); - /* Need an econtext too */ - econtext = CreateExprContext(estate); - - /* - * Build a projection for each result rel. Note that any SubPlans in - * the RETURNING lists get attached to the topmost plan node. 
- */ - Assert(list_length(plannedstmt->returningLists) == estate->es_num_result_relations); - resultRelInfo = estate->es_result_relations; - foreach(l, plannedstmt->returningLists) - { - List *rlist = (List *) lfirst(l); - List *rliststate; - - rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate); - resultRelInfo->ri_projectReturning = - ExecBuildProjectionInfo(rliststate, econtext, slot, - resultRelInfo->ri_RelationDesc->rd_att); - resultRelInfo++; - } - } - queryDesc->tupDesc = tupType; queryDesc->planstate = planstate; @@ -1122,75 +960,6 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, ExecOpenIndices(resultRelInfo); } -/* - * Verify that the tuples to be produced by INSERT or UPDATE match the - * target relation's rowtype - * - * We do this to guard against stale plans. If plan invalidation is - * functioning properly then we should never get a failure here, but better - * safe than sorry. Note that this is called after we have obtained lock - * on the target rel, so the rowtype can't change underneath us. - * - * The plan output is represented by its targetlist, because that makes - * handling the dropped-column case easier. - */ -static void -ExecCheckPlanOutput(Relation resultRel, List *targetList) -{ - TupleDesc resultDesc = RelationGetDescr(resultRel); - int attno = 0; - ListCell *lc; - - foreach(lc, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - Form_pg_attribute attr; - - if (tle->resjunk) - continue; /* ignore junk tlist items */ - - if (attno >= resultDesc->natts) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("table row type and query-specified row type do not match"), - errdetail("Query has too many columns."))); - attr = resultDesc->attrs[attno++]; - - if (!attr->attisdropped) - { - /* Normal case: demand type match */ - if (exprType((Node *) tle->expr) != attr->atttypid) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("table row type and query-specified row type do not match"), - errdetail("Table has type %s at ordinal position %d, but query expects %s.", - format_type_be(attr->atttypid), - attno, - format_type_be(exprType((Node *) tle->expr))))); - } - else - { - /* - * For a dropped column, we can't check atttypid (it's likely 0). - * In any case the planner has most likely inserted an INT4 null. - * What we insist on is just *some* NULL constant. - */ - if (!IsA(tle->expr, Const) || - !((Const *) tle->expr)->constisnull) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("table row type and query-specified row type do not match"), - errdetail("Query provides a value for a dropped column at ordinal position %d.", - attno))); - } - } - if (attno != resultDesc->natts) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("table row type and query-specified row type do not match"), - errdetail("Query has too few columns."))); -} - /* * ExecGetTriggerResultRel * @@ -1281,11 +1050,13 @@ ExecGetTriggerResultRel(EState *estate, Oid relid) * recognize how far down the requirement really goes, but for now we just * make all plan nodes do the same thing if the top level forces the choice. * - * We assume that estate->es_result_relation_info is already set up to - * describe the target relation. Note that in an UPDATE that spans an - * inheritance tree, some of the target relations may have OIDs and some not. - * We have to make the decisions on a per-relation basis as we initialize - * each of the child plans of the topmost Append plan. 
+ * We assume that if we are generating tuples for INSERT or UPDATE, + * estate->es_result_relation_info is already set up to describe the target + * relation. Note that in an UPDATE that spans an inheritance tree, some of + * the target relations may have OIDs and some not. We have to make the + * decisions on a per-relation basis as we initialize each of the subplans of + * the ModifyTable node, so ModifyTable has to set es_result_relation_info + * while initializing each subplan. * * SELECT INTO is even uglier, because we don't have the INTO relation's * descriptor available when this code runs; we have to look aside at a @@ -1294,26 +1065,24 @@ ExecGetTriggerResultRel(EState *estate, Oid relid) bool ExecContextForcesOids(PlanState *planstate, bool *hasoids) { + ResultRelInfo *ri = planstate->state->es_result_relation_info; + + if (ri != NULL) + { + Relation rel = ri->ri_RelationDesc; + + if (rel != NULL) + { + *hasoids = rel->rd_rel->relhasoids; + return true; + } + } + if (planstate->state->es_select_into) { *hasoids = planstate->state->es_into_oids; return true; } - else - { - ResultRelInfo *ri = planstate->state->es_result_relation_info; - - if (ri != NULL) - { - Relation rel = ri->ri_RelationDesc; - - if (rel != NULL) - { - *hasoids = rel->rd_rel->relhasoids; - return true; - } - } - } return false; } @@ -1416,6 +1185,7 @@ static void ExecutePlan(EState *estate, PlanState *planstate, CmdType operation, + bool sendTuples, long numberTuples, ScanDirection direction, DestReceiver *dest) @@ -1423,8 +1193,6 @@ ExecutePlan(EState *estate, JunkFilter *junkfilter; TupleTableSlot *planSlot; TupleTableSlot *slot; - ItemPointer tupleid = NULL; - ItemPointerData tuple_ctid; long current_tuple_count; /* @@ -1437,25 +1205,6 @@ ExecutePlan(EState *estate, */ estate->es_direction = direction; - /* - * Process BEFORE EACH STATEMENT triggers - */ - switch (operation) - { - case CMD_UPDATE: - ExecBSUpdateTriggers(estate, estate->es_result_relation_info); - break; - case CMD_DELETE: - ExecBSDeleteTriggers(estate, estate->es_result_relation_info); - break; - case CMD_INSERT: - ExecBSInsertTriggers(estate, estate->es_result_relation_info); - break; - default: - /* do nothing */ - break; - } - /* * Loop until we've processed the proper number of tuples from the plan. */ @@ -1578,6 +1327,7 @@ lnext: ; /* updated, so look at updated version */ newSlot = EvalPlanQual(estate, erm->rti, + planstate, &update_ctid, update_xmax); if (!TupIsNull(newSlot)) @@ -1605,61 +1355,25 @@ lnext: ; } /* - * extract the 'ctid' junk attribute. + * Create a new "clean" tuple with all junk attributes removed. */ - if (operation == CMD_UPDATE || operation == CMD_DELETE) - { - Datum datum; - bool isNull; - - datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, - &isNull); - /* shouldn't ever get a null result... */ - if (isNull) - elog(ERROR, "ctid is NULL"); - - tupleid = (ItemPointer) DatumGetPointer(datum); - tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */ - tupleid = &tuple_ctid; - } - - /* - * Create a new "clean" tuple with all junk attributes removed. We - * don't need to do this for DELETE, however (there will in fact - * be no non-junk attributes in a DELETE!) - */ - if (operation != CMD_DELETE) - slot = ExecFilterJunk(junkfilter, slot); + slot = ExecFilterJunk(junkfilter, slot); } /* - * now that we have a tuple, do the appropriate thing with it.. either - * send it to the output destination, add it to a relation someplace, - * delete it from a relation, or modify some of its attributes. 
+ * If we are supposed to send the tuple somewhere, do so. + * (In practice this is probably always the case at this point.) */ - switch (operation) - { - case CMD_SELECT: - ExecSelect(slot, dest, estate); - break; + if (sendTuples) + (*dest->receiveSlot) (slot, dest); - case CMD_INSERT: - ExecInsert(slot, tupleid, planSlot, dest, estate); - break; - - case CMD_DELETE: - ExecDelete(tupleid, planSlot, dest, estate); - break; - - case CMD_UPDATE: - ExecUpdate(slot, tupleid, planSlot, dest, estate); - break; - - default: - elog(ERROR, "unrecognized operation code: %d", - (int) operation); - break; - } + /* + * Count tuples processed, if this is a SELECT. (For other operation + * types, the ModifyTable plan node must count the appropriate + * events.) + */ + if (operation == CMD_SELECT) + (estate->es_processed)++; /* * check our tuple count.. if we've processed the proper number then @@ -1670,453 +1384,8 @@ lnext: ; if (numberTuples && numberTuples == current_tuple_count) break; } - - /* - * Process AFTER EACH STATEMENT triggers - */ - switch (operation) - { - case CMD_UPDATE: - ExecASUpdateTriggers(estate, estate->es_result_relation_info); - break; - case CMD_DELETE: - ExecASDeleteTriggers(estate, estate->es_result_relation_info); - break; - case CMD_INSERT: - ExecASInsertTriggers(estate, estate->es_result_relation_info); - break; - default: - /* do nothing */ - break; - } } -/* ---------------------------------------------------------------- - * ExecSelect - * - * SELECTs are easy.. we just pass the tuple to the appropriate - * output function. - * ---------------------------------------------------------------- - */ -static void -ExecSelect(TupleTableSlot *slot, - DestReceiver *dest, - EState *estate) -{ - (*dest->receiveSlot) (slot, dest); - (estate->es_processed)++; -} - -/* ---------------------------------------------------------------- - * ExecInsert - * - * INSERTs are trickier.. we have to insert the tuple into - * the base relation and insert appropriate tuples into the - * index relations. - * ---------------------------------------------------------------- - */ -static void -ExecInsert(TupleTableSlot *slot, - ItemPointer tupleid, - TupleTableSlot *planSlot, - DestReceiver *dest, - EState *estate) -{ - HeapTuple tuple; - ResultRelInfo *resultRelInfo; - Relation resultRelationDesc; - Oid newId; - List *recheckIndexes = NIL; - - /* - * get the heap tuple out of the tuple table slot, making sure we have a - * writable copy - */ - tuple = ExecMaterializeSlot(slot); - - /* - * get information on the (current) result relation - */ - resultRelInfo = estate->es_result_relation_info; - resultRelationDesc = resultRelInfo->ri_RelationDesc; - - /* - * If the result relation has OIDs, force the tuple's OID to zero so that - * heap_insert will assign a fresh OID. Usually the OID already will be - * zero at this point, but there are corner cases where the plan tree can - * return a tuple extracted literally from some table with the same - * rowtype. - * - * XXX if we ever wanted to allow users to assign their own OIDs to new - * rows, this'd be the place to do it. For the moment, we make a point of - * doing this before calling triggers, so that a user-supplied trigger - * could hack the OID if desired. 
- */ - if (resultRelationDesc->rd_rel->relhasoids) - HeapTupleSetOid(tuple, InvalidOid); - - /* BEFORE ROW INSERT Triggers */ - if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0) - { - HeapTuple newtuple; - - newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple); - - if (newtuple == NULL) /* "do nothing" */ - return; - - if (newtuple != tuple) /* modified by Trigger(s) */ - { - /* - * Put the modified tuple into a slot for convenience of routines - * below. We assume the tuple was allocated in per-tuple memory - * context, and therefore will go away by itself. The tuple table - * slot should not try to clear it. - */ - TupleTableSlot *newslot = estate->es_trig_tuple_slot; - - if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) - ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); - ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); - slot = newslot; - tuple = newtuple; - } - } - - /* - * Check the constraints of the tuple - */ - if (resultRelationDesc->rd_att->constr) - ExecConstraints(resultRelInfo, slot, estate); - - /* - * insert the tuple - * - * Note: heap_insert returns the tid (location) of the new tuple in the - * t_self field. - */ - newId = heap_insert(resultRelationDesc, tuple, - estate->es_output_cid, 0, NULL); - - (estate->es_processed)++; - estate->es_lastoid = newId; - setLastTid(&(tuple->t_self)); - - /* - * insert index entries for tuple - */ - if (resultRelInfo->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), - estate, false); - - /* AFTER ROW INSERT Triggers */ - ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes); - - /* Process RETURNING if present */ - if (resultRelInfo->ri_projectReturning) - ExecProcessReturning(resultRelInfo->ri_projectReturning, - slot, planSlot, dest); -} - -/* ---------------------------------------------------------------- - * ExecDelete - * - * DELETE is like UPDATE, except that we delete the tuple and no - * index modifications are needed - * ---------------------------------------------------------------- - */ -static void -ExecDelete(ItemPointer tupleid, - TupleTableSlot *planSlot, - DestReceiver *dest, - EState *estate) -{ - ResultRelInfo *resultRelInfo; - Relation resultRelationDesc; - HTSU_Result result; - ItemPointerData update_ctid; - TransactionId update_xmax; - - /* - * get information on the (current) result relation - */ - resultRelInfo = estate->es_result_relation_info; - resultRelationDesc = resultRelInfo->ri_RelationDesc; - - /* BEFORE ROW DELETE Triggers */ - if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0) - { - bool dodelete; - - dodelete = ExecBRDeleteTriggers(estate, resultRelInfo, tupleid); - - if (!dodelete) /* "do nothing" */ - return; - } - - /* - * delete the tuple - * - * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that - * the row to be deleted is visible to that snapshot, and throw a can't- - * serialize error if not. This is a special-case behavior needed for - * referential integrity updates in serializable transactions. 
- */ -ldelete:; - result = heap_delete(resultRelationDesc, tupleid, - &update_ctid, &update_xmax, - estate->es_output_cid, - estate->es_crosscheck_snapshot, - true /* wait for commit */ ); - switch (result) - { - case HeapTupleSelfUpdated: - /* already deleted by self; nothing to do */ - return; - - case HeapTupleMayBeUpdated: - break; - - case HeapTupleUpdated: - if (IsXactIsoLevelSerializable) - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("could not serialize access due to concurrent update"))); - else if (!ItemPointerEquals(tupleid, &update_ctid)) - { - TupleTableSlot *epqslot; - - epqslot = EvalPlanQual(estate, - resultRelInfo->ri_RangeTableIndex, - &update_ctid, - update_xmax); - if (!TupIsNull(epqslot)) - { - *tupleid = update_ctid; - goto ldelete; - } - } - /* tuple already deleted; nothing to do */ - return; - - default: - elog(ERROR, "unrecognized heap_delete status: %u", result); - return; - } - - (estate->es_processed)++; - - /* - * Note: Normally one would think that we have to delete index tuples - * associated with the heap tuple now... - * - * ... but in POSTGRES, we have no need to do this because VACUUM will - * take care of it later. We can't delete index tuples immediately - * anyway, since the tuple is still visible to other transactions. - */ - - /* AFTER ROW DELETE Triggers */ - ExecARDeleteTriggers(estate, resultRelInfo, tupleid); - - /* Process RETURNING if present */ - if (resultRelInfo->ri_projectReturning) - { - /* - * We have to put the target tuple into a slot, which means first we - * gotta fetch it. We can use the trigger tuple slot. - */ - TupleTableSlot *slot = estate->es_trig_tuple_slot; - HeapTupleData deltuple; - Buffer delbuffer; - - deltuple.t_self = *tupleid; - if (!heap_fetch(resultRelationDesc, SnapshotAny, - &deltuple, &delbuffer, false, NULL)) - elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING"); - - if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc)) - ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); - ExecStoreTuple(&deltuple, slot, InvalidBuffer, false); - - ExecProcessReturning(resultRelInfo->ri_projectReturning, - slot, planSlot, dest); - - ExecClearTuple(slot); - ReleaseBuffer(delbuffer); - } -} - -/* ---------------------------------------------------------------- - * ExecUpdate - * - * note: we can't run UPDATE queries with transactions - * off because UPDATEs are actually INSERTs and our - * scan will mistakenly loop forever, updating the tuple - * it just inserted.. This should be fixed but until it - * is, we don't want to get stuck in an infinite loop - * which corrupts your database.. 
- * ---------------------------------------------------------------- - */ -static void -ExecUpdate(TupleTableSlot *slot, - ItemPointer tupleid, - TupleTableSlot *planSlot, - DestReceiver *dest, - EState *estate) -{ - HeapTuple tuple; - ResultRelInfo *resultRelInfo; - Relation resultRelationDesc; - HTSU_Result result; - ItemPointerData update_ctid; - TransactionId update_xmax; - List *recheckIndexes = NIL; - - /* - * abort the operation if not running transactions - */ - if (IsBootstrapProcessingMode()) - elog(ERROR, "cannot UPDATE during bootstrap"); - - /* - * get the heap tuple out of the tuple table slot, making sure we have a - * writable copy - */ - tuple = ExecMaterializeSlot(slot); - - /* - * get information on the (current) result relation - */ - resultRelInfo = estate->es_result_relation_info; - resultRelationDesc = resultRelInfo->ri_RelationDesc; - - /* BEFORE ROW UPDATE Triggers */ - if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0) - { - HeapTuple newtuple; - - newtuple = ExecBRUpdateTriggers(estate, resultRelInfo, - tupleid, tuple); - - if (newtuple == NULL) /* "do nothing" */ - return; - - if (newtuple != tuple) /* modified by Trigger(s) */ - { - /* - * Put the modified tuple into a slot for convenience of routines - * below. We assume the tuple was allocated in per-tuple memory - * context, and therefore will go away by itself. The tuple table - * slot should not try to clear it. - */ - TupleTableSlot *newslot = estate->es_trig_tuple_slot; - - if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) - ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); - ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); - slot = newslot; - tuple = newtuple; - } - } - - /* - * Check the constraints of the tuple - * - * If we generate a new candidate tuple after EvalPlanQual testing, we - * must loop back here and recheck constraints. (We don't need to redo - * triggers, however. If there are any BEFORE triggers then trigger.c - * will have done heap_lock_tuple to lock the correct tuple, so there's no - * need to do them again.) - */ -lreplace:; - if (resultRelationDesc->rd_att->constr) - ExecConstraints(resultRelInfo, slot, estate); - - /* - * replace the heap tuple - * - * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that - * the row to be updated is visible to that snapshot, and throw a can't- - * serialize error if not. This is a special-case behavior needed for - * referential integrity updates in serializable transactions. 
- */ - result = heap_update(resultRelationDesc, tupleid, tuple, - &update_ctid, &update_xmax, - estate->es_output_cid, - estate->es_crosscheck_snapshot, - true /* wait for commit */ ); - switch (result) - { - case HeapTupleSelfUpdated: - /* already deleted by self; nothing to do */ - return; - - case HeapTupleMayBeUpdated: - break; - - case HeapTupleUpdated: - if (IsXactIsoLevelSerializable) - ereport(ERROR, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("could not serialize access due to concurrent update"))); - else if (!ItemPointerEquals(tupleid, &update_ctid)) - { - TupleTableSlot *epqslot; - - epqslot = EvalPlanQual(estate, - resultRelInfo->ri_RangeTableIndex, - &update_ctid, - update_xmax); - if (!TupIsNull(epqslot)) - { - *tupleid = update_ctid; - slot = ExecFilterJunk(estate->es_junkFilter, epqslot); - tuple = ExecMaterializeSlot(slot); - goto lreplace; - } - } - /* tuple already deleted; nothing to do */ - return; - - default: - elog(ERROR, "unrecognized heap_update status: %u", result); - return; - } - - (estate->es_processed)++; - - /* - * Note: instead of having to update the old index tuples associated with - * the heap tuple, all we do is form and insert new index tuples. This is - * because UPDATEs are actually DELETEs and INSERTs, and index tuple - * deletion is done later by VACUUM (see notes in ExecDelete). All we do - * here is insert new index tuples. -cim 9/27/89 - */ - - /* - * insert index entries for tuple - * - * Note: heap_update returns the tid (location) of the new tuple in the - * t_self field. - * - * If it's a HOT update, we mustn't insert new index entries. - */ - if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple)) - recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), - estate, false); - - /* AFTER ROW UPDATE Triggers */ - ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple, - recheckIndexes); - - /* Process RETURNING if present */ - if (resultRelInfo->ri_projectReturning) - ExecProcessReturning(resultRelInfo->ri_projectReturning, - slot, planSlot, dest); -} /* * ExecRelCheck --- check that tuple meets constraints for result relation @@ -2217,42 +1486,6 @@ ExecConstraints(ResultRelInfo *resultRelInfo, } } -/* - * ExecProcessReturning --- evaluate a RETURNING list and send to dest - * - * projectReturning: RETURNING projection info for current result rel - * tupleSlot: slot holding tuple actually inserted/updated/deleted - * planSlot: slot holding tuple returned by top plan node - * dest: where to send the output - */ -static void -ExecProcessReturning(ProjectionInfo *projectReturning, - TupleTableSlot *tupleSlot, - TupleTableSlot *planSlot, - DestReceiver *dest) -{ - ExprContext *econtext = projectReturning->pi_exprContext; - TupleTableSlot *retSlot; - - /* - * Reset per-tuple memory context to free any expression evaluation - * storage allocated in the previous cycle. - */ - ResetExprContext(econtext); - - /* Make tuple and any needed join variables available to ExecProject */ - econtext->ecxt_scantuple = tupleSlot; - econtext->ecxt_outertuple = planSlot; - - /* Compute the RETURNING expressions */ - retSlot = ExecProject(projectReturning, NULL); - - /* Send to dest */ - (*dest->receiveSlot) (retSlot, dest); - - ExecClearTuple(retSlot); -} - /* * Check a modified tuple to see if we want to process its updated version * under READ COMMITTED rules. 
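The removed ExecUpdate code above shows the READ COMMITTED retry pattern that the new ModifyTable node has to preserve: if heap_update reports that the row was concurrently updated, the executor follows the update chain to the latest row version, re-evaluates the query's conditions on it via EvalPlanQual, and, if they still hold, loops back and retries the modification on that version. The standalone sketch below imitates only that control flow; the row-version array and the recheck function are invented stand-ins, not backend code.

/*
 * Illustration only: the retry loop used when an UPDATE under READ
 * COMMITTED finds that its target row was concurrently updated.  The
 * "newer" link stands in for t_ctid chasing and recheck_qual() for
 * EvalPlanQual.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	int			value;
	int			newer;			/* index of newer version, or -1 */
} RowVersion;

static RowVersion table[] = {
	{10, 1},					/* version 0 was already replaced ... */
	{15, -1},					/* ... by version 1, the live row */
};

/* does the row still satisfy the UPDATE's WHERE condition? */
static bool
recheck_qual(const RowVersion *row)
{
	return row->value < 20;
}

int
main(void)
{
	int			target = 0;		/* version the scan originally found */

	for (;;)
	{
		if (table[target].newer == -1)
		{
			/* no concurrent update: apply our change and stop */
			table[target].value += 100;
			printf("updated version %d, new value %d\n",
				   target, table[target].value);
			break;
		}

		/* row was concurrently updated: move to the newest version */
		target = table[target].newer;

		/* re-evaluate the qual on that version before retrying */
		if (!recheck_qual(&table[target]))
		{
			printf("row no longer satisfies the condition; skipping\n");
			break;
		}
	}
	return 0;
}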
@@ -2261,6 +1494,7 @@ ExecProcessReturning(ProjectionInfo *projectReturning, * * estate - executor state data * rti - rangetable index of table containing tuple + * subplanstate - portion of plan tree that needs to be re-evaluated * *tid - t_ctid from the outdated tuple (ie, next updated version) * priorXmax - t_xmax from the outdated tuple * @@ -2272,6 +1506,7 @@ ExecProcessReturning(ProjectionInfo *projectReturning, */ TupleTableSlot * EvalPlanQual(EState *estate, Index rti, + PlanState *subplanstate, ItemPointer tid, TransactionId priorXmax) { evalPlanQual *epq; @@ -2526,10 +1761,10 @@ EvalPlanQual(EState *estate, Index rti, * * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to * instead copy down changeable state from the top plan (including - * es_result_relation_info, es_junkFilter) and reset locally changeable + * es_result_relation_info) and reset locally changeable * state in the epq (including es_param_exec_vals, es_evTupleNull). */ - EvalPlanQualStart(epq, estate, epq->next); + EvalPlanQualStart(epq, estate, subplanstate->plan, epq->next); /* * free old RTE' tuple, if any, and store target tuple where relation's @@ -2628,7 +1863,8 @@ EndEvalPlanQual(EState *estate) * the top-level estate rather than initializing it fresh. */ static void -EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq) +EvalPlanQualStart(evalPlanQual *epq, EState *estate, Plan *planTree, + evalPlanQual *priorepq) { EState *epqstate; int rtsize; @@ -2690,6 +1926,9 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq) * Initialize private state information for each SubPlan. We must do this * before running ExecInitNode on the main query tree, since * ExecInitSubPlan expects to be able to find these entries. + * Some of the SubPlans might not be used in the part of the plan tree + * we intend to run, but since it's not easy to tell which, we just + * initialize them all. */ Assert(epqstate->es_subplanstates == NIL); foreach(l, estate->es_plannedstmt->subplans) @@ -2704,11 +1943,11 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq) } /* - * Initialize the private state information for all the nodes in the query - * tree. This opens files, allocates storage and leaves us ready to start - * processing tuples. + * Initialize the private state information for all the nodes in the + * part of the plan tree we need to run. This opens files, allocates + * storage and leaves us ready to start processing tuples. 
*/ - epq->planstate = ExecInitNode(estate->es_plannedstmt->planTree, epqstate, 0); + epq->planstate = ExecInitNode(planTree, epqstate, 0); MemoryContextSwitchTo(oldcontext); } diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c index 1b06ff823f..5339a57b4f 100644 --- a/src/backend/executor/execProcnode.c +++ b/src/backend/executor/execProcnode.c @@ -12,7 +12,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.66 2009/09/27 21:10:53 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.67 2009/10/10 01:43:47 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -93,6 +93,7 @@ #include "executor/nodeLimit.h" #include "executor/nodeMaterial.h" #include "executor/nodeMergejoin.h" +#include "executor/nodeModifyTable.h" #include "executor/nodeNestloop.h" #include "executor/nodeRecursiveunion.h" #include "executor/nodeResult.h" @@ -146,6 +147,11 @@ ExecInitNode(Plan *node, EState *estate, int eflags) estate, eflags); break; + case T_ModifyTable: + result = (PlanState *) ExecInitModifyTable((ModifyTable *) node, + estate, eflags); + break; + case T_Append: result = (PlanState *) ExecInitAppend((Append *) node, estate, eflags); @@ -343,6 +349,10 @@ ExecProcNode(PlanState *node) result = ExecResult((ResultState *) node); break; + case T_ModifyTableState: + result = ExecModifyTable((ModifyTableState *) node); + break; + case T_AppendState: result = ExecAppend((AppendState *) node); break; @@ -524,7 +534,7 @@ MultiExecProcNode(PlanState *node) * Recursively cleans up all the nodes in the plan rooted * at 'node'. * - * After this operation, the query plan will not be able to + * After this operation, the query plan will not be able to be * processed any further. This should be called only after * the query plan has been fully executed. * ---------------------------------------------------------------- @@ -553,6 +563,10 @@ ExecEndNode(PlanState *node) ExecEndResult((ResultState *) node); break; + case T_ModifyTableState: + ExecEndModifyTable((ModifyTableState *) node); + break; + case T_AppendState: ExecEndAppend((AppendState *) node); break; diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c index 014b45a007..47b55a3467 100644 --- a/src/backend/executor/nodeAppend.c +++ b/src/backend/executor/nodeAppend.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.75 2009/09/27 21:10:53 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.76 2009/10/10 01:43:47 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -74,50 +74,33 @@ static bool exec_append_initialize_next(AppendState *appendstate); static bool exec_append_initialize_next(AppendState *appendstate) { - EState *estate; int whichplan; /* * get information from the append node */ - estate = appendstate->ps.state; whichplan = appendstate->as_whichplan; - if (whichplan < appendstate->as_firstplan) + if (whichplan < 0) { /* * if scanning in reverse, we start at the last scan in the list and * then proceed back to the first.. in any case we inform ExecAppend * that we are at the end of the line by returning FALSE */ - appendstate->as_whichplan = appendstate->as_firstplan; + appendstate->as_whichplan = 0; return FALSE; } - else if (whichplan > appendstate->as_lastplan) + else if (whichplan >= appendstate->as_nplans) { /* * as above, end the scan if we go beyond the last scan in our list.. 
*/ - appendstate->as_whichplan = appendstate->as_lastplan; + appendstate->as_whichplan = appendstate->as_nplans - 1; return FALSE; } else { - /* - * initialize the scan - * - * If we are controlling the target relation, select the proper active - * ResultRelInfo and junk filter for this target. - */ - if (((Append *) appendstate->ps.plan)->isTarget) - { - Assert(whichplan < estate->es_num_result_relations); - estate->es_result_relation_info = - estate->es_result_relations + whichplan; - estate->es_junkFilter = - estate->es_result_relation_info->ri_junkFilter; - } - return TRUE; } } @@ -131,10 +114,6 @@ exec_append_initialize_next(AppendState *appendstate) * append node may not be scanned, but this way all of the * structures get allocated in the executor's top level memory * block instead of that of the call to ExecAppend.) - * - * Special case: during an EvalPlanQual recheck query of an inherited - * target relation, we only want to initialize and scan the single - * subplan that corresponds to the target relation being checked. * ---------------------------------------------------------------- */ AppendState * @@ -144,7 +123,7 @@ ExecInitAppend(Append *node, EState *estate, int eflags) PlanState **appendplanstates; int nplans; int i; - Plan *initNode; + ListCell *lc; /* check for unsupported flags */ Assert(!(eflags & EXEC_FLAG_MARK)); @@ -164,27 +143,6 @@ ExecInitAppend(Append *node, EState *estate, int eflags) appendstate->appendplans = appendplanstates; appendstate->as_nplans = nplans; - /* - * Do we want to scan just one subplan? (Special case for EvalPlanQual) - * XXX pretty dirty way of determining that this case applies ... - */ - if (node->isTarget && estate->es_evTuple != NULL) - { - int tplan; - - tplan = estate->es_result_relation_info - estate->es_result_relations; - Assert(tplan >= 0 && tplan < nplans); - - appendstate->as_firstplan = tplan; - appendstate->as_lastplan = tplan; - } - else - { - /* normal case, scan all subplans */ - appendstate->as_firstplan = 0; - appendstate->as_lastplan = nplans - 1; - } - /* * Miscellaneous initialization * @@ -200,32 +158,27 @@ ExecInitAppend(Append *node, EState *estate, int eflags) /* * call ExecInitNode on each of the plans to be executed and save the - * results into the array "appendplans". Note we *must* set - * estate->es_result_relation_info correctly while we initialize each - * sub-plan; ExecContextForcesOids depends on that! + * results into the array "appendplans". */ - for (i = appendstate->as_firstplan; i <= appendstate->as_lastplan; i++) + i = 0; + foreach(lc, node->appendplans) { - appendstate->as_whichplan = i; - exec_append_initialize_next(appendstate); + Plan *initNode = (Plan *) lfirst(lc); - initNode = (Plan *) list_nth(node->appendplans, i); appendplanstates[i] = ExecInitNode(initNode, estate, eflags); + i++; } /* - * Initialize tuple type. (Note: in an inherited UPDATE situation, the - * tuple type computed here corresponds to the parent table, which is - * really a lie since tuples returned from child subplans will not all - * look the same.) 
+ * initialize output tuple type */ ExecAssignResultTypeFromTL(&appendstate->ps); appendstate->ps.ps_ProjInfo = NULL; /* - * return the result from the first subplan's initialization + * initialize to scan first subplan */ - appendstate->as_whichplan = appendstate->as_firstplan; + appendstate->as_whichplan = 0; exec_append_initialize_next(appendstate); return appendstate; @@ -260,9 +213,7 @@ ExecAppend(AppendState *node) /* * If the subplan gave us something then return it as-is. We do * NOT make use of the result slot that was set up in - * ExecInitAppend, first because there's no reason to and second - * because it may have the wrong tuple descriptor in - * inherited-UPDATE cases. + * ExecInitAppend; there's no need for it. */ return result; } @@ -305,13 +256,10 @@ ExecEndAppend(AppendState *node) nplans = node->as_nplans; /* - * shut down each of the subscans (that we've initialized) + * shut down each of the subscans */ for (i = 0; i < nplans; i++) - { - if (appendplans[i]) - ExecEndNode(appendplans[i]); - } + ExecEndNode(appendplans[i]); } void @@ -319,7 +267,7 @@ ExecReScanAppend(AppendState *node, ExprContext *exprCtxt) { int i; - for (i = node->as_firstplan; i <= node->as_lastplan; i++) + for (i = 0; i < node->as_nplans; i++) { PlanState *subnode = node->appendplans[i]; @@ -337,13 +285,8 @@ ExecReScanAppend(AppendState *node, ExprContext *exprCtxt) * exprCtxt down to the subnodes (needed for appendrel indexscan). */ if (subnode->chgParam == NULL || exprCtxt != NULL) - { - /* make sure estate is correct for this subnode (needed??) */ - node->as_whichplan = i; - exec_append_initialize_next(node); ExecReScan(subnode, exprCtxt); - } } - node->as_whichplan = node->as_firstplan; + node->as_whichplan = 0; exec_append_initialize_next(node); } diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c new file mode 100644 index 0000000000..a9fd8c4974 --- /dev/null +++ b/src/backend/executor/nodeModifyTable.c @@ -0,0 +1,1005 @@ +/*------------------------------------------------------------------------- + * + * nodeModifyTable.c + * routines to handle ModifyTable nodes. + * + * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $PostgreSQL: pgsql/src/backend/executor/nodeModifyTable.c,v 1.1 2009/10/10 01:43:47 tgl Exp $ + * + *------------------------------------------------------------------------- + */ +/* INTERFACE ROUTINES + * ExecInitModifyTable - initialize the ModifyTable node + * ExecModifyTable - retrieve the next tuple from the node + * ExecEndModifyTable - shut down the ModifyTable node + * ExecReScanModifyTable - rescan the ModifyTable node + * + * NOTES + * Each ModifyTable node contains a list of one or more subplans, + * much like an Append node. There is one subplan per result relation. + * The key reason for this is that in an inherited UPDATE command, each + * result relation could have a different schema (more or different + * columns) requiring a different plan tree to produce it. In an + * inherited DELETE, all the subplans should produce the same output + * rowtype, but we might still find that different plans are appropriate + * for different child relations. + * + * If the query specifies RETURNING, then the ModifyTable returns a + * RETURNING tuple after completing each row insert, update, or delete. + * It must be called again to continue the operation. 
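The call protocol described above can be modelled outside the executor. Below is a minimal standalone sketch of the multiplexing loop, covering both the row-at-a-time RETURNING case and the drain-everything case; it is not the actual PostgreSQL code. The "subplans" are plain arrays of integers, process_row() stands in for ExecInsert/ExecUpdate/ExecDelete, and all names (ToySubplan, toy_modify_table) are invented for illustration.

#include <stdio.h>
#include <stdbool.h>

/* Toy "subplan": a fixed array of rows, consumed one at a time. */
typedef struct
{
	const int  *rows;
	int			nrows;
	int			next;
} ToySubplan;

/* Stand-in for ExecInsert/ExecUpdate/ExecDelete: produces a "RETURNING
 * row" only when the query has a RETURNING list, else -1. */
static int
process_row(int row, bool has_returning)
{
	return has_returning ? row * 10 : -1;
}

/*
 * Modelled after ExecModifyTable: run the subplans in order.  With
 * RETURNING, hand one result row back per call; without it, loop until
 * every subplan is exhausted and only then report "no more work" (-1).
 */
static int
toy_modify_table(ToySubplan *plans, int nplans, int *whichplan,
				 bool has_returning)
{
	while (*whichplan < nplans)
	{
		ToySubplan *sp = &plans[*whichplan];
		int			result;

		if (sp->next >= sp->nrows)
		{
			(*whichplan)++;		/* subplan exhausted, advance to the next */
			continue;
		}
		result = process_row(sp->rows[sp->next++], has_returning);
		if (has_returning)
			return result;		/* caller must call again to continue */
	}
	return -1;					/* all rows in all subplans processed */
}

int
main(void)
{
	const int	a[] = {1, 2};
	const int	b[] = {3};
	ToySubplan	plans[2] = {{a, 2, 0}, {b, 1, 0}};
	int			which = 0;
	int			r;

	while ((r = toy_modify_table(plans, 2, &which, true)) != -1)
		printf("RETURNING %d\n", r);
	return 0;
}
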
Without RETURNING, + * we just loop within the node until all the work is done, then + * return NULL. This avoids useless call/return overhead. + */ + +#include "postgres.h" + +#include "access/xact.h" +#include "commands/trigger.h" +#include "executor/executor.h" +#include "executor/nodeModifyTable.h" +#include "miscadmin.h" +#include "nodes/nodeFuncs.h" +#include "storage/bufmgr.h" +#include "utils/builtins.h" +#include "utils/memutils.h" +#include "utils/tqual.h" + + +/* + * Verify that the tuples to be produced by INSERT or UPDATE match the + * target relation's rowtype + * + * We do this to guard against stale plans. If plan invalidation is + * functioning properly then we should never get a failure here, but better + * safe than sorry. Note that this is called after we have obtained lock + * on the target rel, so the rowtype can't change underneath us. + * + * The plan output is represented by its targetlist, because that makes + * handling the dropped-column case easier. + */ +static void +ExecCheckPlanOutput(Relation resultRel, List *targetList) +{ + TupleDesc resultDesc = RelationGetDescr(resultRel); + int attno = 0; + ListCell *lc; + + foreach(lc, targetList) + { + TargetEntry *tle = (TargetEntry *) lfirst(lc); + Form_pg_attribute attr; + + if (tle->resjunk) + continue; /* ignore junk tlist items */ + + if (attno >= resultDesc->natts) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("table row type and query-specified row type do not match"), + errdetail("Query has too many columns."))); + attr = resultDesc->attrs[attno++]; + + if (!attr->attisdropped) + { + /* Normal case: demand type match */ + if (exprType((Node *) tle->expr) != attr->atttypid) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("table row type and query-specified row type do not match"), + errdetail("Table has type %s at ordinal position %d, but query expects %s.", + format_type_be(attr->atttypid), + attno, + format_type_be(exprType((Node *) tle->expr))))); + } + else + { + /* + * For a dropped column, we can't check atttypid (it's likely 0). + * In any case the planner has most likely inserted an INT4 null. + * What we insist on is just *some* NULL constant. + */ + if (!IsA(tle->expr, Const) || + !((Const *) tle->expr)->constisnull) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("table row type and query-specified row type do not match"), + errdetail("Query provides a value for a dropped column at ordinal position %d.", + attno))); + } + } + if (attno != resultDesc->natts) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("table row type and query-specified row type do not match"), + errdetail("Query has too few columns."))); +} + +/* + * ExecProcessReturning --- evaluate a RETURNING list + * + * projectReturning: RETURNING projection info for current result rel + * tupleSlot: slot holding tuple actually inserted/updated/deleted + * planSlot: slot holding tuple returned by top subplan node + * + * Returns a slot holding the result tuple + */ +static TupleTableSlot * +ExecProcessReturning(ProjectionInfo *projectReturning, + TupleTableSlot *tupleSlot, + TupleTableSlot *planSlot) +{ + ExprContext *econtext = projectReturning->pi_exprContext; + + /* + * Reset per-tuple memory context to free any expression evaluation + * storage allocated in the previous cycle. 
+ */ + ResetExprContext(econtext); + + /* Make tuple and any needed join variables available to ExecProject */ + econtext->ecxt_scantuple = tupleSlot; + econtext->ecxt_outertuple = planSlot; + + /* Compute the RETURNING expressions */ + return ExecProject(projectReturning, NULL); +} + +/* ---------------------------------------------------------------- + * ExecInsert + * + * For INSERT, we have to insert the tuple into the target relation + * and insert appropriate tuples into the index relations. + * + * Returns RETURNING result if any, otherwise NULL. + * ---------------------------------------------------------------- + */ +static TupleTableSlot * +ExecInsert(TupleTableSlot *slot, + TupleTableSlot *planSlot, + EState *estate) +{ + HeapTuple tuple; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + Oid newId; + List *recheckIndexes = NIL; + + /* + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy + */ + tuple = ExecMaterializeSlot(slot); + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* + * If the result relation has OIDs, force the tuple's OID to zero so that + * heap_insert will assign a fresh OID. Usually the OID already will be + * zero at this point, but there are corner cases where the plan tree can + * return a tuple extracted literally from some table with the same + * rowtype. + * + * XXX if we ever wanted to allow users to assign their own OIDs to new + * rows, this'd be the place to do it. For the moment, we make a point of + * doing this before calling triggers, so that a user-supplied trigger + * could hack the OID if desired. + */ + if (resultRelationDesc->rd_rel->relhasoids) + HeapTupleSetOid(tuple, InvalidOid); + + /* BEFORE ROW INSERT Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0) + { + HeapTuple newtuple; + + newtuple = ExecBRInsertTriggers(estate, resultRelInfo, tuple); + + if (newtuple == NULL) /* "do nothing" */ + return NULL; + + if (newtuple != tuple) /* modified by Trigger(s) */ + { + /* + * Put the modified tuple into a slot for convenience of routines + * below. We assume the tuple was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. + */ + TupleTableSlot *newslot = estate->es_trig_tuple_slot; + + if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) + ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); + ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + slot = newslot; + tuple = newtuple; + } + } + + /* + * Check the constraints of the tuple + */ + if (resultRelationDesc->rd_att->constr) + ExecConstraints(resultRelInfo, slot, estate); + + /* + * insert the tuple + * + * Note: heap_insert returns the tid (location) of the new tuple in the + * t_self field. 
+ */ + newId = heap_insert(resultRelationDesc, tuple, + estate->es_output_cid, 0, NULL); + + (estate->es_processed)++; + estate->es_lastoid = newId; + setLastTid(&(tuple->t_self)); + + /* + * insert index entries for tuple + */ + if (resultRelInfo->ri_NumIndices > 0) + recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), + estate, false); + + /* AFTER ROW INSERT Triggers */ + ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + return ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot); + + return NULL; +} + +/* ---------------------------------------------------------------- + * ExecDelete + * + * DELETE is like UPDATE, except that we delete the tuple and no + * index modifications are needed + * + * Returns RETURNING result if any, otherwise NULL. + * ---------------------------------------------------------------- + */ +static TupleTableSlot * +ExecDelete(ItemPointer tupleid, + TupleTableSlot *planSlot, + PlanState *subplanstate, + EState *estate) +{ + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + ItemPointerData update_ctid; + TransactionId update_xmax; + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW DELETE Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_DELETE] > 0) + { + bool dodelete; + + dodelete = ExecBRDeleteTriggers(estate, subplanstate, resultRelInfo, + tupleid); + + if (!dodelete) /* "do nothing" */ + return NULL; + } + + /* + * delete the tuple + * + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be deleted is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for + * referential integrity updates in serializable transactions. + */ +ldelete:; + result = heap_delete(resultRelationDesc, tupleid, + &update_ctid, &update_xmax, + estate->es_output_cid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ ); + switch (result) + { + case HeapTupleSelfUpdated: + /* already deleted by self; nothing to do */ + return NULL; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + else if (!ItemPointerEquals(tupleid, &update_ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + resultRelInfo->ri_RangeTableIndex, + subplanstate, + &update_ctid, + update_xmax); + if (!TupIsNull(epqslot)) + { + *tupleid = update_ctid; + goto ldelete; + } + } + /* tuple already deleted; nothing to do */ + return NULL; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + return NULL; + } + + (estate->es_processed)++; + + /* + * Note: Normally one would think that we have to delete index tuples + * associated with the heap tuple now... + * + * ... but in POSTGRES, we have no need to do this because VACUUM will + * take care of it later. We can't delete index tuples immediately + * anyway, since the tuple is still visible to other transactions. 
+ */ + + /* AFTER ROW DELETE Triggers */ + ExecARDeleteTriggers(estate, resultRelInfo, tupleid); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + { + /* + * We have to put the target tuple into a slot, which means first we + * gotta fetch it. We can use the trigger tuple slot. + */ + TupleTableSlot *slot = estate->es_trig_tuple_slot; + TupleTableSlot *rslot; + HeapTupleData deltuple; + Buffer delbuffer; + + deltuple.t_self = *tupleid; + if (!heap_fetch(resultRelationDesc, SnapshotAny, + &deltuple, &delbuffer, false, NULL)) + elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING"); + + if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc)) + ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); + ExecStoreTuple(&deltuple, slot, InvalidBuffer, false); + + rslot = ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot); + + ExecClearTuple(slot); + ReleaseBuffer(delbuffer); + + return rslot; + } + + return NULL; +} + +/* ---------------------------------------------------------------- + * ExecUpdate + * + * note: we can't run UPDATE queries with transactions + * off because UPDATEs are actually INSERTs and our + * scan will mistakenly loop forever, updating the tuple + * it just inserted.. This should be fixed but until it + * is, we don't want to get stuck in an infinite loop + * which corrupts your database.. + * + * Returns RETURNING result if any, otherwise NULL. + * ---------------------------------------------------------------- + */ +static TupleTableSlot * +ExecUpdate(ItemPointer tupleid, + TupleTableSlot *slot, + TupleTableSlot *planSlot, + PlanState *subplanstate, + EState *estate) +{ + HeapTuple tuple; + ResultRelInfo *resultRelInfo; + Relation resultRelationDesc; + HTSU_Result result; + ItemPointerData update_ctid; + TransactionId update_xmax; + List *recheckIndexes = NIL; + + /* + * abort the operation if not running transactions + */ + if (IsBootstrapProcessingMode()) + elog(ERROR, "cannot UPDATE during bootstrap"); + + /* + * get the heap tuple out of the tuple table slot, making sure we have a + * writable copy + */ + tuple = ExecMaterializeSlot(slot); + + /* + * get information on the (current) result relation + */ + resultRelInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelInfo->ri_RelationDesc; + + /* BEFORE ROW UPDATE Triggers */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0) + { + HeapTuple newtuple; + + newtuple = ExecBRUpdateTriggers(estate, subplanstate, resultRelInfo, + tupleid, tuple); + + if (newtuple == NULL) /* "do nothing" */ + return NULL; + + if (newtuple != tuple) /* modified by Trigger(s) */ + { + /* + * Put the modified tuple into a slot for convenience of routines + * below. We assume the tuple was allocated in per-tuple memory + * context, and therefore will go away by itself. The tuple table + * slot should not try to clear it. + */ + TupleTableSlot *newslot = estate->es_trig_tuple_slot; + + if (newslot->tts_tupleDescriptor != slot->tts_tupleDescriptor) + ExecSetSlotDescriptor(newslot, slot->tts_tupleDescriptor); + ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + slot = newslot; + tuple = newtuple; + } + } + + /* + * Check the constraints of the tuple + * + * If we generate a new candidate tuple after EvalPlanQual testing, we + * must loop back here and recheck constraints. (We don't need to redo + * triggers, however. 
If there are any BEFORE triggers then trigger.c + * will have done heap_lock_tuple to lock the correct tuple, so there's no + * need to do them again.) + */ +lreplace:; + if (resultRelationDesc->rd_att->constr) + ExecConstraints(resultRelInfo, slot, estate); + + /* + * replace the heap tuple + * + * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that + * the row to be updated is visible to that snapshot, and throw a can't- + * serialize error if not. This is a special-case behavior needed for + * referential integrity updates in serializable transactions. + */ + result = heap_update(resultRelationDesc, tupleid, tuple, + &update_ctid, &update_xmax, + estate->es_output_cid, + estate->es_crosscheck_snapshot, + true /* wait for commit */ ); + switch (result) + { + case HeapTupleSelfUpdated: + /* already deleted by self; nothing to do */ + return NULL; + + case HeapTupleMayBeUpdated: + break; + + case HeapTupleUpdated: + if (IsXactIsoLevelSerializable) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + else if (!ItemPointerEquals(tupleid, &update_ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + resultRelInfo->ri_RangeTableIndex, + subplanstate, + &update_ctid, + update_xmax); + if (!TupIsNull(epqslot)) + { + *tupleid = update_ctid; + slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot); + tuple = ExecMaterializeSlot(slot); + goto lreplace; + } + } + /* tuple already deleted; nothing to do */ + return NULL; + + default: + elog(ERROR, "unrecognized heap_update status: %u", result); + return NULL; + } + + (estate->es_processed)++; + + /* + * Note: instead of having to update the old index tuples associated with + * the heap tuple, all we do is form and insert new index tuples. This is + * because UPDATEs are actually DELETEs and INSERTs, and index tuple + * deletion is done later by VACUUM (see notes in ExecDelete). All we do + * here is insert new index tuples. -cim 9/27/89 + */ + + /* + * insert index entries for tuple + * + * Note: heap_update returns the tid (location) of the new tuple in the + * t_self field. + * + * If it's a HOT update, we mustn't insert new index entries. 
+ */ + if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple)) + recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), + estate, false); + + /* AFTER ROW UPDATE Triggers */ + ExecARUpdateTriggers(estate, resultRelInfo, tupleid, tuple, + recheckIndexes); + + /* Process RETURNING if present */ + if (resultRelInfo->ri_projectReturning) + return ExecProcessReturning(resultRelInfo->ri_projectReturning, + slot, planSlot); + + return NULL; +} + + +/* + * Process BEFORE EACH STATEMENT triggers + */ +static void +fireBSTriggers(ModifyTableState *node) +{ + switch (node->operation) + { + case CMD_INSERT: + ExecBSInsertTriggers(node->ps.state, + node->ps.state->es_result_relations); + break; + case CMD_UPDATE: + ExecBSUpdateTriggers(node->ps.state, + node->ps.state->es_result_relations); + break; + case CMD_DELETE: + ExecBSDeleteTriggers(node->ps.state, + node->ps.state->es_result_relations); + break; + default: + elog(ERROR, "unknown operation"); + break; + } +} + +/* + * Process AFTER EACH STATEMENT triggers + */ +static void +fireASTriggers(ModifyTableState *node) +{ + switch (node->operation) + { + case CMD_INSERT: + ExecASInsertTriggers(node->ps.state, + node->ps.state->es_result_relations); + break; + case CMD_UPDATE: + ExecASUpdateTriggers(node->ps.state, + node->ps.state->es_result_relations); + break; + case CMD_DELETE: + ExecASDeleteTriggers(node->ps.state, + node->ps.state->es_result_relations); + break; + default: + elog(ERROR, "unknown operation"); + break; + } +} + + +/* ---------------------------------------------------------------- + * ExecModifyTable + * + * Perform table modifications as required, and return RETURNING results + * if needed. + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecModifyTable(ModifyTableState *node) +{ + EState *estate = node->ps.state; + CmdType operation = node->operation; + PlanState *subplanstate; + JunkFilter *junkfilter; + TupleTableSlot *slot; + TupleTableSlot *planSlot; + ItemPointer tupleid = NULL; + ItemPointerData tuple_ctid; + + /* + * On first call, fire BEFORE STATEMENT triggers before proceeding. + */ + if (node->fireBSTriggers) + { + fireBSTriggers(node); + node->fireBSTriggers = false; + } + + /* + * es_result_relation_info must point to the currently active result + * relation. (Note we assume that ModifyTable nodes can't be nested.) + * We want it to be NULL whenever we're not within ModifyTable, though. + */ + estate->es_result_relation_info = + estate->es_result_relations + node->mt_whichplan; + + /* Preload local variables */ + subplanstate = node->mt_plans[node->mt_whichplan]; + junkfilter = estate->es_result_relation_info->ri_junkFilter; + + /* + * Fetch rows from subplan(s), and execute the required table modification + * for each row. + */ + for (;;) + { + planSlot = ExecProcNode(subplanstate); + + if (TupIsNull(planSlot)) + { + /* advance to next subplan if any */ + node->mt_whichplan++; + if (node->mt_whichplan < node->mt_nplans) + { + estate->es_result_relation_info++; + subplanstate = node->mt_plans[node->mt_whichplan]; + junkfilter = estate->es_result_relation_info->ri_junkFilter; + continue; + } + else + break; + } + + slot = planSlot; + + if (junkfilter != NULL) + { + /* + * extract the 'ctid' junk attribute. + */ + if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + Datum datum; + bool isNull; + + datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, + &isNull); + /* shouldn't ever get a null result... 
*/ + if (isNull) + elog(ERROR, "ctid is NULL"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* be sure we don't free the ctid!! */ + tupleid = &tuple_ctid; + } + + /* + * apply the junkfilter if needed. + */ + if (operation != CMD_DELETE) + slot = ExecFilterJunk(junkfilter, slot); + } + + switch (operation) + { + case CMD_INSERT: + slot = ExecInsert(slot, planSlot, estate); + break; + case CMD_UPDATE: + slot = ExecUpdate(tupleid, slot, planSlot, + subplanstate, estate); + break; + case CMD_DELETE: + slot = ExecDelete(tupleid, planSlot, + subplanstate, estate); + break; + default: + elog(ERROR, "unknown operation"); + break; + } + + /* + * If we got a RETURNING result, return it to caller. We'll continue + * the work on next call. + */ + if (slot) + { + estate->es_result_relation_info = NULL; + return slot; + } + } + + /* Reset es_result_relation_info before exiting */ + estate->es_result_relation_info = NULL; + + /* + * We're done, but fire AFTER STATEMENT triggers before exiting. + */ + fireASTriggers(node); + + return NULL; +} + +/* ---------------------------------------------------------------- + * ExecInitModifyTable + * ---------------------------------------------------------------- + */ +ModifyTableState * +ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) +{ + ModifyTableState *mtstate; + CmdType operation = node->operation; + int nplans = list_length(node->plans); + ResultRelInfo *resultRelInfo; + TupleDesc tupDesc; + Plan *subplan; + ListCell *l; + int i; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); + + /* + * This should NOT get called during EvalPlanQual; we should have passed + * a subplan tree to EvalPlanQual, instead. Use a runtime test not just + * Assert because this condition is easy to miss in testing ... + */ + if (estate->es_evTuple != NULL) + elog(ERROR, "ModifyTable should not be called during EvalPlanQual"); + + /* + * create state structure + */ + mtstate = makeNode(ModifyTableState); + mtstate->ps.plan = (Plan *) node; + mtstate->ps.state = estate; + mtstate->ps.targetlist = NIL; /* not actually used */ + + mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans); + mtstate->mt_nplans = nplans; + mtstate->operation = operation; + mtstate->fireBSTriggers = true; + + /* For the moment, assume our targets are exactly the global result rels */ + + /* + * call ExecInitNode on each of the plans to be executed and save the + * results into the array "mt_plans". Note we *must* set + * estate->es_result_relation_info correctly while we initialize each + * sub-plan; ExecContextForcesOids depends on that! + */ + estate->es_result_relation_info = estate->es_result_relations; + i = 0; + foreach(l, node->plans) + { + subplan = (Plan *) lfirst(l); + mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags); + estate->es_result_relation_info++; + i++; + } + estate->es_result_relation_info = NULL; + + /* select first subplan */ + mtstate->mt_whichplan = 0; + subplan = (Plan *) linitial(node->plans); + + /* + * Initialize RETURNING projections if needed. + */ + if (node->returningLists) + { + TupleTableSlot *slot; + ExprContext *econtext; + + /* + * Initialize result tuple slot and assign its rowtype using the + * first RETURNING list. We assume the rest will look the same. 
+ */ + tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists), + false); + + /* Set up a slot for the output of the RETURNING projection(s) */ + ExecInitResultTupleSlot(estate, &mtstate->ps); + ExecAssignResultType(&mtstate->ps, tupDesc); + slot = mtstate->ps.ps_ResultTupleSlot; + + /* Need an econtext too */ + econtext = CreateExprContext(estate); + mtstate->ps.ps_ExprContext = econtext; + + /* + * Build a projection for each result rel. + */ + Assert(list_length(node->returningLists) == estate->es_num_result_relations); + resultRelInfo = estate->es_result_relations; + foreach(l, node->returningLists) + { + List *rlist = (List *) lfirst(l); + List *rliststate; + + rliststate = (List *) ExecInitExpr((Expr *) rlist, &mtstate->ps); + resultRelInfo->ri_projectReturning = + ExecBuildProjectionInfo(rliststate, econtext, slot, + resultRelInfo->ri_RelationDesc->rd_att); + resultRelInfo++; + } + } + else + { + /* + * We still must construct a dummy result tuple type, because + * InitPlan expects one (maybe should change that?). + */ + tupDesc = ExecTypeFromTL(NIL, false); + ExecInitResultTupleSlot(estate, &mtstate->ps); + ExecAssignResultType(&mtstate->ps, tupDesc); + + mtstate->ps.ps_ExprContext = NULL; + } + + /* + * Initialize the junk filter(s) if needed. INSERT queries need a filter + * if there are any junk attrs in the tlist. UPDATE and DELETE + * always need a filter, since there's always a junk 'ctid' attribute + * present --- no need to look first. + * + * If there are multiple result relations, each one needs its own junk + * filter. Note multiple rels are only possible for UPDATE/DELETE, so we + * can't be fooled by some needing a filter and some not. + * + * This section of code is also a convenient place to verify that the + * output of an INSERT or UPDATE matches the target table(s). + */ + { + bool junk_filter_needed = false; + + switch (operation) + { + case CMD_INSERT: + foreach(l, subplan->targetlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(l); + + if (tle->resjunk) + { + junk_filter_needed = true; + break; + } + } + break; + case CMD_UPDATE: + case CMD_DELETE: + junk_filter_needed = true; + break; + default: + elog(ERROR, "unknown operation"); + break; + } + + if (junk_filter_needed) + { + resultRelInfo = estate->es_result_relations; + for (i = 0; i < nplans; i++) + { + JunkFilter *j; + + subplan = mtstate->mt_plans[i]->plan; + if (operation == CMD_INSERT || operation == CMD_UPDATE) + ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, + subplan->targetlist); + + j = ExecInitJunkFilter(subplan->targetlist, + resultRelInfo->ri_RelationDesc->rd_att->tdhasoid, + ExecInitExtraTupleSlot(estate)); + + if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + /* For UPDATE/DELETE, find the ctid junk attr now */ + j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } + + resultRelInfo->ri_junkFilter = j; + resultRelInfo++; + } + } + else + { + if (operation == CMD_INSERT) + ExecCheckPlanOutput(estate->es_result_relations->ri_RelationDesc, + subplan->targetlist); + } + } + + /* + * Set up a tuple table slot for use for trigger output tuples. + * In a plan containing multiple ModifyTable nodes, all can share + * one such slot, so we keep it in the estate. 
+ */ + if (estate->es_trig_tuple_slot == NULL) + estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); + + return mtstate; +} + +/* ---------------------------------------------------------------- + * ExecEndModifyTable + * + * Shuts down the plan. + * + * Returns nothing of interest. + * ---------------------------------------------------------------- + */ +void +ExecEndModifyTable(ModifyTableState *node) +{ + int i; + + /* + * Free the exprcontext + */ + ExecFreeExprContext(&node->ps); + + /* + * clean out the tuple table + */ + ExecClearTuple(node->ps.ps_ResultTupleSlot); + + /* + * shut down subplans + */ + for (i=0; imt_nplans; i++) + ExecEndNode(node->mt_plans[i]); +} + +void +ExecReScanModifyTable(ModifyTableState *node, ExprContext *exprCtxt) +{ + /* + * Currently, we don't need to support rescan on ModifyTable nodes. + * The semantics of that would be a bit debatable anyway. + */ + elog(ERROR, "ExecReScanModifyTable is not implemented"); +} diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 8fc4536041..e6bb04bc8a 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.209 2009/10/02 17:57:30 alvherre Exp $ + * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.210 2009/10/10 01:43:47 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -1975,19 +1975,19 @@ _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, long tcount) res = SPI_OK_SELECT; break; case CMD_INSERT: - if (queryDesc->plannedstmt->returningLists) + if (queryDesc->plannedstmt->hasReturning) res = SPI_OK_INSERT_RETURNING; else res = SPI_OK_INSERT; break; case CMD_DELETE: - if (queryDesc->plannedstmt->returningLists) + if (queryDesc->plannedstmt->hasReturning) res = SPI_OK_DELETE_RETURNING; else res = SPI_OK_DELETE; break; case CMD_UPDATE: - if (queryDesc->plannedstmt->returningLists) + if (queryDesc->plannedstmt->hasReturning) res = SPI_OK_UPDATE_RETURNING; else res = SPI_OK_UPDATE; @@ -2011,7 +2011,7 @@ _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, long tcount) _SPI_current->processed = queryDesc->estate->es_processed; _SPI_current->lastoid = queryDesc->estate->es_lastoid; - if ((res == SPI_OK_SELECT || queryDesc->plannedstmt->returningLists) && + if ((res == SPI_OK_SELECT || queryDesc->plannedstmt->hasReturning) && queryDesc->dest->mydest == DestSPI) { if (_SPI_checktuples()) diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 27c231701d..cee7cb8ded 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -15,7 +15,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.442 2009/10/08 02:39:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.443 2009/10/10 01:43:49 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -77,6 +77,7 @@ _copyPlannedStmt(PlannedStmt *from) PlannedStmt *newnode = makeNode(PlannedStmt); COPY_SCALAR_FIELD(commandType); + COPY_SCALAR_FIELD(hasReturning); COPY_SCALAR_FIELD(canSetTag); COPY_NODE_FIELD(planTree); COPY_NODE_FIELD(rtable); @@ -85,7 +86,6 @@ _copyPlannedStmt(PlannedStmt *from) COPY_NODE_FIELD(intoClause); COPY_NODE_FIELD(subplans); COPY_BITMAPSET_FIELD(rewindPlanIDs); - COPY_NODE_FIELD(returningLists); COPY_NODE_FIELD(rowMarks); COPY_NODE_FIELD(relationOids); COPY_NODE_FIELD(invalItems); @@ 
-154,6 +154,30 @@ _copyResult(Result *from) return newnode; } +/* + * _copyModifyTable + */ +static ModifyTable * +_copyModifyTable(ModifyTable *from) +{ + ModifyTable *newnode = makeNode(ModifyTable); + + /* + * copy node superclass fields + */ + CopyPlanFields((Plan *) from, (Plan *) newnode); + + /* + * copy remainder of node + */ + COPY_SCALAR_FIELD(operation); + COPY_NODE_FIELD(resultRelations); + COPY_NODE_FIELD(plans); + COPY_NODE_FIELD(returningLists); + + return newnode; +} + /* * _copyAppend */ @@ -171,7 +195,6 @@ _copyAppend(Append *from) * copy remainder of node */ COPY_NODE_FIELD(appendplans); - COPY_SCALAR_FIELD(isTarget); return newnode; } @@ -3482,6 +3505,9 @@ copyObject(void *from) case T_Result: retval = _copyResult(from); break; + case T_ModifyTable: + retval = _copyModifyTable(from); + break; case T_Append: retval = _copyAppend(from); break; diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 79665ed12a..a776f9fe3e 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.366 2009/10/08 02:39:21 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.367 2009/10/10 01:43:49 tgl Exp $ * * NOTES * Every node type that can appear in stored rules' parsetrees *must* @@ -242,6 +242,7 @@ _outPlannedStmt(StringInfo str, PlannedStmt *node) WRITE_NODE_TYPE("PLANNEDSTMT"); WRITE_ENUM_FIELD(commandType, CmdType); + WRITE_BOOL_FIELD(hasReturning); WRITE_BOOL_FIELD(canSetTag); WRITE_NODE_FIELD(planTree); WRITE_NODE_FIELD(rtable); @@ -250,7 +251,6 @@ _outPlannedStmt(StringInfo str, PlannedStmt *node) WRITE_NODE_FIELD(intoClause); WRITE_NODE_FIELD(subplans); WRITE_BITMAPSET_FIELD(rewindPlanIDs); - WRITE_NODE_FIELD(returningLists); WRITE_NODE_FIELD(rowMarks); WRITE_NODE_FIELD(relationOids); WRITE_NODE_FIELD(invalItems); @@ -318,6 +318,19 @@ _outResult(StringInfo str, Result *node) WRITE_NODE_FIELD(resconstantqual); } +static void +_outModifyTable(StringInfo str, ModifyTable *node) +{ + WRITE_NODE_TYPE("MODIFYTABLE"); + + _outPlanInfo(str, (Plan *) node); + + WRITE_ENUM_FIELD(operation, CmdType); + WRITE_NODE_FIELD(resultRelations); + WRITE_NODE_FIELD(plans); + WRITE_NODE_FIELD(returningLists); +} + static void _outAppend(StringInfo str, Append *node) { @@ -326,7 +339,6 @@ _outAppend(StringInfo str, Append *node) _outPlanInfo(str, (Plan *) node); WRITE_NODE_FIELD(appendplans); - WRITE_BOOL_FIELD(isTarget); } static void @@ -1501,7 +1513,6 @@ _outPlannerInfo(StringInfo str, PlannerInfo *node) WRITE_UINT_FIELD(query_level); WRITE_NODE_FIELD(join_rel_list); WRITE_NODE_FIELD(resultRelations); - WRITE_NODE_FIELD(returningLists); WRITE_NODE_FIELD(init_plans); WRITE_NODE_FIELD(cte_plan_ids); WRITE_NODE_FIELD(eq_classes); @@ -2408,6 +2419,9 @@ _outNode(StringInfo str, void *obj) case T_Result: _outResult(str, obj); break; + case T_ModifyTable: + _outModifyTable(str, obj); + break; case T_Append: _outAppend(str, obj); break; diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 0bb53d3308..b29b076591 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -10,7 +10,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.263 2009/09/17 20:49:29 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.264 2009/10/10 01:43:49 tgl Exp $ * 
*------------------------------------------------------------------------- */ @@ -579,7 +579,7 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path) subplans = lappend(subplans, create_plan(root, subpath)); } - plan = make_append(subplans, false, tlist); + plan = make_append(subplans, tlist); return (Plan *) plan; } @@ -2621,7 +2621,7 @@ make_worktablescan(List *qptlist, } Append * -make_append(List *appendplans, bool isTarget, List *tlist) +make_append(List *appendplans, List *tlist) { Append *node = makeNode(Append); Plan *plan = &node->plan; @@ -2657,7 +2657,6 @@ make_append(List *appendplans, bool isTarget, List *tlist) plan->lefttree = NULL; plan->righttree = NULL; node->appendplans = appendplans; - node->isTarget = isTarget; return node; } @@ -3711,6 +3710,73 @@ make_result(PlannerInfo *root, return node; } +/* + * make_modifytable + * Build a ModifyTable plan node + * + * Currently, we don't charge anything extra for the actual table modification + * work, nor for the RETURNING expressions if any. It would only be window + * dressing, since these are always top-level nodes and there is no way for + * the costs to change any higher-level planning choices. But we might want + * to make it look better sometime. + */ +ModifyTable * +make_modifytable(CmdType operation, List *resultRelations, + List *subplans, List *returningLists) +{ + ModifyTable *node = makeNode(ModifyTable); + Plan *plan = &node->plan; + double total_size; + ListCell *subnode; + + Assert(list_length(resultRelations) == list_length(subplans)); + Assert(returningLists == NIL || + list_length(resultRelations) == list_length(returningLists)); + + /* + * Compute cost as sum of subplan costs. + */ + plan->startup_cost = 0; + plan->total_cost = 0; + plan->plan_rows = 0; + total_size = 0; + foreach(subnode, subplans) + { + Plan *subplan = (Plan *) lfirst(subnode); + + if (subnode == list_head(subplans)) /* first node? */ + plan->startup_cost = subplan->startup_cost; + plan->total_cost += subplan->total_cost; + plan->plan_rows += subplan->plan_rows; + total_size += subplan->plan_width * subplan->plan_rows; + } + if (plan->plan_rows > 0) + plan->plan_width = rint(total_size / plan->plan_rows); + else + plan->plan_width = 0; + + node->plan.lefttree = NULL; + node->plan.righttree = NULL; + node->plan.qual = NIL; + + /* + * Set up the visible plan targetlist as being the same as the first + * RETURNING list. This is for the use of EXPLAIN; the executor won't + * pay any attention to the targetlist. + */ + if (returningLists) + node->plan.targetlist = copyObject(linitial(returningLists)); + else + node->plan.targetlist = NIL; + + node->operation = operation; + node->resultRelations = resultRelations; + node->plans = subplans; + node->returningLists = returningLists; + + return node; +} + /* * is_projection_capable_plan * Check whether a given Plan node is able to do projection. 
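The cost arithmetic in make_modifytable above is plain aggregation: startup cost is taken from the first subplan, total cost and row count are summed, and the width is a row-weighted average of the subplan widths. A standalone recomputation of that arithmetic follows; ToyPlanCost and sum_subplan_costs are invented names, and the numbers in main() are made up (the real code works on Plan nodes).

#include <stdio.h>
#include <math.h>

typedef struct
{
	double		startup_cost;
	double		total_cost;
	double		plan_rows;
	int			plan_width;
} ToyPlanCost;

/* Aggregate subplan costs the same way make_modifytable does. */
static ToyPlanCost
sum_subplan_costs(const ToyPlanCost *subplans, int nplans)
{
	ToyPlanCost result = {0, 0, 0, 0};
	double		total_size = 0;
	int			i;

	for (i = 0; i < nplans; i++)
	{
		if (i == 0)				/* first subplan supplies the startup cost */
			result.startup_cost = subplans[i].startup_cost;
		result.total_cost += subplans[i].total_cost;
		result.plan_rows += subplans[i].plan_rows;
		total_size += (double) subplans[i].plan_width * subplans[i].plan_rows;
	}
	result.plan_width = (result.plan_rows > 0)
		? (int) rint(total_size / result.plan_rows)
		: 0;
	return result;
}

int
main(void)
{
	/* e.g. a parent-table scan and one child-table scan (made-up costs) */
	ToyPlanCost subplans[2] = {
		{0.00, 35.50, 1000, 40},
		{0.25, 12.75, 200, 56},
	};
	ToyPlanCost mt = sum_subplan_costs(subplans, 2);

	printf("cost=%.2f..%.2f rows=%.0f width=%d\n",
		   mt.startup_cost, mt.total_cost, mt.plan_rows, mt.plan_width);
	return 0;
}

(Compile with -lm for rint.)
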
@@ -3727,6 +3793,7 @@ is_projection_capable_plan(Plan *plan) case T_Unique: case T_SetOp: case T_Limit: + case T_ModifyTable: case T_Append: case T_RecursiveUnion: return false; diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 64f77a5c71..4b06c823b6 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.257 2009/10/08 02:39:21 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.258 2009/10/10 01:43:49 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -217,6 +217,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) result = makeNode(PlannedStmt); result->commandType = parse->commandType; + result->hasReturning = (parse->returningList != NIL); result->canSetTag = parse->canSetTag; result->transientPlan = glob->transientPlan; result->planTree = top_plan; @@ -226,7 +227,6 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) result->intoClause = parse->intoClause; result->subplans = glob->subplans; result->rewindPlanIDs = glob->rewindPlanIDs; - result->returningLists = root->returningLists; result->rowMarks = parse->rowMarks; result->relationOids = glob->relationOids; result->invalItems = glob->invalItems; @@ -478,7 +478,39 @@ subquery_planner(PlannerGlobal *glob, Query *parse, rt_fetch(parse->resultRelation, parse->rtable)->inh) plan = inheritance_planner(root); else + { plan = grouping_planner(root, tuple_fraction); + /* If it's not SELECT, we need a ModifyTable node */ + if (parse->commandType != CMD_SELECT) + { + /* + * Deal with the RETURNING clause if any. It's convenient to pass + * the returningList through setrefs.c now rather than at top + * level (if we waited, handling inherited UPDATE/DELETE would be + * much harder). + */ + List *returningLists; + + if (parse->returningList) + { + List *rlist; + + Assert(parse->resultRelation); + rlist = set_returning_clause_references(root->glob, + parse->returningList, + plan, + parse->resultRelation); + returningLists = list_make1(rlist); + } + else + returningLists = NIL; + + plan = (Plan *) make_modifytable(parse->commandType, + copyObject(root->resultRelations), + list_make1(plan), + returningLists); + } + } /* * If any subplans were generated, or if we're inside a subplan, build @@ -625,9 +657,7 @@ preprocess_qual_conditions(PlannerInfo *root, Node *jtnode) * is an inheritance set. Source inheritance is expanded at the bottom of the * plan tree (see allpaths.c), but target inheritance has to be expanded at * the top. The reason is that for UPDATE, each target relation needs a - * different targetlist matching its own column set. Also, for both UPDATE - * and DELETE, the executor needs the Append plan node at the top, else it - * can't keep track of which table is the current target table. Fortunately, + * different targetlist matching its own column set. Fortunately, * the UPDATE/DELETE target can never be the nullable side of an outer join, * so it's OK to generate the plan this way. 
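To make the structural change concrete: before this patch an inherited UPDATE/DELETE put a targeted Append at the top of the plan and the Append executor switched the active result relation; now the same queries, and plain single-table INSERT/UPDATE/DELETE as well, end in a ModifyTable node. Roughly, as a schematic plan shape (not literal EXPLAIN output):

    before                               after
    ------                               -----
    Append  (isTarget)                   ModifyTable  (UPDATE)
      -> subplan for parent                -> subplan for parent
      -> subplan for child_1               -> subplan for child_1
      -> subplan for child_2               -> subplan for child_2

Each subplan corresponds to one entry in resultRelations and, when RETURNING is present, to one list in returningLists.
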
* @@ -642,7 +672,7 @@ inheritance_planner(PlannerInfo *root) List *resultRelations = NIL; List *returningLists = NIL; List *rtable = NIL; - List *tlist = NIL; + List *tlist; PlannerInfo subroot; ListCell *l; @@ -662,7 +692,6 @@ inheritance_planner(PlannerInfo *root) subroot.parse = (Query *) adjust_appendrel_attrs((Node *) parse, appinfo); - subroot.returningLists = NIL; subroot.init_plans = NIL; /* We needn't modify the child's append_rel_list */ /* There shouldn't be any OJ info to translate, as yet */ @@ -680,12 +709,9 @@ inheritance_planner(PlannerInfo *root) if (is_dummy_plan(subplan)) continue; - /* Save rtable and tlist from first rel for use below */ + /* Save rtable from first rel for use below */ if (subplans == NIL) - { rtable = subroot.parse->rtable; - tlist = subplan->targetlist; - } subplans = lappend(subplans, subplan); @@ -698,20 +724,24 @@ inheritance_planner(PlannerInfo *root) /* Build list of per-relation RETURNING targetlists */ if (parse->returningList) { - Assert(list_length(subroot.returningLists) == 1); - returningLists = list_concat(returningLists, - subroot.returningLists); + List *rlist; + + rlist = set_returning_clause_references(root->glob, + subroot.parse->returningList, + subplan, + appinfo->child_relid); + returningLists = lappend(returningLists, rlist); } } root->resultRelations = resultRelations; - root->returningLists = returningLists; /* Mark result as unordered (probably unnecessary) */ root->query_pathkeys = NIL; /* - * If we managed to exclude every child rel, return a dummy plan + * If we managed to exclude every child rel, return a dummy plan; + * it doesn't even need a ModifyTable node. */ if (subplans == NIL) { @@ -738,11 +768,11 @@ inheritance_planner(PlannerInfo *root) */ parse->rtable = rtable; - /* Suppress Append if there's only one surviving child rel */ - if (list_length(subplans) == 1) - return (Plan *) linitial(subplans); - - return (Plan *) make_append(subplans, true, tlist); + /* And last, tack on a ModifyTable node to do the UPDATE/DELETE work */ + return (Plan *) make_modifytable(parse->commandType, + copyObject(root->resultRelations), + subplans, + returningLists); } /*-------------------- @@ -1569,25 +1599,6 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) count_est); } - /* - * Deal with the RETURNING clause if any. It's convenient to pass the - * returningList through setrefs.c now rather than at top level (if we - * waited, handling inherited UPDATE/DELETE would be much harder). 
- */ - if (parse->returningList) - { - List *rlist; - - Assert(parse->resultRelation); - rlist = set_returning_clause_references(root->glob, - parse->returningList, - result_plan, - parse->resultRelation); - root->returningLists = list_make1(rlist); - } - else - root->returningLists = NIL; - /* Compute result-relations list if needed */ if (parse->resultRelation) root->resultRelations = list_make1_int(parse->resultRelation); diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 11e14f96c5..9b10b381ac 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.150 2009/06/11 14:48:59 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.151 2009/10/10 01:43:49 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -442,6 +442,29 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset) fix_scan_expr(glob, splan->resconstantqual, rtoffset); } break; + case T_ModifyTable: + { + ModifyTable *splan = (ModifyTable *) plan; + + /* + * planner.c already called set_returning_clause_references, + * so we should not process either the targetlist or the + * returningLists. + */ + Assert(splan->plan.qual == NIL); + + foreach(l, splan->resultRelations) + { + lfirst_int(l) += rtoffset; + } + foreach(l, splan->plans) + { + lfirst(l) = set_plan_refs(glob, + (Plan *) lfirst(l), + rtoffset); + } + } + break; case T_Append: { Append *splan = (Append *) plan; @@ -1600,7 +1623,7 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context) * * If the query involves more than just the result table, we have to * adjust any Vars that refer to other tables to reference junk tlist - * entries in the top plan's targetlist. Vars referencing the result + * entries in the top subplan's targetlist. Vars referencing the result * table should be left alone, however (the executor will evaluate them * using the actual heap tuple, after firing triggers if any). In the * adjusted RETURNING list, result-table Vars will still have their @@ -1610,8 +1633,8 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context) * glob->relationOids. 
* * 'rlist': the RETURNING targetlist to be fixed - * 'topplan': the top Plan node for the query (not yet passed through - * set_plan_references) + * 'topplan': the top subplan node that will be just below the ModifyTable + * node (note it's not yet passed through set_plan_references) * 'resultRelation': RT index of the associated result relation * * Note: we assume that result relations will have rtoffset zero, that is, diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index 78809474a3..6a813106d1 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.153 2009/09/12 22:12:04 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.154 2009/10/10 01:43:49 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -1937,6 +1937,23 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params) ((WorkTableScan *) plan)->wtParam); break; + case T_ModifyTable: + { + ListCell *l; + + finalize_primnode((Node *) ((ModifyTable *) plan)->returningLists, + &context); + foreach(l, ((ModifyTable *) plan)->plans) + { + context.paramids = + bms_add_members(context.paramids, + finalize_plan(root, + (Plan *) lfirst(l), + valid_params)); + } + } + break; + case T_Append: { ListCell *l; diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c index a0098836b9..7e6e1fbdcb 100644 --- a/src/backend/optimizer/prep/prepunion.c +++ b/src/backend/optimizer/prep/prepunion.c @@ -22,7 +22,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.174 2009/09/02 17:52:24 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.175 2009/10/10 01:43:49 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -448,7 +448,7 @@ generate_union_plan(SetOperationStmt *op, PlannerInfo *root, /* * Append the child results together. */ - plan = (Plan *) make_append(planlist, false, tlist); + plan = (Plan *) make_append(planlist, tlist); /* * For UNION ALL, we just need the Append plan. For UNION, need to add @@ -539,7 +539,7 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root, /* * Append the child results together. 
*/ - plan = (Plan *) make_append(planlist, false, tlist); + plan = (Plan *) make_append(planlist, tlist); /* Identify the grouping semantics */ groupList = generate_setop_grouplist(op, tlist); diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c index 98716830cd..f07bb49b53 100644 --- a/src/backend/tcop/pquery.c +++ b/src/backend/tcop/pquery.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.131 2009/06/11 14:49:02 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.132 2009/10/10 01:43:49 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -338,7 +338,7 @@ ChoosePortalStrategy(List *stmts) { if (++nSetTag > 1) return PORTAL_MULTI_QUERY; /* no need to look further */ - if (pstmt->returningLists == NIL) + if (!pstmt->hasReturning) return PORTAL_MULTI_QUERY; /* no need to look further */ } } @@ -414,8 +414,8 @@ FetchStatementTargetList(Node *stmt) pstmt->utilityStmt == NULL && pstmt->intoClause == NULL) return pstmt->planTree->targetlist; - if (pstmt->returningLists) - return (List *) linitial(pstmt->returningLists); + if (pstmt->hasReturning) + return pstmt->planTree->targetlist; return NIL; } if (IsA(stmt, FetchStmt)) @@ -570,9 +570,9 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot) pstmt = (PlannedStmt *) PortalGetPrimaryStmt(portal); Assert(IsA(pstmt, PlannedStmt)); - Assert(pstmt->returningLists); + Assert(pstmt->hasReturning); portal->tupDesc = - ExecCleanTypeFromTL((List *) linitial(pstmt->returningLists), + ExecCleanTypeFromTL(pstmt->planTree->targetlist, false); } diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index d88d8f22f3..35c530bbda 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.308 2009/10/09 21:02:55 petere Exp $ + * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.309 2009/10/10 01:43:49 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -3346,11 +3346,12 @@ static void push_plan(deparse_namespace *dpns, Plan *subplan) { /* - * We special-case Append to pretend that the first child plan is the - * OUTER referent; otherwise normal. + * We special-case ModifyTable to pretend that the first child plan is the + * OUTER referent; otherwise normal. This is to support RETURNING lists + * containing references to non-target relations. 
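For example, in a command such as UPDATE target SET ... FROM other ... RETURNING other.col, the Var for other.col is adjusted (per the set_returning_clause_references comment above) to varno OUTER, pointing at a resjunk entry in the subplan's targetlist. Since a ModifyTable node has no outerPlan link of its own, push_plan makes ruleutils resolve such OUTER references against the first subplan in the plans list when deparsing the node's targetlist, e.g. when EXPLAIN VERBOSE prints the node's output columns.
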
*/ - if (IsA(subplan, Append)) - dpns->outer_plan = (Plan *) linitial(((Append *) subplan)->appendplans); + if (IsA(subplan, ModifyTable)) + dpns->outer_plan = (Plan *) linitial(((ModifyTable *) subplan)->plans); else dpns->outer_plan = outerPlan(subplan); diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index 82f90a9312..6cd138b288 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -35,7 +35,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/cache/plancache.c,v 1.28 2009/07/14 15:37:50 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/utils/cache/plancache.c,v 1.29 2009/10/10 01:43:50 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -880,8 +880,8 @@ PlanCacheComputeResultDesc(List *stmt_list) if (IsA(node, PlannedStmt)) { pstmt = (PlannedStmt *) node; - Assert(pstmt->returningLists); - return ExecCleanTypeFromTL((List *) linitial(pstmt->returningLists), false); + Assert(pstmt->hasReturning); + return ExecCleanTypeFromTL(pstmt->planTree->targetlist, false); } /* other cases shouldn't happen, but return NULL */ break; diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h index c92337a5a1..94cb061959 100644 --- a/src/include/commands/trigger.h +++ b/src/include/commands/trigger.h @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/include/commands/trigger.h,v 1.75 2009/07/29 20:56:20 tgl Exp $ + * $PostgreSQL: pgsql/src/include/commands/trigger.h,v 1.76 2009/10/10 01:43:50 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -139,6 +139,7 @@ extern void ExecBSDeleteTriggers(EState *estate, extern void ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo); extern bool ExecBRDeleteTriggers(EState *estate, + PlanState *subplanstate, ResultRelInfo *relinfo, ItemPointer tupleid); extern void ExecARDeleteTriggers(EState *estate, @@ -149,6 +150,7 @@ extern void ExecBSUpdateTriggers(EState *estate, extern void ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo); extern HeapTuple ExecBRUpdateTriggers(EState *estate, + PlanState *subplanstate, ResultRelInfo *relinfo, ItemPointer tupleid, HeapTuple newtuple); diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 69fbb932fe..cb79e26976 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.160 2009/09/27 21:10:53 tgl Exp $ + * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.161 2009/10/10 01:43:50 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -167,6 +167,7 @@ extern bool ExecContextForcesOids(PlanState *planstate, bool *hasoids); extern void ExecConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate); extern TupleTableSlot *EvalPlanQual(EState *estate, Index rti, + PlanState *subplanstate, ItemPointer tid, TransactionId priorXmax); extern PlanState *ExecGetActivePlanTree(QueryDesc *queryDesc); extern DestReceiver *CreateIntoRelDestReceiver(void); diff --git 
a/src/include/executor/nodeModifyTable.h b/src/include/executor/nodeModifyTable.h new file mode 100644 index 0000000000..e9662d4cf8 --- /dev/null +++ b/src/include/executor/nodeModifyTable.h @@ -0,0 +1,23 @@ +/*------------------------------------------------------------------------- + * + * nodeModifyTable.h + * + * + * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * $PostgreSQL: pgsql/src/include/executor/nodeModifyTable.h,v 1.1 2009/10/10 01:43:50 tgl Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEMODIFYTABLE_H +#define NODEMODIFYTABLE_H + +#include "nodes/execnodes.h" + +extern ModifyTableState *ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags); +extern TupleTableSlot *ExecModifyTable(ModifyTableState *node); +extern void ExecEndModifyTable(ModifyTableState *node); +extern void ExecReScanModifyTable(ModifyTableState *node, ExprContext *exprCtxt); + +#endif /* NODEMODIFYTABLE_H */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index ea66e109c1..264ef741da 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.208 2009/09/27 20:09:58 tgl Exp $ + * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.209 2009/10/10 01:43:50 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -339,7 +339,7 @@ typedef struct EState ResultRelInfo *es_result_relations; /* array of ResultRelInfos */ int es_num_result_relations; /* length of array */ ResultRelInfo *es_result_relation_info; /* currently active array elt */ - JunkFilter *es_junkFilter; /* currently active junk filter */ + JunkFilter *es_junkFilter; /* top-level junk filter, if any */ /* Stuff used for firing triggers: */ List *es_trig_target_relations; /* trigger-only ResultRelInfos */ @@ -975,13 +975,25 @@ typedef struct ResultState bool rs_checkqual; /* do we need to check the qual? */ } ResultState; +/* ---------------- + * ModifyTableState information + * ---------------- + */ +typedef struct ModifyTableState +{ + PlanState ps; /* its first field is NodeTag */ + CmdType operation; + PlanState **mt_plans; /* subplans (one per target rel) */ + int mt_nplans; /* number of plans in the array */ + int mt_whichplan; /* which one is being executed (0..n-1) */ + bool fireBSTriggers; /* do we need to fire stmt triggers? */ +} ModifyTableState; + /* ---------------- * AppendState information * - * nplans how many plans are in the list + * nplans how many plans are in the array * whichplan which plan is being executed (0 .. 
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index ea66e109c1..264ef741da 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.208 2009/09/27 20:09:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.209 2009/10/10 01:43:50 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -339,7 +339,7 @@ typedef struct EState
 	ResultRelInfo *es_result_relations; /* array of ResultRelInfos */
 	int			es_num_result_relations;	/* length of array */
 	ResultRelInfo *es_result_relation_info; /* currently active array elt */
-	JunkFilter *es_junkFilter;	/* currently active junk filter */
+	JunkFilter *es_junkFilter;	/* top-level junk filter, if any */
 
 	/* Stuff used for firing triggers: */
 	List	   *es_trig_target_relations;	/* trigger-only ResultRelInfos */
@@ -975,13 +975,25 @@ typedef struct ResultState
 	bool		rs_checkqual;	/* do we need to check the qual? */
 } ResultState;
 
+/* ----------------
+ *	 ModifyTableState information
+ * ----------------
+ */
+typedef struct ModifyTableState
+{
+	PlanState	ps;				/* its first field is NodeTag */
+	CmdType		operation;
+	PlanState **mt_plans;		/* subplans (one per target rel) */
+	int			mt_nplans;		/* number of plans in the array */
+	int			mt_whichplan;	/* which one is being executed (0..n-1) */
+	bool		fireBSTriggers;	/* do we need to fire stmt triggers? */
+} ModifyTableState;
+
 /* ----------------
  *	 AppendState information
  *
- *		nplans			how many plans are in the list
+ *		nplans			how many plans are in the array
  *		whichplan		which plan is being executed (0 .. n-1)
- *		firstplan		first plan to execute (usually 0)
- *		lastplan		last plan to execute (usually n-1)
  * ----------------
 */
 typedef struct AppendState
@@ -990,8 +1002,6 @@ typedef struct AppendState
 	PlanState **appendplans;	/* array of PlanStates for my inputs */
 	int			as_nplans;
 	int			as_whichplan;
-	int			as_firstplan;
-	int			as_lastplan;
 } AppendState;
 
 /* ----------------
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 2a4468799f..53c406cc51 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/include/nodes/nodes.h,v 1.228 2009/10/08 02:39:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/nodes.h,v 1.229 2009/10/10 01:43:50 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -43,6 +43,7 @@ typedef enum NodeTag
 	 */
 	T_Plan = 100,
 	T_Result,
+	T_ModifyTable,
 	T_Append,
 	T_RecursiveUnion,
 	T_BitmapAnd,
@@ -81,6 +82,7 @@ typedef enum NodeTag
 	 */
 	T_PlanState = 200,
 	T_ResultState,
+	T_ModifyTableState,
 	T_AppendState,
 	T_RecursiveUnionState,
 	T_BitmapAndState,
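The new ModifyTableState above carries an array of per-target-relation subplans plus a cursor, mt_whichplan. The executor file nodeModifyTable.c itself is not part of this excerpt, so the sketch below is only a guess at the demand-pull shape such a node would have: pull rows from the current subplan, advance to the next one when it is exhausted, and report end-of-data when all are done. ToySubplan, pull_row and toy_exec_modify_table are invented stand-ins for ExecProcNode and the real insert/update/delete work.

/*
 * Minimal, self-contained model (assumptions, not backend code) of a
 * ModifyTable-style node stepping through its per-target-rel subplans.
 */
#include <stdio.h>

typedef struct ToySubplan
{
	int			nrows;			/* rows this subplan will produce */
	int			next;			/* next row index to hand out */
} ToySubplan;

typedef struct ToyModifyTableState
{
	ToySubplan **mt_plans;		/* subplans (one per target rel) */
	int			mt_nplans;		/* number of plans in the array */
	int			mt_whichplan;	/* which one is being executed (0..n-1) */
} ToyModifyTableState;

static int
pull_row(ToySubplan *p)			/* stand-in for ExecProcNode */
{
	return (p->next < p->nrows) ? p->next++ : -1;	/* -1 == no more rows */
}

/* Pull the next source row, switching subplans as each one is exhausted. */
static int
toy_exec_modify_table(ToyModifyTableState *node)
{
	while (node->mt_whichplan < node->mt_nplans)
	{
		int			row = pull_row(node->mt_plans[node->mt_whichplan]);

		if (row >= 0)
			return row;			/* the insert/update/delete would happen here */
		node->mt_whichplan++;	/* this target rel is done; move on */
	}
	return -1;					/* all subplans exhausted */
}

int
main(void)
{
	ToySubplan	a = {2, 0}, b = {1, 0};
	ToySubplan *plans[] = {&a, &b};
	ToyModifyTableState node = {plans, 2, 0};
	int			row;

	while ((row = toy_exec_modify_table(&node)) >= 0)
		printf("applied row %d from subplan %d\n", row, node.mt_whichplan);
	return 0;
}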
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 44f14140f4..26b0fc3335 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.110 2009/06/11 14:49:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.111 2009/10/10 01:43:50 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -38,6 +38,8 @@ typedef struct PlannedStmt
 
 	CmdType		commandType;	/* select|insert|update|delete */
 
+	bool		hasReturning;	/* is it insert|update|delete RETURNING? */
+
 	bool		canSetTag;		/* do I set the command result tag? */
 
 	bool		transientPlan;	/* redo plan when TransactionXmin changes? */
@@ -57,18 +59,6 @@ typedef struct PlannedStmt
 
 	Bitmapset  *rewindPlanIDs;	/* indices of subplans that require REWIND */
 
-	/*
-	 * If the query has a returningList then the planner will store a list of
-	 * processed targetlists (one per result relation) here.  We must have a
-	 * separate RETURNING targetlist for each result rel because column
-	 * numbers may vary within an inheritance tree.  In the targetlists, Vars
-	 * referencing the result relation will have their original varno and
-	 * varattno, while Vars referencing other rels will be converted to have
-	 * varno OUTER and varattno referencing a resjunk entry in the top plan
-	 * node's targetlist.
-	 */
-	List	   *returningLists; /* list of lists of TargetEntry, or NIL */
-
 	List	   *rowMarks;		/* a list of RowMarkClause's */
 
 	List	   *relationOids;	/* OIDs of relations the plan depends on */
@@ -164,22 +154,30 @@ typedef struct Result
 	Node	   *resconstantqual;
 } Result;
 
+/* ----------------
+ *	 ModifyTable node -
+ *		Apply rows produced by subplan(s) to result table(s),
+ *		by inserting, updating, or deleting.
+ * ----------------
+ */
+typedef struct ModifyTable
+{
+	Plan		plan;
+	CmdType		operation;		/* INSERT, UPDATE, or DELETE */
+	List	   *resultRelations;	/* integer list of RT indexes */
+	List	   *plans;			/* plan(s) producing source data */
+	List	   *returningLists; /* per-target-table RETURNING tlists */
+} ModifyTable;
+
 /* ----------------
  *	 Append node -
  *		Generate the concatenation of the results of sub-plans.
- *
- * Append nodes are sometimes used to switch between several result relations
- * (when the target of an UPDATE or DELETE is an inheritance set).  Such a
- * node will have isTarget true.  The Append executor is then responsible
- * for updating the executor state to point at the correct target relation
- * whenever it switches subplans.
 * ----------------
 */
 typedef struct Append
 {
 	Plan		plan;
 	List	   *appendplans;
-	bool		isTarget;
 } Append;
 
 /* ----------------
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index 9b59d63b2b..5e504b0ab4 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.175 2009/09/17 20:49:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.176 2009/10/10 01:43:50 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -140,8 +140,6 @@ typedef struct PlannerInfo
 
 	List	   *resultRelations;	/* integer list of RT indexes, or NIL */
 
-	List	   *returningLists; /* list of lists of TargetEntry, or NIL */
-
 	List	   *init_plans;		/* init SubPlans for query */
 
 	List	   *cte_plan_ids;	/* per-CTE-item list of subplan IDs */
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index 3ffd80003a..1e27bd847c 100644
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/include/optimizer/planmain.h,v 1.118 2009/06/11 14:49:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/planmain.h,v 1.119 2009/10/10 01:43:50 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -41,7 +41,7 @@ extern Plan *optimize_minmax_aggregates(PlannerInfo *root, List *tlist,
 extern Plan *create_plan(PlannerInfo *root, Path *best_path);
 extern SubqueryScan *make_subqueryscan(List *qptlist, List *qpqual,
				  Index scanrelid, Plan *subplan, List *subrtable);
-extern Append *make_append(List *appendplans, bool isTarget, List *tlist);
+extern Append *make_append(List *appendplans, List *tlist);
 extern RecursiveUnion *make_recursive_union(List *tlist,
					 Plan *lefttree, Plan *righttree, int wtParam,
					 List *distinctList, long numGroups);
@@ -74,6 +74,8 @@ extern SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
			 long numGroups, double outputRows);
 extern Result *make_result(PlannerInfo *root, List *tlist,
			 Node *resconstantqual, Plan *subplan);
+extern ModifyTable *make_modifytable(CmdType operation, List *resultRelations,
+			 List *subplans, List *returningLists);
 extern bool is_projection_capable_plan(Plan *plan);
 
 /*
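make_modifytable is only declared here; its body lives in the planner sources outside this excerpt. As a rough illustration of how the prototype's arguments line up with the ModifyTable fields defined above, here is a self-contained toy constructor. ToyList, toy_cmd_type and toy_make_modifytable are invented stand-ins, not backend types or the real implementation.

/*
 * Toy model: one ModifyTable-like node wrapping N per-target-rel subplans,
 * mirroring the argument-to-field mapping implied by the prototype.
 */
#include <stdio.h>

typedef enum { TOY_CMD_INSERT, TOY_CMD_UPDATE, TOY_CMD_DELETE } toy_cmd_type;

typedef struct ToyList { int len; } ToyList;	/* stand-in for the backend's List */

typedef struct ToyModifyTable
{
	toy_cmd_type operation;			/* INSERT, UPDATE, or DELETE */
	ToyList    *resultRelations;	/* integer list of RT indexes */
	ToyList    *plans;				/* plan(s) producing source data */
	ToyList    *returningLists;		/* per-target-table RETURNING tlists */
} ToyModifyTable;

/* Shape of a make_modifytable-style constructor. */
static ToyModifyTable
toy_make_modifytable(toy_cmd_type operation, ToyList *resultRelations,
					 ToyList *subplans, ToyList *returningLists)
{
	ToyModifyTable node;

	node.operation = operation;
	node.resultRelations = resultRelations;
	node.plans = subplans;				/* one subplan per target relation */
	node.returningLists = returningLists;	/* NULL/NIL when no RETURNING */
	return node;
}

int
main(void)
{
	ToyList		rels = {2}, subplans = {2};	/* e.g. an inheritance set */
	ToyModifyTable node = toy_make_modifytable(TOY_CMD_UPDATE, &rels,
											   &subplans, NULL);

	printf("ModifyTable over %d subplans (%d target rels)\n",
		   node.plans->len, node.resultRelations->len);
	return 0;
}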