From 0a19e71adba1e97f5150229e576e3f93eb2db0de Mon Sep 17 00:00:00 2001 From: amitlan Date: Thu, 2 Jul 2020 10:51:45 +0900 Subject: [PATCH v2 2/2] Initialize result relation information lazily Currently, all elements of the ModifyTableState.resultRelInfo array are initialized in ExecInitModifyTable(), possibly wastefully, because only one or a handful of potentially many result relations appearing in that array may actually have any rows to update or delete. This commit refactors all places that directly access the individual elements of the array to instead go through a lazy-initialization-on- access function, such that only the elements corresponding to result relations that are actually operated on are initialized. Also, extend this lazy initialization approach to some of the individual fields of ResultRelInfo such that even for the result relations that are initialized, those fields are only initialized on first access. While no performance improvement is to be expected there, it can lead to a simpler initialization logic of the ResultRelInfo itself, because the conditions for whether a given field is needed or not tend to look confusing. One side-effect of this is that any "SubPlans" referenced in the expressions of those fields are also lazily initialized, which changes the output of EXPLAIN (without ANALYZE) in some regression tests. Another unrelated regression test output change is in update.out, which is caused by deferred initialization of PartitionTupleRouting for update tuple routing. Whereas previously a partition constraint violation error would be reported as occurring on a leaf partition, due to the aforementioned change, it is now shown as occurring on the query's target relation, which is valid because it is really that table's (which is a sub-partitioned table) partition constraint that is actually violated in the affected test cases. 
--- src/backend/commands/explain.c | 6 +- src/backend/executor/execMain.c | 7 + src/backend/executor/execPartition.c | 116 ++- src/backend/executor/nodeModifyTable.c | 1123 ++++++++++++++----------- src/include/executor/nodeModifyTable.h | 1 + src/include/nodes/execnodes.h | 2 + src/test/regress/expected/insert_conflict.out | 5 +- src/test/regress/expected/updatable_views.out | 18 +- src/test/regress/expected/update.out | 12 +- 9 files changed, 737 insertions(+), 553 deletions(-) diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 43f9b01..edd79d7 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -18,7 +18,9 @@ #include "commands/createas.h" #include "commands/defrem.h" #include "commands/prepare.h" +#include "executor/executor.h" #include "executor/nodeHash.h" +#include "executor/nodeModifyTable.h" #include "foreign/fdwapi.h" #include "jit/jit.h" #include "nodes/extensible.h" @@ -3678,14 +3680,14 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, /* Should we explicitly label target relations? 
 */ labeltargets = (mtstate->mt_nplans > 1 || (mtstate->mt_nplans == 1 && - mtstate->resultRelInfo->ri_RangeTableIndex != node->nominalRelation)); + ExecGetResultRelation(mtstate, 0)->ri_RangeTableIndex != node->nominalRelation)); if (labeltargets) ExplainOpenGroup("Target Tables", "Target Tables", false, es); for (j = 0; j < mtstate->mt_nplans; j++) { - ResultRelInfo *resultRelInfo = mtstate->resultRelInfo + j; + ResultRelInfo *resultRelInfo = ExecGetResultRelation(mtstate, j); FdwRoutine *fdwroutine = resultRelInfo->ri_FdwRoutine; if (labeltargets) diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 7179f58..f484e6a 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -1236,6 +1236,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, resultRelInfo->ri_ConstraintExprs = NULL; resultRelInfo->ri_GeneratedExprs = NULL; resultRelInfo->ri_junkFilter = NULL; + resultRelInfo->ri_junkFilterValid = false; resultRelInfo->ri_projectReturning = NULL; resultRelInfo->ri_onConflictArbiterIndexes = NIL; resultRelInfo->ri_onConflict = NULL; @@ -1247,6 +1248,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, * ExecInitRoutingInfo */ resultRelInfo->ri_PartitionTupleSlot = NULL; /* ditto */ resultRelInfo->ri_ChildToRootMap = NULL; + resultRelInfo->ri_ChildToRootMapValid = false; resultRelInfo->ri_CopyMultiInsertBuffer = NULL; } @@ -1440,6 +1442,11 @@ ExecCloseResultRelations(EState *estate) ResultRelInfo *resultRelInfo = lfirst(l); ExecCloseIndices(resultRelInfo); + if (!resultRelInfo->ri_usesFdwDirectModify && + resultRelInfo->ri_FdwRoutine != NULL && + resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL) + resultRelInfo->ri_FdwRoutine->EndForeignModify(estate, + resultRelInfo); } /* Close any relations that have been opened by ExecGetTriggerResultRel(). 
*/ diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c index 86594bd..8265db2 100644 --- a/src/backend/executor/execPartition.c +++ b/src/backend/executor/execPartition.c @@ -20,6 +20,7 @@ #include "catalog/pg_type.h" #include "executor/execPartition.h" #include "executor/executor.h" +#include "executor/nodeModifyTable.h" #include "foreign/fdwapi.h" #include "mb/pg_wchar.h" #include "miscadmin.h" @@ -157,10 +158,11 @@ typedef struct PartitionDispatchData typedef struct SubplanResultRelHashElem { Oid relid; /* hash key -- must be first */ - ResultRelInfo *rri; + int index; } SubplanResultRelHashElem; +static ResultRelInfo *ExecLookupUpdateResultRelByOid(ModifyTableState *mtstate, Oid reloid); static void ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate, PartitionTupleRouting *proute); static ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate, @@ -218,7 +220,6 @@ ExecSetupPartitionTupleRouting(EState *estate, ModifyTableState *mtstate, Relation rel) { PartitionTupleRouting *proute; - ModifyTable *node = mtstate ? (ModifyTable *) mtstate->ps.plan : NULL; /* * Here we attempt to expend as little effort as possible in setting up @@ -240,17 +241,6 @@ ExecSetupPartitionTupleRouting(EState *estate, ModifyTableState *mtstate, ExecInitPartitionDispatchInfo(estate, proute, RelationGetRelid(rel), NULL, 0); - /* - * If performing an UPDATE with tuple routing, we can reuse partition - * sub-plan result rels. We build a hash table to map the OIDs of - * partitions present in mtstate->resultRelInfo to their ResultRelInfos. - * Every time a tuple is routed to a partition that we've yet to set the - * ResultRelInfo for, before we go to the trouble of making one, we check - * for a pre-made one in the hash table. 
- */ - if (node && node->operation == CMD_UPDATE) - ExecHashSubPlanResultRelsByOid(mtstate, proute); - return proute; } @@ -350,7 +340,6 @@ ExecFindPartition(ModifyTableState *mtstate, is_leaf = partdesc->is_leaf[partidx]; if (is_leaf) { - /* * We've reached the leaf -- hurray, we're done. Look to see if * we've already got a ResultRelInfo for this partition. @@ -367,20 +356,19 @@ ExecFindPartition(ModifyTableState *mtstate, /* * We have not yet set up a ResultRelInfo for this partition, - * but if we have a subplan hash table, we might have one - * there. If not, we'll have to create one. + * but if the partition is also an UPDATE result relation, use + * the one in mtstate->resultRelInfo instead of creating a new + * one with ExecInitPartitionInfo(). */ - if (proute->subplan_resultrel_htab) + if (mtstate->operation == CMD_UPDATE && mtstate->ps.plan) { Oid partoid = partdesc->oids[partidx]; - SubplanResultRelHashElem *elem; - elem = hash_search(proute->subplan_resultrel_htab, - &partoid, HASH_FIND, NULL); - if (elem) + rri = ExecLookupUpdateResultRelByOid(mtstate, partoid); + + if (rri) { found = true; - rri = elem->rri; /* Verify this ResultRelInfo allows INSERTs */ CheckValidResultRel(rri, CMD_INSERT); @@ -508,6 +496,41 @@ ExecFindPartition(ModifyTableState *mtstate, } /* + * ExecLookupUpdateResultRelByOid + * If the table with given OID appears in the list of result relations + * to be updated by the given ModifyTable node, return its + * ResultRelInfo, NULL otherwise. 
+ */ +static ResultRelInfo * +ExecLookupUpdateResultRelByOid(ModifyTableState *mtstate, Oid reloid) +{ + PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing; + SubplanResultRelHashElem *elem; + ResultRelInfo *result = NULL; + + Assert(proute != NULL); + if (proute->subplan_resultrel_htab == NULL) + ExecHashSubPlanResultRelsByOid(mtstate, proute); + + elem = hash_search(proute->subplan_resultrel_htab, &reloid, + HASH_FIND, NULL); + + if (elem) + { + result = ExecGetResultRelation(mtstate, elem->index); + + /* + * This is required in order to convert the partition's tuple to be + * compatible with the root partitioned table's tuple descriptor. When + * generating the per-subplan result rels, this was not set. + */ + result->ri_PartitionRoot = proute->partition_root; + } + + return result; +} + +/* * ExecHashSubPlanResultRelsByOid * Build a hash table to allow fast lookups of subplan ResultRelInfos by * partition Oid. We also populate the subplan ResultRelInfo with an @@ -517,9 +540,13 @@ static void ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate, PartitionTupleRouting *proute) { + EState *estate = mtstate->ps.state; + ModifyTable *plan = (ModifyTable *) mtstate->ps.plan; + ListCell *l; HASHCTL ctl; HTAB *htab; int i; + MemoryContext oldcxt = MemoryContextSwitchTo(estate->es_query_cxt); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); @@ -530,26 +557,26 @@ ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate, &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); proute->subplan_resultrel_htab = htab; - /* Hash all subplans by their Oid */ - for (i = 0; i < mtstate->mt_nplans; i++) + /* + * Map each result relation's OID to its ordinal position in + * plan->resultRelations. 
+ */ + i = 0; + foreach(l, plan->resultRelations) { - ResultRelInfo *rri = &mtstate->resultRelInfo[i]; + Index rti = lfirst_int(l); + RangeTblEntry *rte = exec_rt_fetch(rti, estate); + Oid partoid = rte->relid; bool found; - Oid partoid = RelationGetRelid(rri->ri_RelationDesc); SubplanResultRelHashElem *elem; elem = (SubplanResultRelHashElem *) hash_search(htab, &partoid, HASH_ENTER, &found); Assert(!found); - elem->rri = rri; - - /* - * This is required in order to convert the partition's tuple to be - * compatible with the root partitioned table's tuple descriptor. When - * generating the per-subplan result rels, this was not set. - */ - rri->ri_PartitionRoot = proute->partition_root; + elem->index = i++; } + + MemoryContextSwitchTo(oldcxt); } /* @@ -570,7 +597,8 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, ModifyTable *node = (ModifyTable *) mtstate->ps.plan; Relation rootrel = rootResultRelInfo->ri_RelationDesc, partrel; - Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc; + Relation firstResultRel = NULL; + Index firstVarno = 0; ResultRelInfo *leaf_part_rri; MemoryContext oldcxt; AttrMap *part_attmap = NULL; @@ -606,19 +634,26 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, (node != NULL && node->onConflictAction != ONCONFLICT_NONE)); + if (node) + { + ResultRelInfo *firstResultRelInfo = ExecGetResultRelation(mtstate, 0); + + firstResultRel = firstResultRelInfo->ri_RelationDesc; + firstVarno = firstResultRelInfo->ri_RangeTableIndex; + } + /* * Build WITH CHECK OPTION constraints for the partition. Note that we * didn't build the withCheckOptionList for partitions within the planner, * but simple translation of varattnos will suffice. This only occurs for * the INSERT case or in the case of UPDATE tuple routing where we didn't - * find a result rel to reuse in ExecSetupPartitionTupleRouting(). + * find a result rel to reuse. 
*/ if (node && node->withCheckOptionLists != NIL) { List *wcoList; List *wcoExprs = NIL; ListCell *ll; - int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex; /* * In the case of INSERT on a partitioned table, there is only one @@ -682,7 +717,6 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, TupleTableSlot *slot; ExprContext *econtext; List *returningList; - int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex; /* See the comment above for WCO lists. */ Assert((node->operation == CMD_INSERT && @@ -741,7 +775,6 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, */ if (node && node->onConflictAction != ONCONFLICT_NONE) { - int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex; TupleDesc partrelDesc = RelationGetDescr(partrel); ExprContext *econtext = mtstate->ps.ps_ExprContext; ListCell *lc; @@ -916,9 +949,14 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, * from partition's rowtype to the root partition table's. */ if (mtstate->mt_transition_capture || mtstate->mt_oc_transition_capture) + { leaf_part_rri->ri_ChildToRootMap = convert_tuples_by_name(RelationGetDescr(leaf_part_rri->ri_RelationDesc), RelationGetDescr(leaf_part_rri->ri_PartitionRoot)); + /* First time creating the map for this result relation. 
*/ + Assert(!leaf_part_rri->ri_ChildToRootMapValid); + leaf_part_rri->ri_ChildToRootMapValid = true; + } /* * Since we've just initialized this ResultRelInfo, it's not in any list diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 05e68ef..db24dff 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -144,10 +144,41 @@ ExecCheckPlanOutput(Relation resultRel, List *targetList) } /* + * Initialize ri_returningList and ri_projectReturning for RETURNING + */ +static void +InitReturningProjection(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) +{ + ModifyTable *plan = (ModifyTable *) mtstate->ps.plan; + int whichrel = resultRelInfo - mtstate->resultRelInfo; + List *rlist; + TupleTableSlot *slot; + ExprContext *econtext; + + Assert(whichrel >= 0 && whichrel < mtstate->mt_nplans); + rlist = (List *) list_nth(plan->returningLists, whichrel); + slot = mtstate->ps.ps_ResultTupleSlot; + Assert(slot != NULL); + econtext = mtstate->ps.ps_ExprContext; + Assert(econtext != NULL); + + /* Must not do this a second time! 
*/ + Assert(resultRelInfo->ri_returningList == NIL && + resultRelInfo->ri_projectReturning == NULL); + resultRelInfo->ri_returningList = rlist; + resultRelInfo->ri_projectReturning = + ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps, + resultRelInfo->ri_RelationDesc->rd_att); +} + +/* * ExecProcessReturning --- evaluate a RETURNING list * * resultRelInfo: current result rel - * tupleSlot: slot holding tuple actually inserted/updated/deleted + * tupleSlot: slot holding tuple actually inserted/updated or NULL for delete + * tupleid, oldtuple: when called for delete, one of these can be used to + * fill the RETURNING slot for the relation * planSlot: slot holding tuple returned by top subplan node * * Note: If tupleSlot is NULL, the FDW should have already provided econtext's @@ -156,12 +187,50 @@ ExecCheckPlanOutput(Relation resultRel, List *targetList) * Returns a slot holding the result tuple */ static TupleTableSlot * -ExecProcessReturning(ResultRelInfo *resultRelInfo, +ExecProcessReturning(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo, TupleTableSlot *tupleSlot, + ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *planSlot) { - ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning; - ExprContext *econtext = projectReturning->pi_exprContext; + EState *estate = mtstate->ps.state; + ModifyTable *plan = (ModifyTable *) mtstate->ps.plan; + ProjectionInfo *projectReturning; + ExprContext *econtext; + bool clearTupleSlot = false; + TupleTableSlot *result; + + if (plan->returningLists == NIL) + return NULL; + + if (resultRelInfo->ri_returningList == NIL) + InitReturningProjection(mtstate, resultRelInfo); + + projectReturning = resultRelInfo->ri_projectReturning; + econtext = projectReturning->pi_exprContext; + + /* + * Fill tupleSlot with provided tuple or after fetching the tuple with + * provided tupleid. 
+ */ + if (tupleSlot == NULL && resultRelInfo->ri_FdwRoutine == NULL) + { + /* Not an FDW table, so we must fetch the deleted row ourselves */ + Assert(resultRelInfo->ri_FdwRoutine == NULL); + tupleSlot = ExecGetReturningSlot(estate, resultRelInfo); + if (oldtuple != NULL) + { + ExecForceStoreHeapTuple(oldtuple, tupleSlot, false); + } + else + { + if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc, + tupleid, SnapshotAny, + tupleSlot)) + elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING"); + } + clearTupleSlot = true; + } /* Make tuple and any needed join variables available to ExecProject */ if (tupleSlot) @@ -176,7 +245,392 @@ ExecCheckPlanOutput(Relation resultRel, List *targetList) RelationGetRelid(resultRelInfo->ri_RelationDesc); /* Compute the RETURNING expressions */ - return ExecProject(projectReturning); + result = ExecProject(projectReturning); + + if (clearTupleSlot) + ExecClearTuple(tupleSlot); + + return result; +} + +/* + * Perform WITH CHECK OPTIONS check, if any. + */ +static void +ExecProcessWithCheckOptions(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, WCOKind wco_kind) +{ + ModifyTable *node = (ModifyTable *) mtstate->ps.plan; + EState *estate = mtstate->ps.state; + + if (node->withCheckOptionLists == NIL) + return; + + /* Initialize expression state if not already done. 
*/ + if (resultRelInfo->ri_WithCheckOptions == NIL) + { + int whichrel = resultRelInfo - mtstate->resultRelInfo; + List *wcoList; + List *wcoExprs = NIL; + ListCell *ll; + + Assert(whichrel >= 0 && whichrel < mtstate->mt_nplans); + wcoList = (List *) list_nth(node->withCheckOptionLists, whichrel); + foreach(ll, wcoList) + { + WithCheckOption *wco = (WithCheckOption *) lfirst(ll); + ExprState *wcoExpr = ExecInitQual((List *) wco->qual, + &mtstate->ps); + + wcoExprs = lappend(wcoExprs, wcoExpr); + } + + resultRelInfo->ri_WithCheckOptions = wcoList; + resultRelInfo->ri_WithCheckOptionExprs = wcoExprs; + } + + /* + * ExecWithCheckOptions() will skip any WCOs which are not of the kind + * we are looking for at this point. + */ + ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate); +} + +/* + * Return the list of arbiter indexes to be used for ON CONFLICT processing + * on given result relation, fetching it from the plan if not already done. + */ +static List * +GetOnConflictArbiterIndexes(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) +{ + if (resultRelInfo->ri_onConflictArbiterIndexes == NIL) + { + ModifyTable *plan = (ModifyTable *) mtstate->ps.plan; + + resultRelInfo->ri_onConflictArbiterIndexes = plan->arbiterIndexes; + } + + return resultRelInfo->ri_onConflictArbiterIndexes; +} + +/* + * Initialize target list, projection and qual for ON CONFLICT DO UPDATE. 
+ */ +static void +InitOnConflictState(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) +{ + ModifyTable *plan = (ModifyTable *) mtstate->ps.plan; + EState *estate = mtstate->ps.state; + TupleDesc relationDesc; + TupleDesc tupDesc; + ExprContext *econtext; + + /* insert may only have one relation, inheritance is not expanded */ + Assert(mtstate->mt_nplans == 1); + + /* already exists if created by RETURNING processing above */ + if (mtstate->ps.ps_ExprContext == NULL) + ExecAssignExprContext(estate, &mtstate->ps); + + econtext = mtstate->ps.ps_ExprContext; + relationDesc = resultRelInfo->ri_RelationDesc->rd_att; + + /* create state for DO UPDATE SET operation */ + resultRelInfo->ri_onConflict = makeNode(OnConflictSetState); + + /* initialize slot for the existing tuple */ + resultRelInfo->ri_onConflict->oc_Existing = + table_slot_create(resultRelInfo->ri_RelationDesc, + &mtstate->ps.state->es_tupleTable); + + /* + * Create the tuple slot for the UPDATE SET projection. We want a slot + * of the table's type here, because the slot will be used to insert + * into the table, and for RETURNING processing - which may access + * system attributes. + */ + tupDesc = ExecTypeFromTL((List *) plan->onConflictSet); + resultRelInfo->ri_onConflict->oc_ProjSlot = + ExecInitExtraTupleSlot(mtstate->ps.state, tupDesc, + table_slot_callbacks(resultRelInfo->ri_RelationDesc)); + + /* build UPDATE SET projection state */ + resultRelInfo->ri_onConflict->oc_ProjInfo = + ExecBuildProjectionInfo(plan->onConflictSet, econtext, + resultRelInfo->ri_onConflict->oc_ProjSlot, + &mtstate->ps, + relationDesc); + + /* initialize state to evaluate the WHERE clause, if any */ + if (plan->onConflictWhere) + { + ExprState *qualexpr; + + qualexpr = ExecInitQual((List *) plan->onConflictWhere, + &mtstate->ps); + resultRelInfo->ri_onConflict->oc_WhereClause = qualexpr; + } +} + +/* + * Initialize ri_junkFilter if needed. 
+ * + * INSERT queries need a filter if there are any junk attrs in the tlist. + * UPDATE and DELETE always need a filter, since there's always at least one + * junk attribute present --- no need to look first. Typically, this will be + * a 'ctid' or 'wholerow' attribute, but in the case of a foreign data wrapper + * it might be a set of junk attributes sufficient to identify the remote row. + * + * If there are multiple result relations, each one needs its own junk filter. + * Note multiple rels are only possible for UPDATE/DELETE, so we can't be + * fooled by some needing a filter and some not. + * + * This is also a convenient place to verify that the output of an INSERT or + * UPDATE matches the target table(s). + */ +static void +InitJunkFilter(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo) +{ + EState *estate = mtstate->ps.state; + CmdType operation = mtstate->operation; + Plan *subplan = mtstate->mt_plans[mtstate->mt_whichplan]->plan; + ListCell *l; + bool junk_filter_needed = false; + + switch (operation) + { + case CMD_INSERT: + foreach(l, subplan->targetlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(l); + + if (tle->resjunk) + { + junk_filter_needed = true; + break; + } + } + break; + case CMD_UPDATE: + case CMD_DELETE: + junk_filter_needed = true; + break; + default: + elog(ERROR, "unknown operation"); + break; + } + + if (junk_filter_needed) + { + JunkFilter *j; + TupleTableSlot *junkresslot; + + junkresslot = + ExecInitExtraTupleSlot(estate, NULL, + table_slot_callbacks(resultRelInfo->ri_RelationDesc)); + + /* + * For an INSERT or UPDATE, the result tuple must always match + * the target table's descriptor. For a DELETE, it won't + * (indeed, there's probably no non-junk output columns). 
+ */ + if (operation == CMD_INSERT || operation == CMD_UPDATE) + { + ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, + subplan->targetlist); + j = ExecInitJunkFilterInsertion(subplan->targetlist, + RelationGetDescr(resultRelInfo->ri_RelationDesc), + junkresslot); + } + else + j = ExecInitJunkFilter(subplan->targetlist, + junkresslot); + + if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + /* For UPDATE/DELETE, find the appropriate junk attr now */ + char relkind; + + relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; + if (relkind == RELKIND_RELATION || + relkind == RELKIND_MATVIEW || + relkind == RELKIND_PARTITIONED_TABLE) + { + j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk ctid column"); + } + else if (relkind == RELKIND_FOREIGN_TABLE) + { + /* + * When there is a row-level trigger, there should be + * a wholerow attribute. + */ + j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow"); + } + else + { + j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow"); + if (!AttributeNumberIsValid(j->jf_junkAttNo)) + elog(ERROR, "could not find junk wholerow column"); + } + } + + /* Must not do this a second time! */ + Assert(resultRelInfo->ri_junkFilter == NULL); + resultRelInfo->ri_junkFilter = j; + resultRelInfo->ri_junkFilterValid = true; + } + + if (operation == CMD_INSERT || operation == CMD_UPDATE) + ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, + subplan->targetlist); +} + +/* + * Returns the map needed to convert given child relation's tuples to the + * root relation's format, possibly initializing if not already done. 
+ */ +static TupleConversionMap * +GetChildToRootMap(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo) +{ + if (!resultRelInfo->ri_ChildToRootMapValid) + { + Relation relation = resultRelInfo->ri_RelationDesc; + Relation targetRel = mtstate->rootResultRelInfo->ri_RelationDesc; + + resultRelInfo->ri_ChildToRootMap = + convert_tuples_by_name(RelationGetDescr(relation), + RelationGetDescr(targetRel)); + resultRelInfo->ri_ChildToRootMapValid = true; + } + + return resultRelInfo->ri_ChildToRootMap; +} + +/* + * ExecGetResultRelation + * Returns mtstate->resultRelInfo[whichrel], possibly initializing it + * if being requested for the first time + */ +ResultRelInfo * +ExecGetResultRelation(ModifyTableState *mtstate, int whichrel) +{ + EState *estate = mtstate->ps.state; + ModifyTable *plan = (ModifyTable *) mtstate->ps.plan; + Index rti; + ResultRelInfo *resultRelInfo = NULL; + + /* + * Initialized result relations are added to es_result_relations, so check + * there first. Remember that es_result_relations is indexed by RT index, + * so fetch the relation's RT index from the plan. + */ + Assert(plan != NULL); + Assert(whichrel >= 0 && whichrel < mtstate->mt_nplans); + rti = list_nth_int(plan->resultRelations, whichrel); + if (estate->es_result_relations) + resultRelInfo = estate->es_result_relations[rti - 1]; + + /* Nope, so initialize. */ + if (resultRelInfo == NULL) + { + int eflags = estate->es_top_eflags; + CmdType operation = mtstate->operation; + MemoryContext oldcxt; + + Assert(whichrel >= 0); + resultRelInfo = &mtstate->resultRelInfo[whichrel]; + + /* Things built here have to last for the query duration. */ + oldcxt = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * Perform InitResultRelInfo() and save the pointer in + * es_result_relations. + */ + ExecInitResultRelation(estate, resultRelInfo, rti); + + /* + * A few more initializations that are not handled by + * InitResultRelInfo() follow. 
+ */ + + /* + * Verify result relation is a valid target for the current operation. + */ + CheckValidResultRel(resultRelInfo, operation); + + /* Initialize the usesFdwDirectModify flag */ + resultRelInfo->ri_usesFdwDirectModify = bms_is_member(whichrel, + plan->fdwDirectModifyPlans); + + /* Also let FDWs init themselves for foreign-table result rels */ + if (resultRelInfo->ri_FdwRoutine != NULL) + { + if (resultRelInfo->ri_usesFdwDirectModify) + { + ForeignScanState *fscan = (ForeignScanState *) mtstate->mt_plans[whichrel]; + + /* + * For the FDW's convenience, set the ForeignScanState node's + * ResultRelInfo to let the FDW know which result relation it + * is going to work with. + */ + Assert(IsA(fscan, ForeignScanState)); + fscan->resultRelInfo = resultRelInfo; + resultRelInfo->ri_FdwRoutine->BeginDirectModify(fscan, eflags); + } + else if (resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL) + { + List *fdw_private = (List *) list_nth(plan->fdwPrivLists, + whichrel); + + resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate, + resultRelInfo, + fdw_private, + whichrel, + eflags); + } + } + + /* + * If transition tuples will be captured, initialize a map to convert + * child tuples into the format of the table mentioned in the query + * (root relation), because the transition tuple store can only store + * tuples in the root table format. However for INSERT, the map is + * only initialized for a given partition when the partition itself is + * first initialized by ExecFindPartition. Also, this map is also + * needed if an UPDATE ends up having to move tuples across + * partitions, because in that case the child tuple to be moved first + * needs to be converted into the root table's format. In that case, + * we use GetChildToRootMap() to either create one from scratch if + * we didn't already create it here. 
+ * + * Note: We cannot always initialize this map lazily, that is, use + * GetChildToRootMap(), because AfterTriggerSaveEvent(), which needs + * the map, doesn't have access to the "target" relation that is + * needed to create the map. + */ + if (mtstate->mt_transition_capture && operation != CMD_INSERT) + { + Relation relation = resultRelInfo->ri_RelationDesc; + Relation targetRel = mtstate->rootResultRelInfo->ri_RelationDesc; + + resultRelInfo->ri_ChildToRootMap = + convert_tuples_by_name(RelationGetDescr(relation), + RelationGetDescr(targetRel)); + /* First time creating the map for this result relation. */ + Assert(!resultRelInfo->ri_ChildToRootMapValid); + resultRelInfo->ri_ChildToRootMapValid = true; + } + + MemoryContextSwitchTo(oldcxt); + } + + return resultRelInfo; } /* @@ -398,12 +852,27 @@ ExecInsert(ModifyTableState *mtstate, { ResultRelInfo *partRelInfo; + /* + * ExecInitPartitionInfo() expects that the root parent's ri_onConflict + * is initialized. XXX maybe it shouldn't? + */ + if (onconflict != ONCONFLICT_NONE && + resultRelInfo->ri_onConflict == NULL) + { + (void) GetOnConflictArbiterIndexes(mtstate, resultRelInfo); + if (onconflict == ONCONFLICT_UPDATE) + InitOnConflictState(mtstate, resultRelInfo); + } + slot = ExecPrepareTupleRouting(mtstate, estate, proute, resultRelInfo, slot, &partRelInfo); resultRelInfo = partRelInfo; } + if (resultRelInfo->ri_IndexRelationDescs == NULL) + ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE); + ExecMaterializeSlot(slot); resultRelationDesc = resultRelInfo->ri_RelationDesc; @@ -489,12 +958,7 @@ ExecInsert(ModifyTableState *mtstate, wco_kind = (mtstate->operation == CMD_UPDATE) ? WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK; - /* - * ExecWithCheckOptions() will skip any WCOs which are not of the kind - * we are looking for at this point. 
- */ - if (resultRelInfo->ri_WithCheckOptions != NIL) - ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate); + ExecProcessWithCheckOptions(mtstate, resultRelInfo, slot, wco_kind); /* * Check the constraints of the tuple. @@ -521,7 +985,8 @@ ExecInsert(ModifyTableState *mtstate, bool specConflict; List *arbiterIndexes; - arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes; + arbiterIndexes = GetOnConflictArbiterIndexes(mtstate, + resultRelInfo); /* * Do a non-conclusive check for conflicts first. @@ -691,12 +1156,11 @@ ExecInsert(ModifyTableState *mtstate, * ExecWithCheckOptions() will skip any WCOs which are not of the kind we * are looking for at this point. */ - if (resultRelInfo->ri_WithCheckOptions != NIL) - ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate); + ExecProcessWithCheckOptions(mtstate, resultRelInfo, slot, WCO_VIEW_CHECK); /* Process RETURNING if present */ - if (resultRelInfo->ri_projectReturning) - result = ExecProcessReturning(resultRelInfo, slot, planSlot); + result = ExecProcessReturning(mtstate, resultRelInfo, slot, NULL, NULL, + planSlot); return result; } @@ -1011,45 +1475,23 @@ ldelete:; ar_delete_trig_tcs); /* Process RETURNING if present and if requested */ - if (processReturning && resultRelInfo->ri_projectReturning) + if (processReturning) { - /* - * We have to put the target tuple into a slot, which means first we - * gotta fetch it. We can use the trigger tuple slot. 
- */ - TupleTableSlot *rslot; - - if (resultRelInfo->ri_FdwRoutine) - { - /* FDW must have provided a slot containing the deleted row */ - Assert(!TupIsNull(slot)); - } - else - { - slot = ExecGetReturningSlot(estate, resultRelInfo); - if (oldtuple != NULL) - { - ExecForceStoreHeapTuple(oldtuple, slot, false); - } - else - { - if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid, - SnapshotAny, slot)) - elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING"); - } - } - - rslot = ExecProcessReturning(resultRelInfo, slot, planSlot); + TupleTableSlot *rslot = ExecProcessReturning(mtstate, resultRelInfo, + slot, tupleid, oldtuple, + planSlot); /* * Before releasing the target tuple again, make sure rslot has a * local copy of any pass-by-reference values. */ - ExecMaterializeSlot(rslot); - - ExecClearTuple(slot); - - return rslot; + if (rslot) + { + ExecMaterializeSlot(rslot); + if (slot) + ExecClearTuple(slot); + return rslot; + } } return NULL; @@ -1082,7 +1524,6 @@ ExecCrossPartitionUpdate(ModifyTableState *mtstate, TupleTableSlot **inserted_tuple) { EState *estate = mtstate->ps.state; - PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing; TupleConversionMap *tupconv_map; bool tuple_deleted; TupleTableSlot *epqslot = NULL; @@ -1101,13 +1542,27 @@ ExecCrossPartitionUpdate(ModifyTableState *mtstate, errmsg("invalid ON UPDATE specification"), errdetail("The result tuple would appear in a different partition than the original tuple."))); - /* - * When an UPDATE is run on a leaf partition, we will not have partition - * tuple routing set up. In that case, fail with partition constraint - * violation error. - */ - if (proute == NULL) - ExecPartitionCheckEmitError(resultRelInfo, slot, estate); + /* Initialize tuple routing info if not already done. 
*/ + if (mtstate->mt_partition_tuple_routing == NULL) + { + Relation targetRel = mtstate->rootResultRelInfo->ri_RelationDesc; + MemoryContext oldcxt; + + /* Things built here have to last for the query duration. */ + oldcxt = MemoryContextSwitchTo(estate->es_query_cxt); + + mtstate->mt_partition_tuple_routing = + ExecSetupPartitionTupleRouting(estate, mtstate, targetRel); + + /* + * Before a partition's tuple can be re-routed, it must first + * be converted to the root's format and we need a slot for + * storing such tuple. + */ + Assert(mtstate->mt_root_tuple_slot == NULL); + mtstate->mt_root_tuple_slot = table_slot_create(targetRel, NULL); + MemoryContextSwitchTo(oldcxt); + } /* * Row movement, part 1. Delete the tuple, but skip RETURNING processing. @@ -1161,7 +1616,7 @@ ExecCrossPartitionUpdate(ModifyTableState *mtstate, * convert the tuple into root's tuple descriptor if needed, since * ExecInsert() starts the search from root. */ - tupconv_map = resultRelInfo->ri_ChildToRootMap; + tupconv_map = GetChildToRootMap(mtstate, resultRelInfo); if (tupconv_map != NULL) slot = execute_attr_map_slot(tupconv_map->attrMap, slot, @@ -1226,6 +1681,9 @@ ExecUpdate(ModifyTableState *mtstate, if (IsBootstrapProcessingMode()) elog(ERROR, "cannot UPDATE during bootstrap"); + if (resultRelInfo->ri_IndexRelationDescs == NULL) + ExecOpenIndices(resultRelInfo, false); + ExecMaterializeSlot(slot); /* BEFORE ROW UPDATE Triggers */ @@ -1318,16 +1776,9 @@ lreplace:; resultRelationDesc->rd_rel->relispartition && !ExecPartitionCheck(resultRelInfo, slot, estate, false); - if (!partition_constraint_failed && - resultRelInfo->ri_WithCheckOptions != NIL) - { - /* - * ExecWithCheckOptions() will skip any WCOs which are not of the - * kind we are looking for at this point. 
- */ - ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK, - resultRelInfo, slot, estate); - } + if (!partition_constraint_failed) + ExecProcessWithCheckOptions(mtstate, resultRelInfo, slot, + WCO_RLS_UPDATE_CHECK); /* * If a partition check failed, try to move the row into the right @@ -1340,6 +1791,13 @@ lreplace:; bool retry; /* + * When an UPDATE is run directly on a leaf partition, simply fail + * with partition constraint violation error. + */ + if (resultRelInfo == mtstate->rootResultRelInfo) + ExecPartitionCheckEmitError(resultRelInfo, slot, estate); + + /* * ExecCrossPartitionUpdate will first DELETE the row from the * partition it's currently in and then insert it back into the * root table, which will re-route it to the correct partition. @@ -1535,18 +1993,12 @@ lreplace:; * required to do this after testing all constraints and uniqueness * violations per the SQL spec, so we do it after actually updating the * record in the heap and all indexes. - * - * ExecWithCheckOptions() will skip any WCOs which are not of the kind we - * are looking for at this point. 
*/ - if (resultRelInfo->ri_WithCheckOptions != NIL) - ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate); + ExecProcessWithCheckOptions(mtstate, resultRelInfo, slot, WCO_VIEW_CHECK); /* Process RETURNING if present */ - if (resultRelInfo->ri_projectReturning) - return ExecProcessReturning(resultRelInfo, slot, planSlot); - - return NULL; + return ExecProcessReturning(mtstate, resultRelInfo, slot, NULL, NULL, + planSlot); } /* @@ -1570,10 +2022,10 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, bool canSetTag, TupleTableSlot **returning) { - ExprContext *econtext = mtstate->ps.ps_ExprContext; + ExprContext *econtext; Relation relation = resultRelInfo->ri_RelationDesc; - ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause; - TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing; + ExprState *onConflictSetWhere; + TupleTableSlot *existing; TM_FailureData tmfd; LockTupleMode lockmode; TM_Result test; @@ -1581,6 +2033,13 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, TransactionId xmin; bool isnull; + if (resultRelInfo->ri_onConflict == NULL) + InitOnConflictState(mtstate, resultRelInfo); + + econtext = mtstate->ps.ps_ExprContext; + onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause; + existing = resultRelInfo->ri_onConflict->oc_Existing; + /* Determine lock mode to use */ lockmode = ExecUpdateLockMode(estate, resultRelInfo); @@ -1719,27 +2178,23 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, return true; /* done with the tuple */ } - if (resultRelInfo->ri_WithCheckOptions != NIL) - { - /* - * Check target's existing tuple against UPDATE-applicable USING - * security barrier quals (if any), enforced here as RLS checks/WCOs. - * - * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security - * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK, - * but that's almost the extent of its special handling for ON - * CONFLICT DO UPDATE. 
- * - * The rewriter will also have associated UPDATE applicable straight - * RLS checks/WCOs for the benefit of the ExecUpdate() call that - * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO - * kinds, so there is no danger of spurious over-enforcement in the - * INSERT or UPDATE path. - */ - ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo, - existing, - mtstate->ps.state); - } + /* + * Check target's existing tuple against UPDATE-applicable USING + * security barrier quals (if any), enforced here as RLS checks/WCOs. + * + * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security + * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK, + * but that's almost the extent of its special handling for ON + * CONFLICT DO UPDATE. + * + * The rewriter will also have associated UPDATE applicable straight + * RLS checks/WCOs for the benefit of the ExecUpdate() call that + * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO + * kinds, so there is no danger of spurious over-enforcement in the + * INSERT or UPDATE path. 
+ */ + ExecProcessWithCheckOptions(mtstate, resultRelInfo, existing, + WCO_RLS_CONFLICT_CHECK); /* Project the new tuple version */ ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo); @@ -1929,11 +2384,12 @@ static TupleTableSlot * ExecModifyTable(PlanState *pstate) { ModifyTableState *node = castNode(ModifyTableState, pstate); + ModifyTable *plan = (ModifyTable *) node->ps.plan; EState *estate = node->ps.state; CmdType operation = node->operation; - ResultRelInfo *resultRelInfo; + ResultRelInfo *resultRelInfo = NULL; PlanState *subplanstate; - JunkFilter *junkfilter; + JunkFilter *junkfilter = NULL; TupleTableSlot *slot; TupleTableSlot *planSlot; ItemPointer tupleid; @@ -1974,9 +2430,7 @@ ExecModifyTable(PlanState *pstate) } /* Preload local variables */ - resultRelInfo = node->resultRelInfo + node->mt_whichplan; subplanstate = node->mt_plans[node->mt_whichplan]; - junkfilter = resultRelInfo->ri_junkFilter; /* * Fetch rows from subplan(s), and execute the required table modification @@ -2000,17 +2454,37 @@ ExecModifyTable(PlanState *pstate) if (pstate->ps_ExprContext) ResetExprContext(pstate->ps_ExprContext); + /* + * FDWs that can push down a modify operation would need to see the + * ResultRelInfo, so fetch one if not already done before executing + * the subplan, potentially opening it for the first time. + */ + if (bms_is_member(node->mt_whichplan, plan->fdwDirectModifyPlans) && + resultRelInfo == NULL) + { + resultRelInfo = ExecGetResultRelation(node, node->mt_whichplan); + + /* + * Must make sure to initialize the RETURNING projection as well, + * because some FDWs rely on accessing ri_projectReturning to + * set its "scan" tuple to use below for computing the actual + * RETURNING targetlist. 
+ */ + if (plan->returningLists && resultRelInfo->ri_returningList == NIL) + InitReturningProjection(node, resultRelInfo); + } + planSlot = ExecProcNode(subplanstate); if (TupIsNull(planSlot)) { - /* advance to next subplan if any */ + /* Signal to initialize the next plan's relation. */ + resultRelInfo = NULL; + node->mt_whichplan++; if (node->mt_whichplan < node->mt_nplans) { - resultRelInfo++; subplanstate = node->mt_plans[node->mt_whichplan]; - junkfilter = resultRelInfo->ri_junkFilter; EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan, node->mt_arowmarks[node->mt_whichplan]); continue; @@ -2020,8 +2494,25 @@ ExecModifyTable(PlanState *pstate) } /* + * Fetch the result relation for the current plan if not already done, + * potentially opening it for the first time. + */ + if (resultRelInfo == NULL) + { + resultRelInfo = ExecGetResultRelation(node, node->mt_whichplan); + if (!resultRelInfo->ri_junkFilterValid) + InitJunkFilter(node, resultRelInfo); + junkfilter = resultRelInfo->ri_junkFilter; + } + + /* * Ensure input tuple is the right format for the target relation. */ + if (node->mt_scans[node->mt_whichplan] == NULL) + node->mt_scans[node->mt_whichplan] = + ExecInitExtraTupleSlot(node->ps.state, + ExecGetResultType(subplanstate), + table_slot_callbacks(resultRelInfo->ri_RelationDesc)); if (node->mt_scans[node->mt_whichplan]->tts_ops != planSlot->tts_ops) { ExecCopySlot(node->mt_scans[node->mt_whichplan], planSlot); @@ -2042,7 +2533,8 @@ ExecModifyTable(PlanState *pstate) * ExecProcessReturning by IterateDirectModify, so no need to * provide it here. 
*/ - slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); + slot = ExecProcessReturning(node, resultRelInfo, NULL, NULL, NULL, + planSlot); return slot; } @@ -2175,13 +2667,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ModifyTableState *mtstate; CmdType operation = node->operation; int nplans = list_length(node->plans); - ResultRelInfo *resultRelInfo; Plan *subplan; - ListCell *l, - *l1; + ListCell *l; int i; Relation rel; - bool update_tuple_routing_needed = node->partColsUpdated; /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); @@ -2198,7 +2687,20 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) mtstate->canSetTag = node->canSetTag; mtstate->mt_done = false; + /* + * call ExecInitNode on each of the plans to be executed and save the + * results into the array "mt_plans". + */ + mtstate->mt_nplans = nplans; mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans); + i = 0; + foreach(l, node->plans) + { + subplan = (Plan *) lfirst(l); + + mtstate->mt_plans[i++] = ExecInitNode(subplan, estate, eflags); + } + mtstate->resultRelInfo = (ResultRelInfo *) palloc(nplans * sizeof(ResultRelInfo)); mtstate->mt_scans = (TupleTableSlot **) palloc0(sizeof(TupleTableSlot *) * nplans); @@ -2225,13 +2727,17 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) } else { - mtstate->rootResultRelInfo = mtstate->resultRelInfo; - ExecInitResultRelation(estate, mtstate->resultRelInfo, - linitial_int(node->resultRelations)); + /* + * Unlike a partitioned target relation, the target relation in this + * case will be actually used by ExecModifyTable(), so use + * ExecGetResultRelation() to get the ResultRelInfo, because it + * initializes some fields that a bare InitResultRelInfo() doesn't. 
+ */ + mtstate->rootResultRelInfo = ExecGetResultRelation(mtstate, 0); + Assert(mtstate->rootResultRelInfo == mtstate->resultRelInfo); } mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans); - mtstate->mt_nplans = nplans; /* set up epqstate with dummy subplan data for the moment */ EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam); @@ -2244,177 +2750,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY)) ExecSetupTransitionCaptureState(mtstate, estate); - /* - * call ExecInitNode on each of the plans to be executed and save the - * results into the array "mt_plans". This is also a convenient place to - * verify that the proposed target relations are valid and open their - * indexes for insertion of new index entries. - */ - resultRelInfo = mtstate->resultRelInfo; - i = 0; - forboth(l, node->resultRelations, l1, node->plans) - { - Index resultRelation = lfirst_int(l); - - subplan = (Plan *) lfirst(l1); - - /* - * This opens result relation and fills ResultRelInfo. (root relation - * was initialized already.) - */ - if (resultRelInfo != mtstate->rootResultRelInfo) - ExecInitResultRelation(estate, resultRelInfo, resultRelation); - - /* Initialize the usesFdwDirectModify flag */ - resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i, - node->fdwDirectModifyPlans); - - /* - * Verify result relation is a valid target for the current operation - */ - CheckValidResultRel(resultRelInfo, operation); - - /* - * If there are indices on the result relation, open them and save - * descriptors in the result relation info, so that we can add new - * index entries for the tuples we add/update. We need not do this - * for a DELETE, however, since deletion doesn't affect indexes. Also, - * inside an EvalPlanQual operation, the indexes might be open - * already, since we share the resultrel state with the original - * query. 
- */ - if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex && - operation != CMD_DELETE && - resultRelInfo->ri_IndexRelationDescs == NULL) - ExecOpenIndices(resultRelInfo, - node->onConflictAction != ONCONFLICT_NONE); - - /* - * If this is an UPDATE and a BEFORE UPDATE trigger is present, the - * trigger itself might modify the partition-key values. So arrange - * for tuple routing. - */ - if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->trig_update_before_row && - operation == CMD_UPDATE) - update_tuple_routing_needed = true; - - /* Now init the plan for this result rel */ - mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags); - mtstate->mt_scans[i] = - ExecInitExtraTupleSlot(mtstate->ps.state, ExecGetResultType(mtstate->mt_plans[i]), - table_slot_callbacks(resultRelInfo->ri_RelationDesc)); - - /* Also let FDWs init themselves for foreign-table result rels */ - if (resultRelInfo->ri_FdwRoutine != NULL) - { - if (resultRelInfo->ri_usesFdwDirectModify) - { - ForeignScanState *fscan = (ForeignScanState *) mtstate->mt_plans[i]; - - /* - * For the FDW's convenience, set the ForeignScanState node's - * ResultRelInfo to let the FDW know which result relation it - * is going to work with. - */ - Assert(IsA(fscan, ForeignScanState)); - fscan->resultRelInfo = resultRelInfo; - resultRelInfo->ri_FdwRoutine->BeginDirectModify(fscan, eflags); - } - else if (resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL) - { - List *fdw_private = (List *) list_nth(node->fdwPrivLists, i); - - resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate, - resultRelInfo, - fdw_private, - i, - eflags); - } - } - - /* - * If needed, initialize a map to convert tuples in the child format - * to the format of the table mentioned in the query (root relation). - * It's needed for update tuple routing, because the routing starts - * from the root relation. 
It's also needed for capturing transition - * tuples, because the transition tuple store can only store tuples in - * the root table format. - * - * For INSERT, the map is only initialized for a given partition when - * the partition itself is first initialized by ExecFindPartition(). - */ - if (update_tuple_routing_needed || - (mtstate->mt_transition_capture && - mtstate->operation != CMD_INSERT)) - resultRelInfo->ri_ChildToRootMap = - convert_tuples_by_name(RelationGetDescr(resultRelInfo->ri_RelationDesc), - RelationGetDescr(mtstate->rootResultRelInfo->ri_RelationDesc)); - resultRelInfo++; - i++; - } - - /* Get the target relation */ - rel = mtstate->rootResultRelInfo->ri_RelationDesc; - - /* - * If it's not a partitioned table after all, UPDATE tuple routing should - * not be attempted. - */ - if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) - update_tuple_routing_needed = false; - - /* - * Build state for tuple routing if it's an INSERT or if it's an UPDATE of - * partition key. - */ - if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE && - (operation == CMD_INSERT || update_tuple_routing_needed)) - mtstate->mt_partition_tuple_routing = - ExecSetupPartitionTupleRouting(estate, mtstate, rel); - - /* - * For update row movement we'll need a dedicated slot to store the tuples - * that have been converted from partition format to the root table - * format. - */ - if (update_tuple_routing_needed) - mtstate->mt_root_tuple_slot = table_slot_create(rel, NULL); - - /* - * Initialize any WITH CHECK OPTION constraints if needed. 
- */ - resultRelInfo = mtstate->resultRelInfo; - i = 0; - foreach(l, node->withCheckOptionLists) - { - List *wcoList = (List *) lfirst(l); - List *wcoExprs = NIL; - ListCell *ll; - - foreach(ll, wcoList) - { - WithCheckOption *wco = (WithCheckOption *) lfirst(ll); - ExprState *wcoExpr = ExecInitQual((List *) wco->qual, - &mtstate->ps); - - wcoExprs = lappend(wcoExprs, wcoExpr); - } - - resultRelInfo->ri_WithCheckOptions = wcoList; - resultRelInfo->ri_WithCheckOptionExprs = wcoExprs; - resultRelInfo++; - i++; - } - - /* - * Initialize RETURNING projections if needed. - */ + /* Initialize some global state for RETURNING projections. */ if (node->returningLists) { - TupleTableSlot *slot; - ExprContext *econtext; - /* * Initialize result tuple slot and assign its rowtype using the first * RETURNING list. We assume the rest will look the same. @@ -2423,27 +2761,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* Set up a slot for the output of the RETURNING projection(s) */ ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual); - slot = mtstate->ps.ps_ResultTupleSlot; /* Need an econtext too */ if (mtstate->ps.ps_ExprContext == NULL) ExecAssignExprContext(estate, &mtstate->ps); - econtext = mtstate->ps.ps_ExprContext; - - /* - * Build a projection for each result rel. 
- */ - resultRelInfo = mtstate->resultRelInfo; - foreach(l, node->returningLists) - { - List *rlist = (List *) lfirst(l); - - resultRelInfo->ri_returningList = rlist; - resultRelInfo->ri_projectReturning = - ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps, - resultRelInfo->ri_RelationDesc->rd_att); - resultRelInfo++; - } } else { @@ -2457,67 +2778,18 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) mtstate->ps.ps_ExprContext = NULL; } - /* Set the list of arbiter indexes if needed for ON CONFLICT */ - resultRelInfo = mtstate->resultRelInfo; - if (node->onConflictAction != ONCONFLICT_NONE) - resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes; + /* Get the target relation */ + rel = mtstate->rootResultRelInfo->ri_RelationDesc; /* - * If needed, Initialize target list, projection and qual for ON CONFLICT - * DO UPDATE. + * Build state for tuple routing if it's an INSERT. An UPDATE might need + * it too, but it's initialized only when it actually ends up moving + * tuples between partitions; see ExecCrossPartitionUpdate(). */ - if (node->onConflictAction == ONCONFLICT_UPDATE) - { - ExprContext *econtext; - TupleDesc relationDesc; - TupleDesc tupDesc; - - /* insert may only have one plan, inheritance is not expanded */ - Assert(nplans == 1); - - /* already exists if created by RETURNING processing above */ - if (mtstate->ps.ps_ExprContext == NULL) - ExecAssignExprContext(estate, &mtstate->ps); - - econtext = mtstate->ps.ps_ExprContext; - relationDesc = resultRelInfo->ri_RelationDesc->rd_att; - - /* create state for DO UPDATE SET operation */ - resultRelInfo->ri_onConflict = makeNode(OnConflictSetState); - - /* initialize slot for the existing tuple */ - resultRelInfo->ri_onConflict->oc_Existing = - table_slot_create(resultRelInfo->ri_RelationDesc, - &mtstate->ps.state->es_tupleTable); - - /* - * Create the tuple slot for the UPDATE SET projection. 
We want a slot - * of the table's type here, because the slot will be used to insert - * into the table, and for RETURNING processing - which may access - * system attributes. - */ - tupDesc = ExecTypeFromTL((List *) node->onConflictSet); - resultRelInfo->ri_onConflict->oc_ProjSlot = - ExecInitExtraTupleSlot(mtstate->ps.state, tupDesc, - table_slot_callbacks(resultRelInfo->ri_RelationDesc)); - - /* build UPDATE SET projection state */ - resultRelInfo->ri_onConflict->oc_ProjInfo = - ExecBuildProjectionInfo(node->onConflictSet, econtext, - resultRelInfo->ri_onConflict->oc_ProjSlot, - &mtstate->ps, - relationDesc); - - /* initialize state to evaluate the WHERE clause, if any */ - if (node->onConflictWhere) - { - ExprState *qualexpr; - - qualexpr = ExecInitQual((List *) node->onConflictWhere, - &mtstate->ps); - resultRelInfo->ri_onConflict->oc_WhereClause = qualexpr; - } - } + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE && + operation == CMD_INSERT) + mtstate->mt_partition_tuple_routing = + ExecSetupPartitionTupleRouting(estate, mtstate, rel); /* * If we have any secondary relations in an UPDATE or DELETE, they need to @@ -2555,121 +2827,6 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) mtstate->mt_arowmarks[0]); /* - * Initialize the junk filter(s) if needed. INSERT queries need a filter - * if there are any junk attrs in the tlist. UPDATE and DELETE always - * need a filter, since there's always at least one junk attribute present - * --- no need to look first. Typically, this will be a 'ctid' or - * 'wholerow' attribute, but in the case of a foreign data wrapper it - * might be a set of junk attributes sufficient to identify the remote - * row. - * - * If there are multiple result relations, each one needs its own junk - * filter. Note multiple rels are only possible for UPDATE/DELETE, so we - * can't be fooled by some needing a filter and some not. 
- * - * This section of code is also a convenient place to verify that the - * output of an INSERT or UPDATE matches the target table(s). - */ - { - bool junk_filter_needed = false; - - switch (operation) - { - case CMD_INSERT: - foreach(l, subplan->targetlist) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - - if (tle->resjunk) - { - junk_filter_needed = true; - break; - } - } - break; - case CMD_UPDATE: - case CMD_DELETE: - junk_filter_needed = true; - break; - default: - elog(ERROR, "unknown operation"); - break; - } - - if (junk_filter_needed) - { - resultRelInfo = mtstate->resultRelInfo; - for (i = 0; i < nplans; i++) - { - JunkFilter *j; - TupleTableSlot *junkresslot; - - subplan = mtstate->mt_plans[i]->plan; - - junkresslot = - ExecInitExtraTupleSlot(estate, NULL, - table_slot_callbacks(resultRelInfo->ri_RelationDesc)); - - /* - * For an INSERT or UPDATE, the result tuple must always match - * the target table's descriptor. For a DELETE, it won't - * (indeed, there's probably no non-junk output columns). - */ - if (operation == CMD_INSERT || operation == CMD_UPDATE) - { - ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, - subplan->targetlist); - j = ExecInitJunkFilterInsertion(subplan->targetlist, - RelationGetDescr(resultRelInfo->ri_RelationDesc), - junkresslot); - } - else - j = ExecInitJunkFilter(subplan->targetlist, - junkresslot); - - if (operation == CMD_UPDATE || operation == CMD_DELETE) - { - /* For UPDATE/DELETE, find the appropriate junk attr now */ - char relkind; - - relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; - if (relkind == RELKIND_RELATION || - relkind == RELKIND_MATVIEW || - relkind == RELKIND_PARTITIONED_TABLE) - { - j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); - if (!AttributeNumberIsValid(j->jf_junkAttNo)) - elog(ERROR, "could not find junk ctid column"); - } - else if (relkind == RELKIND_FOREIGN_TABLE) - { - /* - * When there is a row-level trigger, there should be - * a wholerow attribute. 
- */ - j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow"); - } - else - { - j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow"); - if (!AttributeNumberIsValid(j->jf_junkAttNo)) - elog(ERROR, "could not find junk wholerow column"); - } - } - - resultRelInfo->ri_junkFilter = j; - resultRelInfo++; - } - } - else - { - if (operation == CMD_INSERT) - ExecCheckPlanOutput(mtstate->resultRelInfo->ri_RelationDesc, - subplan->targetlist); - } - } - - /* * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it * to estate->es_auxmodifytables so that it will be run to completion by * ExecPostprocessPlan. (It'd actually work fine to add the primary @@ -2699,20 +2856,6 @@ ExecEndModifyTable(ModifyTableState *node) int i; /* - * Allow any FDWs to shut down - */ - for (i = 0; i < node->mt_nplans; i++) - { - ResultRelInfo *resultRelInfo = node->resultRelInfo + i; - - if (!resultRelInfo->ri_usesFdwDirectModify && - resultRelInfo->ri_FdwRoutine != NULL && - resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL) - resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state, - resultRelInfo); - } - - /* * Close all the partitioned tables, leaf partitions, and their indices * and release the slot used for tuple routing, if set. 
*/ diff --git a/src/include/executor/nodeModifyTable.h b/src/include/executor/nodeModifyTable.h index 46a2dc9..9ae7e40 100644 --- a/src/include/executor/nodeModifyTable.h +++ b/src/include/executor/nodeModifyTable.h @@ -22,5 +22,6 @@ extern void ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo, extern ModifyTableState *ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags); extern void ExecEndModifyTable(ModifyTableState *node); extern void ExecReScanModifyTable(ModifyTableState *node); +extern ResultRelInfo *ExecGetResultRelation(ModifyTableState *mtstate, int whichrel); #endif /* NODEMODIFYTABLE_H */ diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 6c0a7d6..f2f4bed 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -463,6 +463,7 @@ typedef struct ResultRelInfo /* for removing junk attributes from tuples */ JunkFilter *ri_junkFilter; + bool ri_junkFilterValid; /* has the filter been initialized? */ /* list of RETURNING expressions */ List *ri_returningList; @@ -497,6 +498,7 @@ typedef struct ResultRelInfo * transition tuple capture or update partition row movement is active. */ TupleConversionMap *ri_ChildToRootMap; + bool ri_ChildToRootMapValid; /* has the map been initialized? 
*/ /* for use by copy.c when performing multi-inserts */ struct CopyMultiInsertBuffer *ri_CopyMultiInsertBuffer; diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out index ff157ce..74cd7e2 100644 --- a/src/test/regress/expected/insert_conflict.out +++ b/src/test/regress/expected/insert_conflict.out @@ -52,10 +52,7 @@ explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on con Conflict Arbiter Indexes: op_index_key, collation_index_key, both_index_key Conflict Filter: (SubPlan 1) -> Result - SubPlan 1 - -> Index Only Scan using both_index_expr_key on insertconflicttest ii - Index Cond: (key = excluded.key) -(8 rows) +(5 rows) -- Neither collation nor operator class specifications are required -- -- supplying them merely *limits* matches to indexes with matching opclasses diff --git a/src/test/regress/expected/updatable_views.out b/src/test/regress/expected/updatable_views.out index caed1c1..d8d2a3d 100644 --- a/src/test/regress/expected/updatable_views.out +++ b/src/test/regress/expected/updatable_views.out @@ -1862,28 +1862,22 @@ UPDATE rw_view1 SET a = a + 5; -- should fail ERROR: new row violates check option for view "rw_view1" DETAIL: Failing row contains (15). 
EXPLAIN (costs off) INSERT INTO rw_view1 VALUES (5); - QUERY PLAN ---------------------------------------------------------- + QUERY PLAN +---------------------- Insert on base_tbl b -> Result - SubPlan 1 - -> Index Only Scan using ref_tbl_pkey on ref_tbl r - Index Cond: (a = b.a) -(5 rows) +(2 rows) EXPLAIN (costs off) UPDATE rw_view1 SET a = a + 5; - QUERY PLAN ------------------------------------------------------------ + QUERY PLAN +----------------------------------------- Update on base_tbl b -> Hash Join Hash Cond: (b.a = r.a) -> Seq Scan on base_tbl b -> Hash -> Seq Scan on ref_tbl r - SubPlan 1 - -> Index Only Scan using ref_tbl_pkey on ref_tbl r_1 - Index Cond: (a = b.a) -(9 rows) +(6 rows) DROP TABLE base_tbl, ref_tbl CASCADE; NOTICE: drop cascades to view rw_view1 diff --git a/src/test/regress/expected/update.out b/src/test/regress/expected/update.out index bf939d7..0ad0d1a 100644 --- a/src/test/regress/expected/update.out +++ b/src/test/regress/expected/update.out @@ -341,8 +341,8 @@ DETAIL: Failing row contains (105, 85, null, b, 15). -- fail, no partition key update, so no attempt to move tuple, -- but "a = 'a'" violates partition constraint enforced by root partition) UPDATE part_b_10_b_20 set a = 'a'; -ERROR: new row for relation "part_c_1_100" violates partition constraint -DETAIL: Failing row contains (null, 1, 96, 12, a). +ERROR: new row for relation "part_b_10_b_20" violates partition constraint +DETAIL: Failing row contains (null, 96, a, 12, 1). -- ok, partition key update, no constraint violation UPDATE range_parted set d = d - 10 WHERE d > 10; -- ok, no partition key update, no constraint violation @@ -372,8 +372,8 @@ UPDATE part_b_10_b_20 set c = c + 20 returning c, b, a; -- fail, row movement happens only within the partition subtree. UPDATE part_b_10_b_20 set b = b - 6 WHERE c > 116 returning *; -ERROR: new row for relation "part_d_1_15" violates partition constraint -DETAIL: Failing row contains (2, 117, 2, b, 7). 
+ERROR: new row for relation "part_b_10_b_20" violates partition constraint +DETAIL: Failing row contains (2, 117, b, 7, 2). -- ok, row movement, with subset of rows moved into different partition. UPDATE range_parted set b = b - 6 WHERE c > 116 returning a, b + c; a | ?column? @@ -814,8 +814,8 @@ INSERT into sub_parted VALUES (1,2,10); -- Test partition constraint violation when intermediate ancestor is used and -- constraint is inherited from upper root. UPDATE sub_parted set a = 2 WHERE c = 10; -ERROR: new row for relation "sub_part2" violates partition constraint -DETAIL: Failing row contains (2, 10, 2). +ERROR: new row for relation "sub_parted" violates partition constraint +DETAIL: Failing row contains (2, 2, 10). -- Test update-partition-key, where the unpruned partitions do not have their -- partition keys updated. SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1; -- 1.8.3.1