diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index b05a9c2..5a436a1 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -2993,6 +2993,11 @@ VALUES ('Albany', NULL, NULL, 'NY');
foreign table partitions.
+
+ Updating the partition key of a row might cause it to be moved into a
+ different partition, namely the one whose partition constraint the
+ modified row satisfies.
+
+
Example
@@ -3285,9 +3290,20 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02
- An UPDATE> that causes a row to move from one partition to
- another fails, because the new value of the row fails to satisfy the
- implicit partition constraint of the original partition.
+ When an UPDATE> causes a row to move from one partition to
+ another, there is a chance that a concurrent UPDATE> or
+ DELETE> misses this row. Suppose that, while the row is being
+ moved, it is still visible to the concurrent session, which is about to
+ perform an UPDATE> or DELETE> operation on the same
+ row. That DML operation can silently miss the row if the row is deleted
+ from the partition by the first session as part of its
+ UPDATE> row movement. In such a case, the concurrent
+ UPDATE>/DELETE>, being unaware of the row
+ movement, concludes that the row has just been deleted, so there is
+ nothing to be done for this row. By contrast, in the usual case where the
+ table is not partitioned, or where there is no row movement, the second
+ session would have identified the newly updated row and carried out the
+ UPDATE>/DELETE> on this new row version.
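+
+ For example, assuming the measurement table described above has partitions
+ for January and February 2008, an UPDATE> of the partition key
+ such as the following moves the affected rows from the January partition
+ into the February partition (a hypothetical illustration; the exact
+ partition names depend on how the partitions were created):
+<programlisting>
+UPDATE measurement SET logdate = logdate + INTERVAL '1 month'
+    WHERE logdate >= DATE '2008-01-01' AND logdate < DATE '2008-02-01';
+</programlisting>
+ Each moved row is deleted from the January partition and inserted into the
+ February partition, which is why a concurrent UPDATE> or
+ DELETE> that had already located the row in the January
+ partition can miss it.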
diff --git a/doc/src/sgml/ref/update.sgml b/doc/src/sgml/ref/update.sgml
index 8a1619f..28cfc1a 100644
--- a/doc/src/sgml/ref/update.sgml
+++ b/doc/src/sgml/ref/update.sgml
@@ -282,10 +282,17 @@ UPDATE count
In the case of a partitioned table, updating a row might cause it to no
- longer satisfy the partition constraint. Since there is no provision to
- move the row to the partition appropriate to the new value of its
- partitioning key, an error will occur in this case. This can also happen
- when updating a partition directly.
+ longer satisfy the partition constraint of the containing partition. In that
+ case, if there is some other partition in the partition tree for which this
+ row satisfies its partition constraint, then the row is moved to that
+ partition. If there is no such partition, an error will occur. The error
+ will also occur when updating a partition directly. Behind the scenes, the
+ row movement is actually a DELETE> and
+ INSERT> operation. However, there is a possibility that a
+ concurrent UPDATE> or DELETE> on the same row may miss
+ this row. For details see the section
+ .
+
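+
+ A minimal sketch of this behavior, using a hypothetical table
+ parts range-partitioned on its id column (all names here are illustrative
+ only):
+<programlisting>
+CREATE TABLE parts (id int, name text) PARTITION BY RANGE (id);
+CREATE TABLE parts_low  PARTITION OF parts FOR VALUES FROM (1) TO (100);
+CREATE TABLE parts_high PARTITION OF parts FOR VALUES FROM (100) TO (200);
+INSERT INTO parts VALUES (50, 'bolt');
+
+UPDATE parts SET id = 150 WHERE id = 50;       -- row is moved to parts_high
+UPDATE parts_high SET id = 50 WHERE id = 150;  -- fails: updating the partition
+                                               -- directly cannot move the row,
+                                               -- so the partition constraint
+                                               -- is violated
+</programlisting>
+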
diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml
index f5f74af..99b271f 100644
--- a/doc/src/sgml/trigger.sgml
+++ b/doc/src/sgml/trigger.sgml
@@ -154,6 +154,29 @@
+ If an UPDATE on a partitioned table causes a row to
+ move to another partition, it will be performed as a
+ DELETE from the original partition followed by
+ an INSERT into the new partition. In this case, all
+ row-level BEFORE> UPDATE triggers and all
+ row-level BEFORE> DELETE triggers are fired
+ on the original partition. Then all row-level BEFORE>
+ INSERT triggers are fired on the destination partition.
+ The possibility of surprising outcomes should be considered when all these
+ triggers affect the row being moved. As far as AFTER ROW>
+ triggers are concerned, AFTER> DELETE and
+ AFTER> INSERT triggers are applied; but
+ AFTER> UPDATE triggers are not applied
+ because the UPDATE has been converted into a
+ DELETE and an INSERT. As far as
+ statement-level triggers are concerned, none of the
+ DELETE or INSERT triggers are fired,
+ even if row movement occurs; only the UPDATE triggers
+ defined on the target table used in the UPDATE statement
+ will be fired.
+
+
+
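+
+ As an illustration, consider a hypothetical list-partitioned table with
+ AFTER ROW triggers on its partitions, using a trigger function
+ logme() that merely reports TG_OP and TG_TABLE_NAME (all names here are
+ illustrative only):
+<programlisting>
+CREATE TABLE t (a int) PARTITION BY LIST (a);
+CREATE TABLE t1 PARTITION OF t FOR VALUES IN (1);
+CREATE TABLE t2 PARTITION OF t FOR VALUES IN (2);
+
+CREATE FUNCTION logme() RETURNS trigger LANGUAGE plpgsql AS $$
+BEGIN
+    RAISE NOTICE '% on %', TG_OP, TG_TABLE_NAME;
+    RETURN NULL;
+END $$;
+
+CREATE TRIGGER t1_ar AFTER INSERT OR UPDATE OR DELETE ON t1
+    FOR EACH ROW EXECUTE PROCEDURE logme();
+CREATE TRIGGER t2_ar AFTER INSERT OR UPDATE OR DELETE ON t2
+    FOR EACH ROW EXECUTE PROCEDURE logme();
+
+INSERT INTO t VALUES (1);     -- NOTICE: INSERT on t1
+UPDATE t SET a = 2;           -- row moves from t1 to t2;
+                              -- NOTICE: DELETE on t1
+                              -- NOTICE: INSERT on t2
+                              -- (no UPDATE notice on either partition)
+</programlisting>
+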
Trigger functions invoked by per-statement triggers should always
return NULL. Trigger functions invoked by per-row
triggers can return a table row (a value of
diff --git a/src/backend/catalog/partition.c b/src/backend/catalog/partition.c
index 1ab6dba..737c9e30 100644
--- a/src/backend/catalog/partition.c
+++ b/src/backend/catalog/partition.c
@@ -1105,7 +1105,8 @@ get_qual_from_partbound(Relation rel, Relation parent,
/*
* map_partition_varattnos - maps varattno of any Vars in expr from the
- * parent attno to partition attno.
+ * attnos of 'from_rel' to the attnos of 'to_rel'.
+ * Each of the rels can be either a leaf partition or a partitioned table.
*
* We must allow for cases where physical attnos of a partition can be
* different from the parent's.
@@ -1118,8 +1119,8 @@ get_qual_from_partbound(Relation rel, Relation parent,
* are working on Lists, so it's less messy to do the casts internally.
*/
List *
-map_partition_varattnos(List *expr, int target_varno,
- Relation partrel, Relation parent,
+map_partition_varattnos(List *expr, int fromrel_varno,
+ Relation to_rel, Relation from_rel,
bool *found_whole_row)
{
bool my_found_whole_row = false;
@@ -1128,14 +1129,14 @@ map_partition_varattnos(List *expr, int target_varno,
{
AttrNumber *part_attnos;
- part_attnos = convert_tuples_by_name_map(RelationGetDescr(partrel),
- RelationGetDescr(parent),
+ part_attnos = convert_tuples_by_name_map(RelationGetDescr(to_rel),
+ RelationGetDescr(from_rel),
gettext_noop("could not convert row type"));
expr = (List *) map_variable_attnos((Node *) expr,
- target_varno, 0,
+ fromrel_varno, 0,
part_attnos,
- RelationGetDescr(parent)->natts,
- RelationGetForm(partrel)->reltype,
+ RelationGetDescr(from_rel)->natts,
+ RelationGetForm(to_rel)->reltype,
&my_found_whole_row);
}
@@ -2439,6 +2440,77 @@ error_exit:
}
/*
+ * For each column of rel which is in the partition key or which appears
+ * in an expression which is in the partition key, translate the attribute
+ * number of that column according to the given parent, and add the resulting
+ * column number to the bitmapset, offset as we frequently do by
+ * FirstLowInvalidHeapAttributeNumber.
+ */
+void
+pull_child_partition_columns(Bitmapset **bitmapset,
+ Relation rel,
+ Relation parent)
+{
+ PartitionKey key = RelationGetPartitionKey(rel);
+ int16 partnatts = get_partition_natts(key);
+ List *partexprs = get_partition_exprs(key);
+ ListCell *lc;
+ Bitmapset *child_keycols = NULL;
+ int i;
+ AttrNumber *map;
+ int child_keycol = -1;
+
+ /*
+ * First, compute the complete set of partition columns for this rel. For
+ * compatibility with the API exposed by pull_varattnos, we offset the
+ * column numbers by FirstLowInvalidHeapAttributeNumber.
+ */
+ for (i = 0; i < partnatts; i++)
+ {
+ AttrNumber partattno = get_partition_col_attnum(key, i);
+
+ if (partattno != 0)
+ child_keycols =
+ bms_add_member(child_keycols,
+ partattno - FirstLowInvalidHeapAttributeNumber);
+ }
+ foreach(lc, partexprs)
+ {
+ Node *expr = (Node *) lfirst(lc);
+
+ pull_varattnos(expr, 1, &child_keycols);
+ }
+
+ /*
+ * Next, work out how to convert from the attribute numbers for the child
+ * to the attribute numbers for the parent.
+ */
+ map =
+ convert_tuples_by_name_map(RelationGetDescr(parent),
+ RelationGetDescr(rel),
+ gettext_noop("could not convert row type"));
+
+ /*
+ * For each child key column we have identified, translate to the
+ * corresponding parent key column. Entry 0 in the map array corresponds
+ * to attribute number 1, which corresponds to a bitmapset entry for 1 -
+ * FirstLowInvalidHeapAttributeNumber.
+ */
+ while ((child_keycol = bms_next_member(child_keycols, child_keycol)) >= 0)
+ {
+ int kc = child_keycol + FirstLowInvalidHeapAttributeNumber;
+
+ Assert(kc > 0 && kc <= RelationGetNumberOfAttributes(rel));
+ *bitmapset =
+ bms_add_member(*bitmapset,
+ map[kc - 1] - FirstLowInvalidHeapAttributeNumber);
+ }
+
+ /* Release memory. */
+ pfree(map);
+}
+
+/*
* qsort_partition_list_value_cmp
*
* Compare two list partition bound datums
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 098bc66..7881720 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2730,7 +2730,7 @@ CopyFrom(CopyState cstate)
/* Check the constraints of the tuple */
if (cstate->rel->rd_att->constr || check_partition_constr)
- ExecConstraints(resultRelInfo, slot, estate);
+ ExecConstraints(resultRelInfo, slot, estate, true);
if (useHeapMultiInsert)
{
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index e75a59d..873156b 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -2854,8 +2854,13 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
{
HeapTuple trigtuple;
- Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
- if (fdw_trigtuple == NULL)
+ /*
+ * Note: if the UPDATE is converted into a DELETE+INSERT as part of an
+ * update-partition-key operation, then this function is also called
+ * separately for the DELETE and the INSERT to capture transition table
+ * rows. In such a case, either the old tuple or the new tuple can be NULL.
+ */
+ if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
trigtuple = GetTupleForTrigger(estate,
NULL,
relinfo,
@@ -5428,7 +5433,12 @@ AfterTriggerPendingOnRel(Oid relid)
* triggers actually need to be queued. It is also called after each row,
* even if there are no triggers for that event, if there are any AFTER
* STATEMENT triggers for the statement which use transition tables, so that
- * the transition tuplestores can be built.
+ * the transition tuplestores can be built. Furthermore, if the transition
+ * capture is happening for UPDATEd rows being moved to another partition due
+ * to a partition-key change, then this function is called once when the row
+ * is deleted (to capture the OLD row), and once when the row is inserted
+ * into the other partition (to capture the NEW row). This is done separately
+ * because the DELETE and the INSERT happen on different tables.
*
* Transition tuplestores are built now, rather than when events are pulled
* off of the queue because AFTER ROW triggers are allowed to select from the
@@ -5477,12 +5487,27 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
bool update_new_table = transition_capture->tcs_update_new_table;
bool insert_new_table = transition_capture->tcs_insert_new_table;
- if ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
- (event == TRIGGER_EVENT_UPDATE && update_old_table))
+ /*
+ * When capturing transition tuples for UPDATE events fired during
+ * partition row movement, either oldtup or newtup can be NULL,
+ * depending on whether the event is for the row being deleted from the
+ * old partition or for the row being inserted into the new partition.
+ * In any case, oldtup should always be non-NULL for DELETE events, and
+ * newtup should be non-NULL for INSERT events, because with transition
+ * capture during partition row movement, INSERT and DELETE events don't
+ * fire; only the UPDATE event is fired.
+ */
+ Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
+ oldtup == NULL));
+ Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
+ newtup == NULL));
+
+ if (oldtup != NULL &&
+ ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
+ (event == TRIGGER_EVENT_UPDATE && update_old_table)))
{
Tuplestorestate *old_tuplestore;
- Assert(oldtup != NULL);
old_tuplestore = transition_capture->tcs_private->old_tuplestore;
if (map != NULL)
@@ -5495,12 +5520,12 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
else
tuplestore_puttuple(old_tuplestore, oldtup);
}
- if ((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
- (event == TRIGGER_EVENT_UPDATE && update_new_table))
+ if (newtup != NULL &&
+ ((event == TRIGGER_EVENT_INSERT && insert_new_table) ||
+ (event == TRIGGER_EVENT_UPDATE && update_new_table)))
{
Tuplestorestate *new_tuplestore;
- Assert(newtup != NULL);
new_tuplestore = transition_capture->tcs_private->new_tuplestore;
if (original_insert_tuple != NULL)
@@ -5520,7 +5545,8 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
if (trigdesc == NULL ||
(event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
(event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
- (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
+ (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) ||
+ (event == TRIGGER_EVENT_UPDATE && (oldtup == NULL || newtup == NULL)))
return;
}
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index b31ab36..d48da8e 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -104,9 +104,6 @@ static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
Plan *planTree);
-static void ExecPartitionCheck(ResultRelInfo *resultRelInfo,
- TupleTableSlot *slot, EState *estate);
-
/*
* Note that GetUpdatedColumns() also exists in commands/trigger.c. There does
* not appear to be any good header to put it into, given the structures that
@@ -1851,15 +1848,10 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
/*
* ExecPartitionCheck --- check that tuple meets the partition constraint.
*/
-static void
+bool
ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
EState *estate)
{
- Relation rel = resultRelInfo->ri_RelationDesc;
- TupleDesc tupdesc = RelationGetDescr(rel);
- Bitmapset *modifiedCols;
- Bitmapset *insertedCols;
- Bitmapset *updatedCols;
ExprContext *econtext;
/*
@@ -1887,52 +1879,66 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
* As in case of the catalogued constraints, we treat a NULL result as
* success here, not a failure.
*/
- if (!ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext))
- {
- char *val_desc;
- Relation orig_rel = rel;
+ return ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
+}
- /* See the comment above. */
- if (resultRelInfo->ri_PartitionRoot)
+/*
+ * ExecPartitionCheckEmitError - Form and emit an error message after a failed
+ * partition constraint check.
+ */
+void
+ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ EState *estate)
+{
+ Relation rel = resultRelInfo->ri_RelationDesc;
+ Relation orig_rel = rel;
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ char *val_desc;
+ Bitmapset *modifiedCols;
+ Bitmapset *insertedCols;
+ Bitmapset *updatedCols;
+
+ /* See the comments in ExecConstraints. */
+ if (resultRelInfo->ri_PartitionRoot)
+ {
+ HeapTuple tuple = ExecFetchSlotTuple(slot);
+ TupleDesc old_tupdesc = RelationGetDescr(rel);
+ TupleConversionMap *map;
+
+ rel = resultRelInfo->ri_PartitionRoot;
+ tupdesc = RelationGetDescr(rel);
+ /* a reverse map */
+ map = convert_tuples_by_name(old_tupdesc, tupdesc,
+ gettext_noop("could not convert row type"));
+ if (map != NULL)
{
- HeapTuple tuple = ExecFetchSlotTuple(slot);
- TupleDesc old_tupdesc = RelationGetDescr(rel);
- TupleConversionMap *map;
-
- rel = resultRelInfo->ri_PartitionRoot;
- tupdesc = RelationGetDescr(rel);
- /* a reverse map */
- map = convert_tuples_by_name(old_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
- if (map != NULL)
- {
- tuple = do_convert_tuple(tuple, map);
- ExecSetSlotDescriptor(slot, tupdesc);
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
- }
+ tuple = do_convert_tuple(tuple, map);
+ ExecSetSlotDescriptor(slot, tupdesc);
+ ExecStoreTuple(tuple, slot, InvalidBuffer, false);
}
-
- insertedCols = GetInsertedColumns(resultRelInfo, estate);
- updatedCols = GetUpdatedColumns(resultRelInfo, estate);
- modifiedCols = bms_union(insertedCols, updatedCols);
- val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
- slot,
- tupdesc,
- modifiedCols,
- 64);
- ereport(ERROR,
- (errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("new row for relation \"%s\" violates partition constraint",
- RelationGetRelationName(orig_rel)),
- val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
}
+
+ insertedCols = GetInsertedColumns(resultRelInfo, estate);
+ updatedCols = GetUpdatedColumns(resultRelInfo, estate);
+ modifiedCols = bms_union(insertedCols, updatedCols);
+ val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
+ slot,
+ tupdesc,
+ modifiedCols,
+ 64);
+ ereport(ERROR,
+ (errcode(ERRCODE_CHECK_VIOLATION),
+ errmsg("new row for relation \"%s\" violates partition constraint",
+ RelationGetRelationName(orig_rel)),
+ val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
}
/*
* ExecConstraints - check constraints of the tuple in 'slot'
*
- * This checks the traditional NOT NULL and check constraints, as well as
- * the partition constraint, if any.
+ * This checks the traditional NOT NULL and check constraints, and, if
+ * requested, the partition constraint.
*
* Note: 'slot' contains the tuple to check the constraints of, which may
* have been converted from the original input tuple after tuple routing.
@@ -1940,7 +1946,8 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
*/
void
ExecConstraints(ResultRelInfo *resultRelInfo,
- TupleTableSlot *slot, EState *estate)
+ TupleTableSlot *slot, EState *estate,
+ bool check_partition_constraint)
{
Relation rel = resultRelInfo->ri_RelationDesc;
TupleDesc tupdesc = RelationGetDescr(rel);
@@ -2056,8 +2063,9 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
}
}
- if (resultRelInfo->ri_PartitionCheck)
- ExecPartitionCheck(resultRelInfo, slot, estate);
+ if (check_partition_constraint && resultRelInfo->ri_PartitionCheck &&
+ !ExecPartitionCheck(resultRelInfo, slot, estate))
+ ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
}
@@ -3252,18 +3260,18 @@ EvalPlanQualEnd(EPQState *epqstate)
* every partitioned table in the partition tree
* 'partitions' receives an array of ResultRelInfo* objects with one entry for
* every leaf partition in the partition tree
- * 'tup_conv_maps' receives an array of TupleConversionMap objects with one
- * entry for every leaf partition (required to convert input tuple based
- * on the root table's rowtype to a leaf partition's rowtype after tuple
- * routing is done)
+ * 'perleaf_parentchild_maps' receives an array of TupleConversionMap objects
+ * with one entry for every leaf partition (required to convert input tuple
+ * based on the root table's rowtype to a leaf partition's rowtype after
+ * tuple routing is done)
* 'partition_tuple_slot' receives a standalone TupleTableSlot to be used
* to manipulate any given leaf partition's rowtype after that partition
* is chosen by tuple-routing.
* 'num_parted' receives the number of partitioned tables in the partition
* tree (= the number of entries in the 'pd' output array)
* 'num_partitions' receives the number of leaf partitions in the partition
- * tree (= the number of entries in the 'partitions' and 'tup_conv_maps'
- * output arrays
+ * tree (= the number of entries in the 'partitions' and
+ * 'perleaf_parentchild_maps' output arrays)
*
* Note that all the relations in the partition tree are locked using the
* RowExclusiveLock mode upon return from this function.
@@ -3276,7 +3284,7 @@ ExecSetupPartitionTupleRouting(Relation rel,
EState *estate,
PartitionDispatch **pd,
ResultRelInfo ***partitions,
- TupleConversionMap ***tup_conv_maps,
+ TupleConversionMap ***perleaf_parentchild_maps,
TupleTableSlot **partition_tuple_slot,
int *num_parted, int *num_partitions)
{
@@ -3297,8 +3305,8 @@ ExecSetupPartitionTupleRouting(Relation rel,
*num_partitions = list_length(leaf_parts);
*partitions = (ResultRelInfo **) palloc(*num_partitions *
sizeof(ResultRelInfo *));
- *tup_conv_maps = (TupleConversionMap **) palloc0(*num_partitions *
- sizeof(TupleConversionMap *));
+ *perleaf_parentchild_maps = (TupleConversionMap **) palloc0(*num_partitions *
+ sizeof(TupleConversionMap *));
if (num_update_rri != 0)
{
@@ -3405,11 +3413,13 @@ ExecSetupPartitionTupleRouting(Relation rel,
* Save a tuple conversion map to convert a tuple routed to this
* partition from the parent's type to the partition's.
*/
- (*tup_conv_maps)[i] = convert_tuples_by_name(tupDesc, part_tupdesc,
- gettext_noop("could not convert row type"));
+ (*perleaf_parentchild_maps)[i] = convert_tuples_by_name(tupDesc, part_tupdesc,
+ gettext_noop("could not convert row type"));
/*
- * Verify result relation is a valid target for INSERT.
+ * Verify result relation is a valid target for an insert operation. Even
+ * for UPDATEs, we are doing this as part of tuple routing, so we still
+ * need to check its validity as an insert target.
*/
CheckValidResultRel(leaf_part_rri, CMD_INSERT);
@@ -3462,8 +3472,9 @@ ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd,
* First check the root table's partition constraint, if any. No point in
* routing the tuple if it doesn't belong in the root table itself.
*/
- if (resultRelInfo->ri_PartitionCheck)
- ExecPartitionCheck(resultRelInfo, slot, estate);
+ if (resultRelInfo->ri_PartitionCheck &&
+ !ExecPartitionCheck(resultRelInfo, slot, estate))
+ ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
result = get_partition_for_tuple(pd, slot, estate,
&failed_at, &failed_slot);
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index 5a75e02..6b8af46 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -401,7 +401,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
/* Check the constraints of the tuple */
if (rel->rd_att->constr)
- ExecConstraints(resultRelInfo, slot, estate);
+ ExecConstraints(resultRelInfo, slot, estate, true);
/* Store the slot into tuple that we can inspect. */
tuple = ExecMaterializeSlot(slot);
@@ -466,7 +466,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
/* Check the constraints of the tuple */
if (rel->rd_att->constr)
- ExecConstraints(resultRelInfo, slot, estate);
+ ExecConstraints(resultRelInfo, slot, estate, true);
/* Store the slot into tuple that we can write. */
tuple = ExecMaterializeSlot(slot);
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index a64b477..03bf01c 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -45,6 +45,7 @@
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
+#include "optimizer/var.h"
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
@@ -63,6 +64,11 @@ static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
bool canSetTag,
TupleTableSlot **returning);
+static HeapTuple ConvertPartitionTupleSlot(ModifyTableState *mtstate,
+ TupleConversionMap *map,
+ HeapTuple tuple,
+ TupleTableSlot *new_slot,
+ TupleTableSlot **p_old_slot);
/*
* Verify that the tuples to be produced by INSERT or UPDATE match the
* target relation's rowtype
@@ -240,6 +246,38 @@ ExecCheckTIDVisible(EState *estate,
ReleaseBuffer(buffer);
}
+/*
+ * ConvertPartitionTupleSlot -- convenience function for converting a tuple
+ * and storing it into the tuple slot provided through 'new_slot', which
+ * typically should be one of the dedicated partition tuple slots. Passes the
+ * partition tuple slot back through the output parameter 'p_old_slot'. If no
+ * mapping is present, leaves '*p_old_slot' unchanged.
+ *
+ * Returns the converted tuple.
+ */
+static HeapTuple
+ConvertPartitionTupleSlot(ModifyTableState *mtstate,
+ TupleConversionMap *map,
+ HeapTuple tuple,
+ TupleTableSlot *new_slot,
+ TupleTableSlot **p_old_slot)
+{
+ if (!map)
+ return tuple;
+
+ tuple = do_convert_tuple(tuple, map);
+
+ /*
+ * Change the partition tuple slot descriptor to match the converted tuple.
+ */
+ *p_old_slot = new_slot;
+ Assert(new_slot != NULL);
+ ExecSetSlotDescriptor(new_slot, map->outdesc);
+ ExecStoreTuple(tuple, new_slot, InvalidBuffer, true);
+
+ return tuple;
+}
+
/* ----------------------------------------------------------------
* ExecInsert
*
@@ -265,6 +303,7 @@ ExecInsert(ModifyTableState *mtstate,
Oid newId;
List *recheckIndexes = NIL;
TupleTableSlot *result = NULL;
+ TransitionCaptureState *transition_capture = mtstate->mt_transition_capture;
/*
* get the heap tuple out of the tuple table slot, making sure we have a
@@ -281,17 +320,49 @@ ExecInsert(ModifyTableState *mtstate,
if (mtstate->mt_partition_dispatch_info)
{
int leaf_part_index;
- TupleConversionMap *map;
+ ResultRelInfo *rootResultRelInfo;
+
+ /*
+ * If the original operation is UPDATE, the root partition rel needs
+ * to be fetched from mtstate->rootResultRelInfo.
+ */
+ rootResultRelInfo = (mtstate->rootResultRelInfo ?
+ mtstate->rootResultRelInfo : resultRelInfo);
+
+ /*
+ * If resultRelInfo is not the root partition (which happens for
+ * UPDATE), convert the tuple into the root partition's tuple
+ * descriptor, since ExecFindPartition() starts the search from the
+ * root. The tuple conversion maps are in the same order as
+ * mtstate->resultRelInfo[], so to retrieve the one for this
+ * result rel we need to know its position in
+ * mtstate->resultRelInfo[].
+ */
+ if (rootResultRelInfo != resultRelInfo &&
+ mtstate->mt_persubplan_childparent_maps != NULL)
+ {
+ int map_index = resultRelInfo - mtstate->resultRelInfo;
+
+ /* resultRelInfo must be one of the per-subplan result rels. */
+ Assert(resultRelInfo >= mtstate->resultRelInfo &&
+ resultRelInfo <= mtstate->resultRelInfo + mtstate->mt_nplans - 1);
+
+ tuple = ConvertPartitionTupleSlot(mtstate,
+ mtstate->mt_persubplan_childparent_maps[map_index],
+ tuple,
+ mtstate->mt_rootpartition_tuple_slot,
+ &slot);
+ }
/*
* Away we go ... If we end up not finding a partition after all,
* ExecFindPartition() does not return and errors out instead.
* Otherwise, the returned value is to be used as an index into arrays
- * mt_partitions[] and mt_partition_tupconv_maps[] that will get us
+ * mt_partitions[] and mt_perleaf_parentchild_maps[] that will get us
* the ResultRelInfo and TupleConversionMap for the partition,
* respectively.
*/
- leaf_part_index = ExecFindPartition(resultRelInfo,
+ leaf_part_index = ExecFindPartition(rootResultRelInfo,
mtstate->mt_partition_dispatch_info,
slot,
estate);
@@ -331,7 +402,7 @@ ExecInsert(ModifyTableState *mtstate,
*/
mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
mtstate->mt_transition_capture->tcs_map =
- mtstate->mt_transition_tupconv_maps[leaf_part_index];
+ mtstate->mt_perleaf_childparent_maps[leaf_part_index];
}
else
{
@@ -345,29 +416,17 @@ ExecInsert(ModifyTableState *mtstate,
}
if (mtstate->mt_oc_transition_capture != NULL)
mtstate->mt_oc_transition_capture->tcs_map =
- mtstate->mt_transition_tupconv_maps[leaf_part_index];
+ mtstate->mt_perleaf_childparent_maps[leaf_part_index];
/*
* We might need to convert from the parent rowtype to the partition
* rowtype.
*/
- map = mtstate->mt_partition_tupconv_maps[leaf_part_index];
- if (map)
- {
- Relation partrel = resultRelInfo->ri_RelationDesc;
-
- tuple = do_convert_tuple(tuple, map);
-
- /*
- * We must use the partition's tuple descriptor from this point
- * on, until we're finished dealing with the partition. Use the
- * dedicated slot for that.
- */
- slot = mtstate->mt_partition_tuple_slot;
- Assert(slot != NULL);
- ExecSetSlotDescriptor(slot, RelationGetDescr(partrel));
- ExecStoreTuple(tuple, slot, InvalidBuffer, true);
- }
+ tuple = ConvertPartitionTupleSlot(mtstate,
+ mtstate->mt_perleaf_parentchild_maps[leaf_part_index],
+ tuple,
+ mtstate->mt_partition_tuple_slot,
+ &slot);
}
resultRelationDesc = resultRelInfo->ri_RelationDesc;
@@ -485,7 +544,7 @@ ExecInsert(ModifyTableState *mtstate,
/* Check the constraints of the tuple */
if (resultRelationDesc->rd_att->constr || check_partition_constr)
- ExecConstraints(resultRelInfo, slot, estate);
+ ExecConstraints(resultRelInfo, slot, estate, true);
if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
{
@@ -621,9 +680,31 @@ ExecInsert(ModifyTableState *mtstate,
setLastTid(&(tuple->t_self));
}
+ /*
+ * In case this is part of update tuple routing, put this row into the
+ * transition NEW TABLE if we are capturing transition tables. We need to
+ * do this separately for DELETE and INSERT because they happen on
+ * different tables.
+ */
+ if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
+ && mtstate->mt_transition_capture->tcs_update_new_table)
+ {
+ ExecARUpdateTriggers(estate, resultRelInfo, NULL,
+ NULL,
+ tuple,
+ NULL,
+ mtstate->mt_transition_capture);
+
+ /*
+ * Now that we have captured the NEW TABLE row, the AR INSERT
+ * trigger below must not capture it again; arrange that by
+ * clearing transition_capture.
+ */
+ transition_capture = NULL;
+ }
+
/* AFTER ROW INSERT Triggers */
ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes,
- mtstate->mt_transition_capture);
+ transition_capture);
list_free(recheckIndexes);
@@ -677,6 +758,8 @@ ExecDelete(ModifyTableState *mtstate,
TupleTableSlot *planSlot,
EPQState *epqstate,
EState *estate,
+ bool *delete_skipped,
+ bool process_returning,
bool canSetTag)
{
ResultRelInfo *resultRelInfo;
@@ -684,6 +767,10 @@ ExecDelete(ModifyTableState *mtstate,
HTSU_Result result;
HeapUpdateFailureData hufd;
TupleTableSlot *slot = NULL;
+ TransitionCaptureState *transition_capture = mtstate->mt_transition_capture;
+
+ if (delete_skipped)
+ *delete_skipped = true;
/*
* get information on the (current) result relation
@@ -848,12 +935,39 @@ ldelete:;
if (canSetTag)
(estate->es_processed)++;
+ /* The delete has actually happened, so inform the caller */
+ if (delete_skipped)
+ *delete_skipped = false;
+
+ /*
+ * In case this is part of update tuple routing, put this row into the
+ * transition OLD TABLE if we are capturing transition tables. We need to
+ * do this separately for DELETE and INSERT because they happen on
+ * different tables.
+ */
+ if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
+ && mtstate->mt_transition_capture->tcs_update_old_table)
+ {
+ ExecARUpdateTriggers(estate, resultRelInfo,
+ tupleid,
+ oldtuple,
+ NULL,
+ NULL,
+ transition_capture);
+
+ /*
+ * Now that we have captured the OLD TABLE row, the AR DELETE
+ * trigger below must not capture it again; arrange that by
+ * clearing transition_capture.
+ */
+ transition_capture = NULL;
+ }
+
/* AFTER ROW DELETE Triggers */
ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
- mtstate->mt_transition_capture);
+ transition_capture);
- /* Process RETURNING if present */
- if (resultRelInfo->ri_projectReturning)
+ /* Process RETURNING if present and if requested */
+ if (process_returning && resultRelInfo->ri_projectReturning)
{
/*
* We have to put the target tuple into a slot, which means first we
@@ -946,6 +1060,8 @@ ExecUpdate(ModifyTableState *mtstate,
HTSU_Result result;
HeapUpdateFailureData hufd;
List *recheckIndexes = NIL;
+ TupleConversionMap *saved_tcs_map = NULL;
+
/*
* abort the operation if not running transactions
@@ -1042,12 +1158,82 @@ lreplace:;
resultRelInfo, slot, estate);
/*
+ * If a partition check fails, try to move the row into the right
+ * partition.
+ */
+ if (resultRelInfo->ri_PartitionCheck &&
+ !ExecPartitionCheck(resultRelInfo, slot, estate))
+ {
+ bool delete_skipped;
+ TupleTableSlot *ret_slot;
+
+ /*
+ * When an UPDATE is run directly on a leaf partition, we do not have
+ * partition tuple routing set up. In that case, fail with a
+ * partition constraint violation error.
+ */
+ if (mtstate->mt_partition_dispatch_info == NULL)
+ ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
+
+ /* Do the row movement. */
+
+ /*
+ * Skip RETURNING processing for DELETE. We want to return rows
+ * from INSERT.
+ */
+ ExecDelete(mtstate, tupleid, oldtuple, planSlot, epqstate, estate,
+ &delete_skipped, false, false);
+
+ /*
+ * If for some reason the DELETE didn't happen (e.g. a trigger
+ * prevented it, or the row was already deleted by this command, or
+ * it was concurrently deleted by another transaction), then we
+ * should skip the INSERT as well; otherwise, there would
+ * effectively be one new row inserted.
+ *
+ * For a normal UPDATE, the case where the tuple has been the
+ * subject of a concurrent UPDATE or DELETE would be handled by
+ * the EvalPlanQual machinery, but for an UPDATE that we've
+ * translated into a DELETE from this partition and an INSERT into
+ * some other partition, that's not available, because CTID chains
+ * can't span relation boundaries. We mimic the semantics to a
+ * limited extent by skipping the INSERT if the DELETE fails to
+ * find a tuple. This ensures that two concurrent attempts to
+ * UPDATE the same tuple at the same time can't turn one tuple
+ * into two, and that an UPDATE of a just-deleted tuple can't
+ * resurrect it.
+ */
+ if (delete_skipped)
+ return NULL;
+
+ if (mtstate->mt_transition_capture)
+ saved_tcs_map = mtstate->mt_transition_capture->tcs_map;
+
+ ret_slot = ExecInsert(mtstate, slot, planSlot, NULL,
+ ONCONFLICT_NONE, estate, canSetTag);
+
+ if (mtstate->mt_transition_capture)
+ {
+ /*
+ * Revert to the transition capture map created for
+ * UPDATE; otherwise the next UPDATE will incorrectly use the
+ * one created for the INSERT.
+ */
+ mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
+ mtstate->mt_transition_capture->tcs_map = saved_tcs_map;
+ }
+ return ret_slot;
+ }
+
+ /*
* Check the constraints of the tuple. Note that we pass the same
* slot for the orig_slot argument, because unlike ExecInsert(), no
* tuple-routing is performed here, hence the slot remains unchanged.
+ * We have already checked partition constraints above, so skip them
+ * below.
*/
- if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck)
- ExecConstraints(resultRelInfo, slot, estate);
+ if (resultRelationDesc->rd_att->constr)
+ ExecConstraints(resultRelInfo, slot, estate, false);
/*
* replace the heap tuple
@@ -1468,6 +1654,45 @@ fireASTriggers(ModifyTableState *node)
}
/*
+ * Set up the per-subplan tuple conversion maps from each child partition to
+ * the root partitioned table. The maps are needed for collecting transition
+ * tuples for AFTER triggers, and for UPDATE row movement.
+ */
+static void
+ExecSetupPerSubplanChildParentMap(ModifyTableState *mtstate)
+{
+ TupleConversionMap **tup_conv_maps;
+ TupleDesc outdesc;
+ ResultRelInfo *resultRelInfo;
+ ResultRelInfo *rootRelInfo;
+ int nplans = mtstate->mt_nplans;
+ int i;
+
+ Assert(mtstate->operation != CMD_INSERT);
+
+ if (mtstate->mt_persubplan_childparent_maps != NULL)
+ return;
+
+ rootRelInfo = getASTriggerResultRelInfo(mtstate);
+
+ mtstate->mt_persubplan_childparent_maps =
+ (TupleConversionMap **) palloc0(sizeof(TupleConversionMap *) * nplans);
+
+ /* Get tuple descriptor of the root partition. */
+ outdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
+
+ resultRelInfo = mtstate->resultRelInfo;
+ tup_conv_maps = mtstate->mt_persubplan_childparent_maps;
+ for (i = 0; i < nplans; i++)
+ {
+ TupleDesc indesc = RelationGetDescr(resultRelInfo[i].ri_RelationDesc);
+
+ tup_conv_maps[i] = convert_tuples_by_name(indesc, outdesc,
+ gettext_noop("could not convert row type"));
+ }
+}
+
+/*
* Set up the state needed for collecting transition tuples for AFTER
* triggers.
*/
@@ -1475,6 +1700,11 @@ static void
ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
{
ResultRelInfo *targetRelInfo = getASTriggerResultRelInfo(mtstate);
+ ResultRelInfo **resultRelInfos;
+ int numResultRelInfos;
+ int update_rri_index = -1;
+ ResultRelInfo *update_rri = mtstate->resultRelInfo;
+ Oid cur_reloid = InvalidOid;
int i;
/* Check for transition tables on the directly targeted relation. */
@@ -1489,71 +1719,108 @@ ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
RelationGetRelid(targetRelInfo->ri_RelationDesc),
CMD_UPDATE);
+ if (mtstate->mt_transition_capture == NULL &&
+ mtstate->mt_oc_transition_capture == NULL)
+ return;
+
/*
- * If we found that we need to collect transition tuples then we may also
+ * Now that we know that we need to collect transition tuples, we may also
* need tuple conversion maps for any children that have TupleDescs that
* aren't compatible with the tuplestores. (We can share these maps
* between the regular and ON CONFLICT cases.)
*/
- if (mtstate->mt_transition_capture != NULL ||
- mtstate->mt_oc_transition_capture != NULL)
+
+ /* Make sure per-subplan mapping is there. */
+ if (mtstate->operation != CMD_INSERT)
+ ExecSetupPerSubplanChildParentMap(mtstate);
+
+ /*
+ * Install the conversion map for the first plan for UPDATE and DELETE
+ * operations. It will be advanced each time we switch to the next
+ * plan. (INSERT operations set it every time, so we need not update
+ * mtstate->mt_oc_transition_capture here.)
+ */
+ if (mtstate->mt_transition_capture &&
+ mtstate->mt_persubplan_childparent_maps)
{
- int numResultRelInfos;
+ mtstate->mt_transition_capture->tcs_map =
+ mtstate->mt_persubplan_childparent_maps[0];
+ }
+
+ /* If no tuple routing, return without setting up per-leaf-partition map */
+ if (mtstate->mt_partition_dispatch_info == NULL)
+ return;
- numResultRelInfos = (mtstate->mt_partition_tuple_slot != NULL ?
- mtstate->mt_num_partitions :
- mtstate->mt_nplans);
+ numResultRelInfos = mtstate->mt_num_partitions;
+ resultRelInfos = mtstate->mt_partitions;
- /*
- * Build array of conversion maps from each child's TupleDesc to the
- * one used in the tuplestore. The map pointers may be NULL when no
- * conversion is necessary, which is hopefully a common case for
- * partitions.
- */
- mtstate->mt_transition_tupconv_maps = (TupleConversionMap **)
- palloc0(sizeof(TupleConversionMap *) * numResultRelInfos);
+ /*
+ * Build array of conversion maps from each child's TupleDesc to the one
+ * used in the tuplestore. The map pointers may be NULL when no
+ * conversion is necessary, which is hopefully a common case for
+ * partitions.
+ */
+ mtstate->mt_perleaf_childparent_maps = (TupleConversionMap **)
+ palloc0(sizeof(TupleConversionMap *) * numResultRelInfos);
- /* Choose the right set of partitions */
- if (mtstate->mt_partition_dispatch_info != NULL)
+ /* For Inserts, just create all new map elements. */
+ if (mtstate->operation == CMD_INSERT)
+ {
+ for (i = 0; i < numResultRelInfos; ++i)
{
+ mtstate->mt_perleaf_childparent_maps[i] =
+ convert_tuples_by_name(RelationGetDescr(resultRelInfos[i]->ri_RelationDesc),
+ RelationGetDescr(targetRelInfo->ri_RelationDesc),
+ gettext_noop("could not convert row type"));
+ }
+ return;
+ }
+
+ /*
+ * For UPDATEs, however, we can reuse the per-subplan maps for those leaf
+ * partitions that are also subplan result rels; new maps are built for
+ * the rest.
+ */
+ update_rri_index = 0;
+ update_rri = mtstate->resultRelInfo;
+ if (mtstate->mt_nplans > 0)
+ cur_reloid = RelationGetRelid(update_rri[0].ri_RelationDesc);
+
+ for (i = 0; i < numResultRelInfos; ++i)
+ {
+ ResultRelInfo *resultRelInfo = mtstate->mt_partitions[i];
+
+ /* Is this leaf partition present among the UPDATE result rels? */
+ if (cur_reloid == RelationGetRelid(resultRelInfo->ri_RelationDesc))
+ {
+ Assert(update_rri_index < mtstate->mt_nplans);
+
+ mtstate->mt_perleaf_childparent_maps[i] =
+ mtstate->mt_persubplan_childparent_maps[update_rri_index];
+ update_rri_index++;
+
/*
- * For tuple routing among partitions, we need TupleDescs based
- * on the partition routing table.
+ * If this was the last UPDATE resultrel, indicate that by
+ * invalidating the cur_reloid.
*/
- ResultRelInfo **resultRelInfos = mtstate->mt_partitions;
-
- for (i = 0; i < numResultRelInfos; ++i)
- {
- mtstate->mt_transition_tupconv_maps[i] =
- convert_tuples_by_name(RelationGetDescr(resultRelInfos[i]->ri_RelationDesc),
- RelationGetDescr(targetRelInfo->ri_RelationDesc),
- gettext_noop("could not convert row type"));
- }
+ if (update_rri_index == mtstate->mt_nplans)
+ cur_reloid = InvalidOid;
+ else
+ cur_reloid = RelationGetRelid(update_rri[update_rri_index].ri_RelationDesc);
}
else
{
- /* Otherwise we need the ResultRelInfo for each subplan. */
- ResultRelInfo *resultRelInfos = mtstate->resultRelInfo;
-
- for (i = 0; i < numResultRelInfos; ++i)
- {
- mtstate->mt_transition_tupconv_maps[i] =
- convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc),
- RelationGetDescr(targetRelInfo->ri_RelationDesc),
- gettext_noop("could not convert row type"));
- }
+ mtstate->mt_perleaf_childparent_maps[i] =
+ convert_tuples_by_name(RelationGetDescr(resultRelInfo->ri_RelationDesc),
+ RelationGetDescr(targetRelInfo->ri_RelationDesc),
+ gettext_noop("could not convert row type"));
}
-
- /*
- * Install the conversion map for the first plan for UPDATE and DELETE
- * operations. It will be advanced each time we switch to the next
- * plan. (INSERT operations set it every time, so we need not update
- * mtstate->mt_oc_transition_capture here.)
- */
- if (mtstate->mt_transition_capture)
- mtstate->mt_transition_capture->tcs_map =
- mtstate->mt_transition_tupconv_maps[0];
}
+
+ /*
+ * We should have found all the per-subplan reloids in the leaf
+ * partitions.
+ */
+ Assert(update_rri_index == mtstate->mt_nplans);
}
/* ----------------------------------------------------------------
@@ -1659,15 +1926,15 @@ ExecModifyTable(PlanState *pstate)
/* Prepare to convert transition tuples from this child. */
if (node->mt_transition_capture != NULL)
{
- Assert(node->mt_transition_tupconv_maps != NULL);
+ Assert(node->mt_persubplan_childparent_maps != NULL);
node->mt_transition_capture->tcs_map =
- node->mt_transition_tupconv_maps[node->mt_whichplan];
+ node->mt_persubplan_childparent_maps[node->mt_whichplan];
}
if (node->mt_oc_transition_capture != NULL)
{
- Assert(node->mt_transition_tupconv_maps != NULL);
+ Assert(node->mt_persubplan_childparent_maps != NULL);
node->mt_oc_transition_capture->tcs_map =
- node->mt_transition_tupconv_maps[node->mt_whichplan];
+ node->mt_persubplan_childparent_maps[node->mt_whichplan];
}
continue;
}
@@ -1783,7 +2050,8 @@ ExecModifyTable(PlanState *pstate)
break;
case CMD_DELETE:
slot = ExecDelete(node, tupleid, oldtuple, planSlot,
- &node->mt_epqstate, estate, node->canSetTag);
+ &node->mt_epqstate, estate,
+ NULL, true, node->canSetTag);
break;
default:
elog(ERROR, "unknown operation");
@@ -1828,9 +2096,12 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
ResultRelInfo *resultRelInfo;
TupleDesc tupDesc;
Plan *subplan;
+ int firstVarno = 0;
+ Relation firstResultRel = NULL;
ListCell *l;
int i;
Relation rel;
+ bool update_tuple_routing_needed = node->part_cols_updated;
/* check for unsupported flags */
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
@@ -1903,6 +2174,15 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
resultRelInfo->ri_IndexRelationDescs == NULL)
ExecOpenIndices(resultRelInfo, mtstate->mt_onconflict != ONCONFLICT_NONE);
+ /*
+ * If this is an UPDATE and a BEFORE UPDATE trigger is present, we may
+ * need to do update tuple routing.
+ */
+ if (resultRelInfo->ri_TrigDesc &&
+ resultRelInfo->ri_TrigDesc->trig_update_before_row &&
+ operation == CMD_UPDATE)
+ update_tuple_routing_needed = true;
+
/* Now init the plan for this result rel */
estate->es_result_relation_info = resultRelInfo;
mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);
@@ -1940,36 +2220,64 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
else
rel = mtstate->resultRelInfo->ri_RelationDesc;
- /* Build state for INSERT tuple routing */
- if (operation == CMD_INSERT &&
- rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ /* Decide whether we need to perform update tuple routing. */
+ if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
+ update_tuple_routing_needed = false;
+
+ /*
+ * Build state for tuple routing if it's an INSERT or if it's an UPDATE of
+ * partition key.
+ */
+ if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
+ (operation == CMD_INSERT || update_tuple_routing_needed))
{
PartitionDispatch *partition_dispatch_info;
ResultRelInfo **partitions;
- TupleConversionMap **partition_tupconv_maps;
+ TupleConversionMap **perleaf_parentchild_maps;
TupleTableSlot *partition_tuple_slot;
int num_parted,
num_partitions;
ExecSetupPartitionTupleRouting(rel,
- NULL,
- 0,
+ (operation == CMD_UPDATE ?
+ mtstate->resultRelInfo : NULL),
+ (operation == CMD_UPDATE ? nplans : 0),
node->nominalRelation,
estate,
&partition_dispatch_info,
&partitions,
- &partition_tupconv_maps,
+ &perleaf_parentchild_maps,
&partition_tuple_slot,
&num_parted, &num_partitions);
mtstate->mt_partition_dispatch_info = partition_dispatch_info;
mtstate->mt_num_dispatch = num_parted;
mtstate->mt_partitions = partitions;
mtstate->mt_num_partitions = num_partitions;
- mtstate->mt_partition_tupconv_maps = partition_tupconv_maps;
+ mtstate->mt_perleaf_parentchild_maps = perleaf_parentchild_maps;
mtstate->mt_partition_tuple_slot = partition_tuple_slot;
+ mtstate->mt_rootpartition_tuple_slot = MakeTupleTableSlot();
+
+ /*
+ * The following are needed as reference objects for mapping partition
+ * attnos in expressions such as WITH CHECK OPTION and RETURNING.
+ */
+ firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
+ firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
}
/*
+ * Construct a mapping from each of the per-subplan result rel attnos to
+ * the root attnos. This is required when, during UPDATE row movement,
+ * the tuple descriptor of a source partition does not match the root
+ * partitioned table's descriptor. In such a case we need to convert
+ * tuples to the root partition's tuple descriptor, because the search
+ * for the destination partition starts from the root. Skip this setup
+ * if it's not a partition key update.
+ */
+ if (update_tuple_routing_needed)
+ ExecSetupPerSubplanChildParentMap(mtstate);
+
+ /*
* Build state for collecting transition tuples. This requires having a
* valid trigger query context, so skip it in explain-only mode.
*/
@@ -2006,50 +2314,53 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* Build WITH CHECK OPTION constraints for each leaf partition rel. Note
* that we didn't build the withCheckOptionList for each partition within
* the planner, but simple translation of the varattnos for each partition
- * will suffice. This only occurs for the INSERT case; UPDATE/DELETE
- * cases are handled above.
+ * will suffice. This only occurs for the INSERT case or for UPDATE row
+ * movement. DELETEs and local UPDATEs are handled above.
*/
if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
{
- List *wcoList;
- PlanState *plan;
+ List *firstWco;
/*
* In case of INSERT on partitioned tables, there is only one plan.
* Likewise, there is only one WITH CHECK OPTIONS list, not one per
- * partition. We make a copy of the WCO qual for each partition; note
- * that, if there are SubPlans in there, they all end up attached to
- * the one parent Plan node.
+ * partition. For UPDATE, however, there are as many WCO lists as there are
+ * plans. So in either case, use the WCO expression of the first
+ * resultRelInfo as a reference to calculate attnos for the WCO
+ * expression of each of the partitions. We make a copy of the WCO
+ * qual for each partition. Note that, if there are SubPlans in there,
+ * they all end up attached to the one parent Plan node.
*/
- Assert(operation == CMD_INSERT &&
- list_length(node->withCheckOptionLists) == 1 &&
- mtstate->mt_nplans == 1);
- wcoList = linitial(node->withCheckOptionLists);
- plan = mtstate->mt_plans[0];
+ Assert(update_tuple_routing_needed ||
+ (operation == CMD_INSERT &&
+ list_length(node->withCheckOptionLists) == 1 &&
+ mtstate->mt_nplans == 1));
+
+ firstWco = linitial(node->withCheckOptionLists);
for (i = 0; i < mtstate->mt_num_partitions; i++)
{
Relation partrel;
- List *mapped_wcoList;
+ List *mappedWco;
List *wcoExprs = NIL;
ListCell *ll;
resultRelInfo = mtstate->mt_partitions[i];
partrel = resultRelInfo->ri_RelationDesc;
- /* varno = node->nominalRelation */
- mapped_wcoList = map_partition_varattnos(wcoList,
- node->nominalRelation,
- partrel, rel, NULL);
- foreach(ll, mapped_wcoList)
+ mappedWco = map_partition_varattnos(firstWco,
+ firstVarno,
+ partrel, firstResultRel,
+ NULL);
+ foreach(ll, mappedWco)
{
WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll));
ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
- plan);
+ &mtstate->ps);
wcoExprs = lappend(wcoExprs, wcoExpr);
}
- resultRelInfo->ri_WithCheckOptions = mapped_wcoList;
+ resultRelInfo->ri_WithCheckOptions = mappedWco;
resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
}
}
@@ -2061,7 +2372,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
{
TupleTableSlot *slot;
ExprContext *econtext;
- List *returningList;
+ List *firstReturningList;
/*
* Initialize result tuple slot and assign its rowtype using the first
@@ -2098,10 +2409,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* Build a projection for each leaf partition rel. Note that we
* didn't build the returningList for each partition within the
* planner, but simple translation of the varattnos for each partition
- * will suffice. This only occurs for the INSERT case; UPDATE/DELETE
- * are handled above.
+ * will suffice. This only occurs for the INSERT case or for UPDATE
+ * row movement. DELETEs and local UPDATEs are handled above.
*/
- returningList = linitial(node->returningLists);
+ firstReturningList = linitial(node->returningLists);
for (i = 0; i < mtstate->mt_num_partitions; i++)
{
Relation partrel;
@@ -2110,10 +2421,14 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
resultRelInfo = mtstate->mt_partitions[i];
partrel = resultRelInfo->ri_RelationDesc;
- /* varno = node->nominalRelation */
- rlist = map_partition_varattnos(returningList,
- node->nominalRelation,
- partrel, rel, NULL);
+ /*
+ * Use the RETURNING expression of the first resultRelInfo as a
+ * reference to calculate attnos for the RETURNING expression of
+ * each of the partitions.
+ */
+ rlist = map_partition_varattnos(firstReturningList,
+ firstVarno,
+ partrel, firstResultRel, NULL);
resultRelInfo->ri_projectReturning =
ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
resultRelInfo->ri_RelationDesc->rd_att);
@@ -2359,6 +2674,7 @@ void
ExecEndModifyTable(ModifyTableState *node)
{
int i;
+ CmdType operation = node->operation;
/*
* Allow any FDWs to shut down
@@ -2393,11 +2709,23 @@ ExecEndModifyTable(ModifyTableState *node)
{
ResultRelInfo *resultRelInfo = node->mt_partitions[i];
+ /*
+ * If this result rel is one of the subplan result rels, let
+ * ExecEndPlan() close it. For INSERTs, this does not apply because
+ * all leaf partition result rels are newly allocated anyway.
+ */
+ if (operation == CMD_UPDATE &&
+ resultRelInfo >= node->resultRelInfo &&
+ resultRelInfo < node->resultRelInfo + node->mt_nplans)
+ continue;
+
ExecCloseIndices(resultRelInfo);
heap_close(resultRelInfo->ri_RelationDesc, NoLock);
}
- /* Release the standalone partition tuple descriptor, if any */
+ /* Release the standalone partition tuple descriptors, if any */
+ if (node->mt_rootpartition_tuple_slot)
+ ExecDropSingleTupleTableSlot(node->mt_rootpartition_tuple_slot);
if (node->mt_partition_tuple_slot)
ExecDropSingleTupleTableSlot(node->mt_partition_tuple_slot);
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index f1bed14..2d86593 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -204,6 +204,7 @@ _copyModifyTable(const ModifyTable *from)
COPY_SCALAR_FIELD(canSetTag);
COPY_SCALAR_FIELD(nominalRelation);
COPY_NODE_FIELD(partitioned_rels);
+ COPY_SCALAR_FIELD(part_cols_updated);
COPY_NODE_FIELD(resultRelations);
COPY_SCALAR_FIELD(resultRelIndex);
COPY_SCALAR_FIELD(rootResultRelIndex);
@@ -2260,6 +2261,7 @@ _copyPartitionedChildRelInfo(const PartitionedChildRelInfo *from)
COPY_SCALAR_FIELD(parent_relid);
COPY_NODE_FIELD(child_rels);
+ COPY_BITMAPSET_FIELD(all_part_cols);
return newnode;
}
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 8b56b91..9428c2c 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -909,6 +909,7 @@ _equalPartitionedChildRelInfo(const PartitionedChildRelInfo *a, const Partitione
{
COMPARE_SCALAR_FIELD(parent_relid);
COMPARE_NODE_FIELD(child_rels);
+ COMPARE_BITMAPSET_FIELD(all_part_cols);
return true;
}
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index b83d919..2492cb8 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -367,6 +367,7 @@ _outModifyTable(StringInfo str, const ModifyTable *node)
WRITE_BOOL_FIELD(canSetTag);
WRITE_UINT_FIELD(nominalRelation);
WRITE_NODE_FIELD(partitioned_rels);
+ WRITE_BOOL_FIELD(part_cols_updated);
WRITE_NODE_FIELD(resultRelations);
WRITE_INT_FIELD(resultRelIndex);
WRITE_INT_FIELD(rootResultRelIndex);
@@ -2096,6 +2097,7 @@ _outModifyTablePath(StringInfo str, const ModifyTablePath *node)
WRITE_BOOL_FIELD(canSetTag);
WRITE_UINT_FIELD(nominalRelation);
WRITE_NODE_FIELD(partitioned_rels);
+ WRITE_BOOL_FIELD(part_cols_updated);
WRITE_NODE_FIELD(resultRelations);
WRITE_NODE_FIELD(subpaths);
WRITE_NODE_FIELD(subroots);
@@ -2518,6 +2520,7 @@ _outPartitionedChildRelInfo(StringInfo str, const PartitionedChildRelInfo *node)
WRITE_UINT_FIELD(parent_relid);
WRITE_NODE_FIELD(child_rels);
+ WRITE_BITMAPSET_FIELD(all_part_cols);
}
static void
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index fbf8330..0b1c70e 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -1562,6 +1562,7 @@ _readModifyTable(void)
READ_BOOL_FIELD(canSetTag);
READ_UINT_FIELD(nominalRelation);
READ_NODE_FIELD(partitioned_rels);
+ READ_BOOL_FIELD(part_cols_updated);
READ_NODE_FIELD(resultRelations);
READ_INT_FIELD(resultRelIndex);
READ_INT_FIELD(rootResultRelIndex);
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index a7866a9..946964a 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -1310,7 +1310,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
case RTE_RELATION:
if (rte->relkind == RELKIND_PARTITIONED_TABLE)
partitioned_rels =
- get_partitioned_child_rels(root, rel->relid);
+ get_partitioned_child_rels(root, rel->relid, NULL);
break;
case RTE_SUBQUERY:
build_partitioned_rels = true;
@@ -1337,7 +1337,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
{
List *cprels;
- cprels = get_partitioned_child_rels(root, childrel->relid);
+ cprels = get_partitioned_child_rels(root, childrel->relid, NULL);
partitioned_rels = list_concat(partitioned_rels,
list_copy(cprels));
}
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 2821662..85e3126 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -277,6 +277,7 @@ static ProjectSet *make_project_set(List *tlist, Plan *subplan);
static ModifyTable *make_modifytable(PlannerInfo *root,
CmdType operation, bool canSetTag,
Index nominalRelation, List *partitioned_rels,
+ bool part_cols_updated,
List *resultRelations, List *subplans,
List *withCheckOptionLists, List *returningLists,
List *rowMarks, OnConflictExpr *onconflict, int epqParam);
@@ -2361,6 +2362,7 @@ create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path)
best_path->canSetTag,
best_path->nominalRelation,
best_path->partitioned_rels,
+ best_path->part_cols_updated,
best_path->resultRelations,
subplans,
best_path->withCheckOptionLists,
@@ -6405,6 +6407,7 @@ static ModifyTable *
make_modifytable(PlannerInfo *root,
CmdType operation, bool canSetTag,
Index nominalRelation, List *partitioned_rels,
+ bool part_cols_updated,
List *resultRelations, List *subplans,
List *withCheckOptionLists, List *returningLists,
List *rowMarks, OnConflictExpr *onconflict, int epqParam)
@@ -6431,6 +6434,7 @@ make_modifytable(PlannerInfo *root,
node->canSetTag = canSetTag;
node->nominalRelation = nominalRelation;
node->partitioned_rels = partitioned_rels;
+ node->part_cols_updated = part_cols_updated;
node->resultRelations = resultRelations;
node->resultRelIndex = -1; /* will be set correctly in setrefs.c */
node->rootResultRelIndex = -1; /* will be set correctly in setrefs.c */
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 7f146d6..3aad00b 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -1060,6 +1060,7 @@ inheritance_planner(PlannerInfo *root)
Query *parent_parse;
Bitmapset *parent_relids = bms_make_singleton(top_parentRTindex);
PlannerInfo **parent_roots = NULL;
+ bool part_cols_updated = false;
Assert(parse->commandType != CMD_INSERT);
@@ -1130,10 +1131,16 @@ inheritance_planner(PlannerInfo *root)
parent_rte = rt_fetch(top_parentRTindex, root->parse->rtable);
if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE)
{
+ Bitmapset *all_part_cols = NULL;
+
nominalRelation = top_parentRTindex;
- partitioned_rels = get_partitioned_child_rels(root, top_parentRTindex);
+ partitioned_rels = get_partitioned_child_rels(root, top_parentRTindex,
+ &all_part_cols);
/* The root partitioned table is included as a child rel */
Assert(list_length(partitioned_rels) >= 1);
+
+ if (bms_overlap(all_part_cols, parent_rte->updatedCols))
+ part_cols_updated = true;
}
/*
@@ -1471,6 +1478,7 @@ inheritance_planner(PlannerInfo *root)
parse->canSetTag,
nominalRelation,
partitioned_rels,
+ part_cols_updated,
resultRelations,
subpaths,
subroots,
@@ -2088,6 +2096,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
parse->canSetTag,
parse->resultRelation,
NIL,
+ false,
list_make1_int(parse->resultRelation),
list_make1(path),
list_make1(root),
@@ -6118,11 +6127,16 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
* Returns a list of the RT indexes of the partitioned child relations
* with rti as the root parent RT index.
*
+ * If all_part_cols_p is non-NULL, *all_part_cols_p is set to a bitmapset
+ * of all partitioning columns used by the partitioned table or any
+ * descendant.
+ *
* Note: This function might get called even for range table entries that
* are not partitioned tables; in such a case, it will simply return NIL.
*/
List *
-get_partitioned_child_rels(PlannerInfo *root, Index rti)
+get_partitioned_child_rels(PlannerInfo *root, Index rti,
+ Bitmapset **all_part_cols_p)
{
List *result = NIL;
ListCell *l;
@@ -6134,6 +6148,8 @@ get_partitioned_child_rels(PlannerInfo *root, Index rti)
if (pc->parent_relid == rti)
{
result = pc->child_rels;
+ if (all_part_cols_p)
+ *all_part_cols_p = pc->all_part_cols;
break;
}
}
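A minimal sketch of what the planner hunk above decides (illustration only, not part of the patch; the p/p1 names are hypothetical): part_cols_updated becomes true exactly when the statement's updated columns overlap the partition key columns collected from the root table or from any partitioned descendant.

    -- hypothetical schema, for illustration only
    CREATE TABLE p (a int, b int, c int) PARTITION BY LIST (a);
    CREATE TABLE p1 PARTITION OF p FOR VALUES IN (1) PARTITION BY LIST (b);
    CREATE TABLE p1_1 PARTITION OF p1 FOR VALUES IN (1);
    INSERT INTO p VALUES (1, 1, 0);

    UPDATE p SET c = 0;  -- no partition key column updated: part_cols_updated stays false
    UPDATE p SET b = 1;  -- b is the key of descendant p1: part_cols_updated is true
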
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 3e0c3de..f28b381 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -105,7 +105,8 @@ static void expand_partitioned_rtentry(PlannerInfo *root,
RangeTblEntry *parentrte,
Index parentRTindex, Relation parentrel,
PlanRowMark *top_parentrc, LOCKMODE lockmode,
- List **appinfos, List **partitioned_child_rels);
+ List **appinfos, Bitmapset **all_part_cols,
+ List **partitioned_child_rels);
static void expand_single_inheritance_child(PlannerInfo *root,
RangeTblEntry *parentrte,
Index parentRTindex, Relation parentrel,
@@ -1464,15 +1465,20 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
if (RelationGetPartitionDesc(oldrelation) != NULL)
{
List *partitioned_child_rels = NIL;
+ Bitmapset *all_part_cols = NULL;
Assert(rte->relkind == RELKIND_PARTITIONED_TABLE);
/*
* If this table has partitions, recursively expand them in the order
- * in which they appear in the PartitionDesc.
+ * in which they appear in the PartitionDesc. Also, extract the
+ * partition key columns of the root partitioned table. Those of the
+ * child partitions will be collected during recursive expansion.
*/
+ pull_child_partition_columns(&all_part_cols, oldrelation, oldrelation);
expand_partitioned_rtentry(root, rte, rti, oldrelation, oldrc,
lockmode, &root->append_rel_list,
+ &all_part_cols,
&partitioned_child_rels);
/*
@@ -1490,6 +1496,7 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
pcinfo = makeNode(PartitionedChildRelInfo);
pcinfo->parent_relid = rti;
pcinfo->child_rels = partitioned_child_rels;
+ pcinfo->all_part_cols = all_part_cols;
root->pcinfo_list = lappend(root->pcinfo_list, pcinfo);
}
}
@@ -1566,7 +1573,8 @@ static void
expand_partitioned_rtentry(PlannerInfo *root, RangeTblEntry *parentrte,
Index parentRTindex, Relation parentrel,
PlanRowMark *top_parentrc, LOCKMODE lockmode,
- List **appinfos, List **partitioned_child_rels)
+ List **appinfos, Bitmapset **all_part_cols,
+ List **partitioned_child_rels)
{
int i;
RangeTblEntry *childrte;
@@ -1618,9 +1626,15 @@ expand_partitioned_rtentry(PlannerInfo *root, RangeTblEntry *parentrte,
/* If this child is itself partitioned, recurse */
if (childrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ {
+ /* Also, collect this child's partition key columns */
+ pull_child_partition_columns(all_part_cols, childrel, parentrel);
+
expand_partitioned_rtentry(root, childrte, childRTindex,
childrel, top_parentrc, lockmode,
- appinfos, partitioned_child_rels);
+ appinfos, all_part_cols,
+ partitioned_child_rels);
+ }
/* Close child relation, but keep locks */
heap_close(childrel, NoLock);
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 26567cb..326c858 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -3162,6 +3162,8 @@ create_lockrows_path(PlannerInfo *root, RelOptInfo *rel,
* 'partitioned_rels' is an integer list of RT indexes of non-leaf tables in
* the partition tree, if this is an UPDATE/DELETE to a partitioned table.
* Otherwise NIL.
+ * 'part_cols_updated' is true if any partitioning columns are being updated,
+ * either in the named relation or in a descendant partitioned table.
* 'resultRelations' is an integer list of actual RT indexes of target rel(s)
* 'subpaths' is a list of Path(s) producing source data (one per rel)
* 'subroots' is a list of PlannerInfo structs (one per rel)
@@ -3175,6 +3177,7 @@ ModifyTablePath *
create_modifytable_path(PlannerInfo *root, RelOptInfo *rel,
CmdType operation, bool canSetTag,
Index nominalRelation, List *partitioned_rels,
+ bool part_cols_updated,
List *resultRelations, List *subpaths,
List *subroots,
List *withCheckOptionLists, List *returningLists,
@@ -3242,6 +3245,7 @@ create_modifytable_path(PlannerInfo *root, RelOptInfo *rel,
pathnode->canSetTag = canSetTag;
pathnode->nominalRelation = nominalRelation;
pathnode->partitioned_rels = list_copy(partitioned_rels);
+ pathnode->part_cols_updated = part_cols_updated;
pathnode->resultRelations = resultRelations;
pathnode->subpaths = subpaths;
pathnode->subroots = subroots;
diff --git a/src/include/catalog/partition.h b/src/include/catalog/partition.h
index 454a940..b714bc3 100644
--- a/src/include/catalog/partition.h
+++ b/src/include/catalog/partition.h
@@ -80,8 +80,8 @@ extern void check_new_partition_bound(char *relname, Relation parent,
extern Oid get_partition_parent(Oid relid);
extern List *get_qual_from_partbound(Relation rel, Relation parent,
PartitionBoundSpec *spec);
-extern List *map_partition_varattnos(List *expr, int target_varno,
- Relation partrel, Relation parent,
+extern List *map_partition_varattnos(List *expr, int fromrel_varno,
+ Relation to_rel, Relation from_rel,
bool *found_whole_row);
extern List *RelationGetPartitionQual(Relation rel);
extern Expr *get_partition_qual_relid(Oid relid);
@@ -99,6 +99,9 @@ extern int get_partition_for_tuple(PartitionDispatch *pd,
EState *estate,
PartitionDispatchData **failed_at,
TupleTableSlot **failed_slot);
+extern void pull_child_partition_columns(Bitmapset **bitmapset,
+ Relation rel,
+ Relation parent);
extern Oid get_default_oid_from_partdesc(PartitionDesc partdesc);
extern Oid get_default_partition_oid(Oid parentId);
extern void update_default_partition_oid(Oid parentId, Oid defaultPartId);
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index cc1cc2a..8e2bf5f 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -187,7 +187,10 @@ extern ResultRelInfo *ExecGetTriggerResultRel(EState *estate, Oid relid);
extern void ExecCleanUpTriggerState(EState *estate);
extern bool ExecContextForcesOids(PlanState *planstate, bool *hasoids);
extern void ExecConstraints(ResultRelInfo *resultRelInfo,
- TupleTableSlot *slot, EState *estate);
+ TupleTableSlot *slot, EState *estate,
+ bool check_partition_constraint);
+extern void ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot, EState *estate);
extern void ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
TupleTableSlot *slot, EState *estate);
extern LockTupleMode ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo);
@@ -220,6 +223,8 @@ extern int ExecFindPartition(ResultRelInfo *resultRelInfo,
PartitionDispatch *pd,
TupleTableSlot *slot,
EState *estate);
+extern bool ExecPartitionCheck(ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot, EState *estate);
#define EvalPlanQualSetSlot(epqstate, slot) ((epqstate)->origslot = (slot))
extern void EvalPlanQualFetchRowMarks(EPQState *epqstate);
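A minimal sketch of the behavior these executor entry points enable (illustration only; the t/t1/t2 names are hypothetical): ExecPartitionCheck() lets the UPDATE path detect a partition constraint failure without raising an error, so the row can be re-routed, while updating a leaf directly still reports the error (presumably via ExecPartitionCheckEmitError()).

    -- hypothetical example, for illustration only
    CREATE TABLE t (a int) PARTITION BY LIST (a);
    CREATE TABLE t1 PARTITION OF t FOR VALUES IN (1);
    CREATE TABLE t2 PARTITION OF t FOR VALUES IN (2);
    INSERT INTO t VALUES (1);

    UPDATE t1 SET a = 2;  -- fails: direct update of the leaf still raises the error
    UPDATE t  SET a = 2;  -- ok: the row is re-routed from t1 into t2
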
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 9187f7a..9ba1976 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -979,15 +979,32 @@ typedef struct ModifyTableState
int mt_num_partitions; /* Number of members in the following
* arrays */
ResultRelInfo **mt_partitions; /* Per partition result relation pointers */
- TupleConversionMap **mt_partition_tupconv_maps;
- /* Per partition tuple conversion map */
+
+ /*
+ * Per partition conversion map to convert tuples from root to leaf
+ * partition
+ */
+ TupleConversionMap **mt_perleaf_parentchild_maps;
+
+ /*
+ * Per partition conversion map to convert tuples from leaf partition to
+ * root
+ */
+ TupleConversionMap **mt_perleaf_childparent_maps;
+
+ /*
+ * Per subplan conversion map to convert tuples from leaf partition to
+ * root partitioned table
+ */
+ TupleConversionMap **mt_persubplan_childparent_maps;
+
TupleTableSlot *mt_partition_tuple_slot;
+ TupleTableSlot *mt_rootpartition_tuple_slot;
+
struct TransitionCaptureState *mt_transition_capture;
/* controls transition table population for specified operation */
struct TransitionCaptureState *mt_oc_transition_capture;
/* controls transition table population for INSERT...ON CONFLICT UPDATE */
- TupleConversionMap **mt_transition_tupconv_maps;
- /* Per plan/partition tuple conversion */
} ModifyTableState;
/* ----------------
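A short illustration of why ModifyTableState now carries conversion maps in both directions (hypothetical example, not part of the patch): if a leaf was attached with a different column order than the root, a moved tuple must be converted root-to-leaf before the INSERT, and leaf-to-root for RETURNING and transition tables.

    -- hypothetical example, for illustration only
    CREATE TABLE r (a text, b int) PARTITION BY LIST (a);
    CREATE TABLE r_a PARTITION OF r FOR VALUES IN ('a');
    CREATE TABLE r_b (b int, a text);               -- different attribute order
    ALTER TABLE r ATTACH PARTITION r_b FOR VALUES IN ('b');
    INSERT INTO r VALUES ('a', 1);

    UPDATE r SET a = 'b' RETURNING *;  -- the row moves into r_b, but RETURNING
                                       -- reports it in r's column order (a, b)
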
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index a382331..6981f58 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -219,6 +219,7 @@ typedef struct ModifyTable
Index nominalRelation; /* Parent RT index for use of EXPLAIN */
/* RT indexes of non-leaf tables in a partition tree */
List *partitioned_rels;
+ bool part_cols_updated; /* some part col in hierarchy updated */
List *resultRelations; /* integer list of RT indexes */
int resultRelIndex; /* index of first resultRel in plan's list */
int rootResultRelIndex; /* index of the partitioned table root */
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index 48e6012..5e7d07c 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -1633,6 +1633,7 @@ typedef struct ModifyTablePath
Index nominalRelation; /* Parent RT index for use of EXPLAIN */
/* RT indexes of non-leaf tables in a partition tree */
List *partitioned_rels;
+ bool part_cols_updated; /* some part col in hierarchy updated */
List *resultRelations; /* integer list of RT indexes */
List *subpaths; /* Path(s) producing source data */
List *subroots; /* per-target-table PlannerInfos */
@@ -2075,6 +2076,10 @@ typedef struct AppendRelInfo
* The child_rels list must contain at least one element, because the parent
* partitioned table is itself counted as a child.
*
+ * all_part_cols contains all attribute numbers from the parent that are
+ * used as partitioning columns by the parent or by any descendant that is
+ * itself partitioned.
+ *
* These structs are kept in the PlannerInfo node's pcinfo_list.
*/
typedef struct PartitionedChildRelInfo
@@ -2083,6 +2088,7 @@ typedef struct PartitionedChildRelInfo
Index parent_relid;
List *child_rels;
+ Bitmapset *all_part_cols;
} PartitionedChildRelInfo;
/*
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index e372f88..b38f2f1 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -238,6 +238,7 @@ extern ModifyTablePath *create_modifytable_path(PlannerInfo *root,
RelOptInfo *rel,
CmdType operation, bool canSetTag,
Index nominalRelation, List *partitioned_rels,
+ bool part_cols_updated,
List *resultRelations, List *subpaths,
List *subroots,
List *withCheckOptionLists, List *returningLists,
diff --git a/src/include/optimizer/planner.h b/src/include/optimizer/planner.h
index 2a4cf71..c6c15c5 100644
--- a/src/include/optimizer/planner.h
+++ b/src/include/optimizer/planner.h
@@ -57,6 +57,7 @@ extern Expr *preprocess_phv_expression(PlannerInfo *root, Expr *expr);
extern bool plan_cluster_use_sort(Oid tableOid, Oid indexOid);
-extern List *get_partitioned_child_rels(PlannerInfo *root, Index rti);
+extern List *get_partitioned_child_rels(PlannerInfo *root, Index rti,
+ Bitmapset **all_part_cols_p);
#endif /* PLANNER_H */
diff --git a/src/test/regress/expected/update.out b/src/test/regress/expected/update.out
index cef70b1..82c63f9 100644
--- a/src/test/regress/expected/update.out
+++ b/src/test/regress/expected/update.out
@@ -198,27 +198,345 @@ INSERT INTO upsert_test VALUES (1, 'Bat') ON CONFLICT(a)
DROP TABLE update_test;
DROP TABLE upsert_test;
--- update to a partition should check partition bound constraint for the new tuple
-create table range_parted (
+---------------------------
+-- UPDATE with row movement
+---------------------------
+-- update to a partition should check the partition bound constraint for the new tuple.
+-- If the partition key is updated, the row should be moved to the appropriate
+-- partition. Updatable views using partitions should enforce the check options
+-- for the rows that have been moved.
+create table mintab(c1 int);
+insert into mintab values (120);
+CREATE TABLE range_parted (
a text,
- b int
+ b int,
+ c numeric
) partition by range (a, b);
-create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10);
-create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20);
+CREATE VIEW upview AS SELECT * FROM range_parted WHERE (select c > c1 from mintab) WITH CHECK OPTION;
+-- Create partitions intentionally in descending bound order, to test that
+-- the subplans are ordered in ascending bound order rather than by OID.
+create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20) partition by range (c);
create table part_b_1_b_10 partition of range_parted for values from ('b', 1) to ('b', 10);
-create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20);
-insert into part_a_1_a_10 values ('a', 1);
-insert into part_b_10_b_20 values ('b', 10);
--- fail
-update part_a_1_a_10 set a = 'b' where a = 'a';
-ERROR: new row for relation "part_a_1_a_10" violates partition constraint
-DETAIL: Failing row contains (b, 1).
-update range_parted set b = b - 1 where b = 10;
-ERROR: new row for relation "part_b_10_b_20" violates partition constraint
-DETAIL: Failing row contains (b, 9).
--- ok
-update range_parted set b = b + 1 where b = 10;
+create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20);
+create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10);
+-- This tests partition-key UPDATE on a partitioned table that does not have any child partitions
+update part_b_10_b_20 set b = b - 6;
+-- As mentioned above, the partition creation is intentionally kept in descending bound order.
+create table part_c_100_200 (c numeric, a text, b int);
+alter table part_b_10_b_20 attach partition part_c_100_200 for values from (100) to (200);
+create table part_c_1_100 (b int, c numeric, a text);
+alter table part_b_10_b_20 attach partition part_c_1_100 for values from (1) to (100);
+\set init_range_parted 'truncate range_parted; insert into range_parted values (''a'', 1, NULL), (''a'', 10, 200), (''b'', 12, 96), (''b'', 13, 97), (''b'', 15, 105), (''b'', 17, 105)'
+:init_range_parted;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 200
+ part_a_1_a_10 | a | 1 |
+ part_c_100_200 | b | 15 | 105
+ part_c_100_200 | b | 17 | 105
+ part_c_1_100 | b | 12 | 96
+ part_c_1_100 | b | 13 | 97
+(6 rows)
+
+-- The order of subplans should be in bound order
+explain (costs off) update range_parted set c = c - 50 where c > 97;
+ QUERY PLAN
+-------------------------------------
+ Update on range_parted
+ Update on part_a_1_a_10
+ Update on part_a_10_a_20
+ Update on part_b_1_b_10
+ Update on part_c_1_100
+ Update on part_c_100_200
+ -> Seq Scan on part_a_1_a_10
+ Filter: (c > '97'::numeric)
+ -> Seq Scan on part_a_10_a_20
+ Filter: (c > '97'::numeric)
+ -> Seq Scan on part_b_1_b_10
+ Filter: (c > '97'::numeric)
+ -> Seq Scan on part_c_1_100
+ Filter: (c > '97'::numeric)
+ -> Seq Scan on part_c_100_200
+ Filter: (c > '97'::numeric)
+(16 rows)
+
+-- fail (row movement happens only within the partition subtree) :
+update part_c_1_100 set c = c + 20 where c = 96;
+ERROR: new row for relation "part_c_1_100" violates partition constraint
+DETAIL: Failing row contains (12, 116, b).
+-- No row found :
+update part_c_1_100 set c = c + 20 where c = 98;
+-- ok (row movement)
+update part_b_10_b_20 set c = c + 20 returning c, b, a;
+ c | b | a
+-----+----+---
+ 116 | 12 | b
+ 117 | 13 | b
+ 125 | 15 | b
+ 125 | 17 | b
+(4 rows)
+
+select a, b, c from part_c_1_100 order by 1, 2, 3;
+ a | b | c
+---+---+---
+(0 rows)
+
+select a, b, c from part_c_100_200 order by 1, 2, 3;
+ a | b | c
+---+----+-----
+ b | 12 | 116
+ b | 13 | 117
+ b | 15 | 125
+ b | 17 | 125
+(4 rows)
+
+-- fail (row movement happens only within the partition subtree) :
+update part_b_10_b_20 set b = b - 6 where c > 116 returning *;
+ERROR: new row for relation "part_c_100_200" violates partition constraint
+DETAIL: Failing row contains (117, b, 7).
+-- ok (row movement, with subset of rows moved into different partition)
+update range_parted set b = b - 6 where c > 116 returning a, b + c;
+ a | ?column?
+---+----------
+ a | 204
+ b | 124
+ b | 134
+ b | 136
+(4 rows)
+
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_1_a_10 | a | 1 |
+ part_a_1_a_10 | a | 4 | 200
+ part_b_1_b_10 | b | 7 | 117
+ part_b_1_b_10 | b | 9 | 125
+ part_c_100_200 | b | 11 | 125
+ part_c_100_200 | b | 12 | 116
+(6 rows)
+
+-- update partition key using updatable view.
+-- succeeds
+update upview set c = 199 where b = 4;
+-- fail, check option violation
+update upview set c = 120 where b = 4;
+ERROR: new row violates check option for view "upview"
+DETAIL: Failing row contains (a, 4, 120).
+-- fail, row movement with check option violation
+update upview set a = 'b', b = 15, c = 120 where b = 4;
+ERROR: new row violates check option for view "upview"
+DETAIL: Failing row contains (b, 15, 120).
+-- succeeds, row movement, check option passes
+update upview set a = 'b', b = 15 where b = 4;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_1_a_10 | a | 1 |
+ part_b_1_b_10 | b | 7 | 117
+ part_b_1_b_10 | b | 9 | 125
+ part_c_100_200 | b | 11 | 125
+ part_c_100_200 | b | 12 | 116
+ part_c_100_200 | b | 15 | 199
+(6 rows)
+
+-- cleanup
+drop view upview;
+-- RETURNING having whole-row vars.
+----------------------------------
+:init_range_parted;
+update range_parted set c = 95 where a = 'b' and b > 10 and c > 100 returning (range_parted) , *;
+ range_parted | a | b | c
+--------------+---+----+----
+ (b,15,95) | b | 15 | 95
+ (b,17,95) | b | 17 | 95
+(2 rows)
+
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 200
+ part_a_1_a_10 | a | 1 |
+ part_c_1_100 | b | 12 | 96
+ part_c_1_100 | b | 13 | 97
+ part_c_1_100 | b | 15 | 95
+ part_c_1_100 | b | 17 | 95
+(6 rows)
+
+-- Transition tables with update row movement
+---------------------------------------------
+:init_range_parted;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 200
+ part_a_1_a_10 | a | 1 |
+ part_c_100_200 | b | 15 | 105
+ part_c_100_200 | b | 17 | 105
+ part_c_1_100 | b | 12 | 96
+ part_c_1_100 | b | 13 | 97
+(6 rows)
+
+create function trans_updatetrigfunc() returns trigger language plpgsql as
+$$
+ begin
+ raise notice 'trigger = %, old table = %, new table = %',
+ TG_NAME,
+ (select string_agg(old_table::text, ', ' order by a) from old_table),
+ (select string_agg(new_table::text, ', ' order by a) from new_table);
+ return null;
+ end;
+$$;
+create trigger trans_updatetrig
+ after update on range_parted referencing old table as old_table new table as new_table
+ for each statement execute procedure trans_updatetrigfunc();
+update range_parted set c = (case when c = 96 then 110 else c + 1 end ) where a = 'b' and b > 10 and c >= 96;
+NOTICE: trigger = trans_updatetrig, old table = (b,12,96), (b,13,97), (b,15,105), (b,17,105), new table = (b,12,110), (b,13,98), (b,15,106), (b,17,106)
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 200
+ part_a_1_a_10 | a | 1 |
+ part_c_100_200 | b | 12 | 110
+ part_c_100_200 | b | 15 | 106
+ part_c_100_200 | b | 17 | 106
+ part_c_1_100 | b | 13 | 98
+(6 rows)
+
+:init_range_parted;
+-- Enabling OLD TABLE capture for both DELETE and UPDATE statement triggers
+-- should not cause DELETEd rows to be captured twice. Similarly for
+-- INSERT triggers and inserted rows.
+create trigger trans_deletetrig
+ after delete on range_parted referencing old table as old_table
+ for each statement execute procedure trans_updatetrigfunc();
+create trigger trans_inserttrig
+ after insert on range_parted referencing new table as new_table
+ for each statement execute procedure trans_updatetrigfunc();
+update range_parted set c = c + 50 where a = 'b' and b > 10 and c >= 96;
+NOTICE: trigger = trans_updatetrig, old table = (b,12,96), (b,13,97), (b,15,105), (b,17,105), new table = (b,12,146), (b,13,147), (b,15,155), (b,17,155)
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 200
+ part_a_1_a_10 | a | 1 |
+ part_c_100_200 | b | 12 | 146
+ part_c_100_200 | b | 13 | 147
+ part_c_100_200 | b | 15 | 155
+ part_c_100_200 | b | 17 | 155
+(6 rows)
+
+drop trigger trans_updatetrig ON range_parted;
+drop trigger trans_deletetrig ON range_parted;
+drop trigger trans_inserttrig ON range_parted;
+-- Install BR triggers on the child partitions, so that transition tuple conversion takes place.
+create function func_parted_mod_b() returns trigger as $$
+begin
+ NEW.b = NEW.b + 1;
+ return NEW;
+end $$ language plpgsql;
+create trigger trig_c1_100 before update or insert on part_c_1_100
+ for each row execute procedure func_parted_mod_b();
+create trigger trig_c100_200 before update or insert on part_c_100_200
+ for each row execute procedure func_parted_mod_b();
+:init_range_parted;
+update range_parted set c = (case when c = 96 then 110 else c + 1 end ) where a = 'b' and b > 10 and c >= 96;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 200
+ part_a_1_a_10 | a | 1 |
+ part_c_100_200 | b | 15 | 110
+ part_c_100_200 | b | 17 | 106
+ part_c_100_200 | b | 19 | 106
+ part_c_1_100 | b | 15 | 98
+(6 rows)
+
+:init_range_parted;
+update range_parted set c = c + 50 where a = 'b' and b > 10 and c >= 96;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 200
+ part_a_1_a_10 | a | 1 |
+ part_c_100_200 | b | 15 | 146
+ part_c_100_200 | b | 16 | 147
+ part_c_100_200 | b | 17 | 155
+ part_c_100_200 | b | 19 | 155
+(6 rows)
+
+drop trigger trig_c1_100 ON part_c_1_100;
+drop trigger trig_c100_200 ON part_c_100_200;
+drop function func_parted_mod_b();
+-- statement triggers with update row movement
+---------------------------------------------------
+:init_range_parted;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 200
+ part_a_1_a_10 | a | 1 |
+ part_c_100_200 | b | 15 | 105
+ part_c_100_200 | b | 17 | 105
+ part_c_1_100 | b | 12 | 96
+ part_c_1_100 | b | 13 | 97
+(6 rows)
+
+create function trigfunc() returns trigger language plpgsql as
+$$
+ begin
+ raise notice 'trigger = % fired on table % during %',
+ TG_NAME, TG_TABLE_NAME, TG_OP;
+ return null;
+ end;
+$$;
+-- Triggers on root partition
+create trigger parent_delete_trig
+ after delete on range_parted for each statement execute procedure trigfunc();
+create trigger parent_update_trig
+ after update on range_parted for each statement execute procedure trigfunc();
+create trigger parent_insert_trig
+ after insert on range_parted for each statement execute procedure trigfunc();
+-- Triggers on leaf partition part_c_1_100
+create trigger c1_delete_trig
+ after delete on part_c_1_100 for each statement execute procedure trigfunc();
+create trigger c1_update_trig
+ after update on part_c_1_100 for each statement execute procedure trigfunc();
+create trigger c1_insert_trig
+ after insert on part_c_1_100 for each statement execute procedure trigfunc();
+-- Triggers on leaf partition part_c_100_200
+create trigger c100_delete_trig
+ after delete on part_c_100_200 for each statement execute procedure trigfunc();
+create trigger c100_update_trig
+ after update on part_c_100_200 for each statement execute procedure trigfunc();
+create trigger c100_insert_trig
+ after insert on part_c_100_200 for each statement execute procedure trigfunc();
+-- Move all rows from part_c_100_200 to part_c_1_100. None of the delete or insert statement triggers should be fired.
+update range_parted set c = c - 50 where c > 97;
+NOTICE: trigger = parent_update_trig fired on table range_parted during UPDATE
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 150
+ part_a_1_a_10 | a | 1 |
+ part_c_1_100 | b | 12 | 96
+ part_c_1_100 | b | 13 | 97
+ part_c_1_100 | b | 15 | 55
+ part_c_1_100 | b | 17 | 55
+(6 rows)
+
+drop trigger parent_delete_trig ON range_parted;
+drop trigger parent_update_trig ON range_parted;
+drop trigger parent_insert_trig ON range_parted;
+drop trigger c1_delete_trig ON part_c_1_100;
+drop trigger c1_update_trig ON part_c_1_100;
+drop trigger c1_insert_trig ON part_c_1_100;
+drop trigger c100_delete_trig ON part_c_100_200;
+drop trigger c100_update_trig ON part_c_100_200;
+drop trigger c100_insert_trig ON part_c_100_200;
+drop table mintab;
-- Creating default partition for range
+:init_range_parted;
create table part_def partition of range_parted default;
\d+ part_def
Table "public.part_def"
@@ -226,6 +544,7 @@ create table part_def partition of range_parted default;
--------+---------+-----------+----------+---------+----------+--------------+-------------
a | text | | | | extended | |
b | integer | | | | plain | |
+ c | numeric | | | | main | |
Partition of: range_parted DEFAULT
Partition constraint: (NOT (((a = 'a'::text) AND (b >= 1) AND (b < 10)) OR ((a = 'a'::text) AND (b >= 10) AND (b < 20)) OR ((a = 'b'::text) AND (b >= 1) AND (b < 10)) OR ((a = 'b'::text) AND (b >= 10) AND (b < 20))))
@@ -235,7 +554,55 @@ update part_def set a = 'd' where a = 'c';
-- fail
update part_def set a = 'a' where a = 'd';
ERROR: new row for relation "part_def" violates partition constraint
-DETAIL: Failing row contains (a, 9).
+DETAIL: Failing row contains (a, 9, null).
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 200
+ part_a_1_a_10 | a | 1 |
+ part_c_100_200 | b | 15 | 105
+ part_c_100_200 | b | 17 | 105
+ part_c_1_100 | b | 12 | 96
+ part_c_1_100 | b | 13 | 97
+ part_def | d | 9 |
+(7 rows)
+
+-- Update row movement from non-default to default partition.
+-- Fail, default partition is not under part_a_10_a_20.
+update part_a_10_a_20 set a = 'ad' where a = 'a';
+ERROR: new row for relation "part_a_10_a_20" violates partition constraint
+DETAIL: Failing row contains (ad, 10, 200).
+-- Success
+update range_parted set a = 'ad' where a = 'a';
+update range_parted set a = 'bd' where a = 'b';
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------+----+----+-----
+ part_def | ad | 1 |
+ part_def | ad | 10 | 200
+ part_def | bd | 12 | 96
+ part_def | bd | 13 | 97
+ part_def | bd | 15 | 105
+ part_def | bd | 17 | 105
+ part_def | d | 9 |
+(7 rows)
+
+-- Update row movement from default to non-default partitions.
+-- Success
+update range_parted set a = 'a' where a = 'ad';
+update range_parted set a = 'b' where a = 'bd';
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+ partname | a | b | c
+----------------+---+----+-----
+ part_a_10_a_20 | a | 10 | 200
+ part_a_1_a_10 | a | 1 |
+ part_c_100_200 | b | 15 | 105
+ part_c_100_200 | b | 17 | 105
+ part_c_1_100 | b | 12 | 96
+ part_c_1_100 | b | 13 | 97
+ part_def | d | 9 |
+(7 rows)
+
create table list_parted (
a text,
b int
@@ -250,6 +617,110 @@ ERROR: new row for relation "list_default" violates partition constraint
DETAIL: Failing row contains (a, 10).
-- ok
update list_default set a = 'x' where a = 'd';
--- cleanup
+drop table list_parted;
+--------------
+-- UPDATE with
+-- partition-key or non-partition columns, with different column orderings
+-- and triggers.
+--------------
+-- Setup
+--------
+create table list_parted (a numeric, b int, c int8) partition by list (a);
+create table sub_parted partition of list_parted for values in (1) partition by list (b);
+create table sub_part1(b int, c int8, a numeric);
+alter table sub_parted attach partition sub_part1 for values in (1);
+create table sub_part2(b int, c int8, a numeric);
+alter table sub_parted attach partition sub_part2 for values in (2);
+create table list_part1(a numeric, b int, c int8);
+alter table list_parted attach partition list_part1 for values in (2,3);
+insert into list_parted values (2,5,50);
+insert into list_parted values (3,6,60);
+insert into sub_parted values (1,1,60);
+insert into sub_parted values (1,2,10);
+-- Test partition constraint violation when intermediate ancestor is used and
+-- constraint is inherited from upper root.
+update sub_parted set a = 2 where c = 10;
+ERROR: new row for relation "sub_part2" violates partition constraint
+DETAIL: Failing row contains (2, 10, 2).
+-- UPDATE that does not modify the partition key of the partitions chosen for update.
+select tableoid::regclass::text , * from list_parted where a = 2 order by 1;
+ tableoid | a | b | c
+------------+---+---+----
+ list_part1 | 2 | 5 | 50
+(1 row)
+
+update list_parted set b = c + a where a = 2;
+select tableoid::regclass::text , * from list_parted where a = 2 order by 1;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 52 | 50
+(1 row)
+
+-----------
+-- Triggers can cause UPDATE row movement if they modify the partition key.
+-----------
+create function func_parted_mod_b() returns trigger as $$
+begin
+ NEW.b = 2; -- This is changing partition key column.
+ return NEW;
+end $$ language plpgsql;
+create trigger parted_mod_b before update on sub_part1
+ for each row execute procedure func_parted_mod_b();
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 52 | 50
+ list_part1 | 3 | 6 | 60
+ sub_part1 | 1 | 1 | 60
+ sub_part2 | 1 | 2 | 10
+(4 rows)
+
+-- This should do the tuple routing even though there is no explicit
+-- partition-key update, because there is a trigger on sub_part1
+update list_parted set c = 70 where b = 1 ;
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 52 | 50
+ list_part1 | 3 | 6 | 60
+ sub_part2 | 1 | 2 | 10
+ sub_part2 | 1 | 2 | 70
+(4 rows)
+
+drop trigger parted_mod_b ON sub_part1 ;
+-- If a BR DELETE trigger prevents the DELETE from happening, we should also
+-- skip the INSERT if that DELETE is part of the UPDATE=>DELETE+INSERT.
+create or replace function func_parted_mod_b() returns trigger as $$
+begin return NULL; end $$ language plpgsql;
+create trigger trig_skip_delete before delete on sub_part1
+ for each row execute procedure func_parted_mod_b();
+update list_parted set b = 1 where c = 70;
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 52 | 50
+ list_part1 | 3 | 6 | 60
+ sub_part1 | 1 | 1 | 70
+ sub_part2 | 1 | 2 | 10
+(4 rows)
+
+drop trigger trig_skip_delete ON sub_part1 ;
+-- UPDATE partition-key with FROM clause. If join produces multiple output
+-- rows for the same row to be modified, we should tuple-route the row only once.
+-- There should not be any rows inserted.
+create table non_parted (id int);
+insert into non_parted values (1), (1), (1), (2), (2), (2), (3), (3), (3);
+update list_parted t1 set a = 2 from non_parted t2 where t1.a = t2.id and a = 1;
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 1 | 70
+ list_part1 | 2 | 2 | 10
+ list_part1 | 2 | 52 | 50
+ list_part1 | 3 | 6 | 60
+(4 rows)
+
+drop table non_parted;
+drop function func_parted_mod_b ( ) ;
drop table range_parted;
drop table list_parted;
diff --git a/src/test/regress/sql/update.sql b/src/test/regress/sql/update.sql
index 66d1fec..02d4c5e 100644
--- a/src/test/regress/sql/update.sql
+++ b/src/test/regress/sql/update.sql
@@ -107,25 +107,203 @@ INSERT INTO upsert_test VALUES (1, 'Bat') ON CONFLICT(a)
DROP TABLE update_test;
DROP TABLE upsert_test;
--- update to a partition should check partition bound constraint for the new tuple
-create table range_parted (
+
+---------------------------
+-- UPDATE with row movement
+---------------------------
+
+-- update to a partition should check the partition bound constraint for the new tuple.
+-- If the partition key is updated, the row should be moved to the appropriate
+-- partition. Updatable views using partitions should enforce the check options
+-- for the rows that have been moved.
+create table mintab(c1 int);
+insert into mintab values (120);
+CREATE TABLE range_parted (
a text,
- b int
+ b int,
+ c numeric
) partition by range (a, b);
-create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10);
-create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20);
+CREATE VIEW upview AS SELECT * FROM range_parted WHERE (select c > c1 from mintab) WITH CHECK OPTION;
+
+-- Create partitions intentionally in descending bound order, to test that
+-- the subplans are ordered in ascending bound order rather than by OID.
+create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20) partition by range (c);
create table part_b_1_b_10 partition of range_parted for values from ('b', 1) to ('b', 10);
-create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20);
-insert into part_a_1_a_10 values ('a', 1);
-insert into part_b_10_b_20 values ('b', 10);
+create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20);
+create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10);
+
+-- This tests partition-key UPDATE on a partitioned table that does not have any child partitions
+update part_b_10_b_20 set b = b - 6;
+
+-- As mentioned above, the partition creation is intentionally kept in descending bound order.
+create table part_c_100_200 (c numeric, a text, b int);
+alter table part_b_10_b_20 attach partition part_c_100_200 for values from (100) to (200);
+create table part_c_1_100 (b int, c numeric, a text);
+alter table part_b_10_b_20 attach partition part_c_1_100 for values from (1) to (100);
+
+\set init_range_parted 'truncate range_parted; insert into range_parted values (''a'', 1, NULL), (''a'', 10, 200), (''b'', 12, 96), (''b'', 13, 97), (''b'', 15, 105), (''b'', 17, 105)'
+:init_range_parted;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+
+-- The order of subplans should be in bound order
+explain (costs off) update range_parted set c = c - 50 where c > 97;
+
+-- fail (row movement happens only within the partition subtree) :
+update part_c_1_100 set c = c + 20 where c = 96;
+-- No row found :
+update part_c_1_100 set c = c + 20 where c = 98;
+-- ok (row movement)
+update part_b_10_b_20 set c = c + 20 returning c, b, a;
+select a, b, c from part_c_1_100 order by 1, 2, 3;
+select a, b, c from part_c_100_200 order by 1, 2, 3;
+
+-- fail (row movement happens only within the partition subtree) :
+update part_b_10_b_20 set b = b - 6 where c > 116 returning *;
+-- ok (row movement, with subset of rows moved into different partition)
+update range_parted set b = b - 6 where c > 116 returning a, b + c;
+
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+
+-- update partition key using updatable view.
+
+-- succeeds
+update upview set c = 199 where b = 4;
+-- fail, check option violation
+update upview set c = 120 where b = 4;
+-- fail, row movement with check option violation
+update upview set a = 'b', b = 15, c = 120 where b = 4;
+-- succeeds, row movement, check option passes
+update upview set a = 'b', b = 15 where b = 4;
+
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+
+-- cleanup
+drop view upview;
+
+-- RETURNING having whole-row vars.
+----------------------------------
+:init_range_parted;
+update range_parted set c = 95 where a = 'b' and b > 10 and c > 100 returning (range_parted) , *;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+
+
+-- Transition tables with update row movement
+---------------------------------------------
+:init_range_parted;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+
+create function trans_updatetrigfunc() returns trigger language plpgsql as
+$$
+ begin
+ raise notice 'trigger = %, old table = %, new table = %',
+ TG_NAME,
+ (select string_agg(old_table::text, ', ' order by a) from old_table),
+ (select string_agg(new_table::text, ', ' order by a) from new_table);
+ return null;
+ end;
+$$;
+
+create trigger trans_updatetrig
+ after update on range_parted referencing old table as old_table new table as new_table
+ for each statement execute procedure trans_updatetrigfunc();
+
+update range_parted set c = (case when c = 96 then 110 else c + 1 end ) where a = 'b' and b > 10 and c >= 96;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+:init_range_parted;
+
+-- Enabling OLD TABLE capture for both DELETE and UPDATE statement triggers
+-- should not cause DELETEd rows to be captured twice. Similarly for
+-- INSERT triggers and inserted rows.
+create trigger trans_deletetrig
+ after delete on range_parted referencing old table as old_table
+ for each statement execute procedure trans_updatetrigfunc();
+create trigger trans_inserttrig
+ after insert on range_parted referencing new table as new_table
+ for each statement execute procedure trans_updatetrigfunc();
+update range_parted set c = c + 50 where a = 'b' and b > 10 and c >= 96;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+drop trigger trans_updatetrig ON range_parted;
+drop trigger trans_deletetrig ON range_parted;
+drop trigger trans_inserttrig ON range_parted;
+
+-- Install BR triggers on the child partitions, so that transition tuple conversion takes place.
+create function func_parted_mod_b() returns trigger as $$
+begin
+ NEW.b = NEW.b + 1;
+ return NEW;
+end $$ language plpgsql;
+create trigger trig_c1_100 before update or insert on part_c_1_100
+ for each row execute procedure func_parted_mod_b();
+create trigger trig_c100_200 before update or insert on part_c_100_200
+ for each row execute procedure func_parted_mod_b();
+:init_range_parted;
+update range_parted set c = (case when c = 96 then 110 else c + 1 end ) where a = 'b' and b > 10 and c >= 96;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+:init_range_parted;
+update range_parted set c = c + 50 where a = 'b' and b > 10 and c >= 96;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+drop trigger trig_c1_100 ON part_c_1_100;
+drop trigger trig_c100_200 ON part_c_100_200;
+drop function func_parted_mod_b();
+
+
+-- statement triggers with update row movement
+---------------------------------------------------
+
+:init_range_parted;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+
+create function trigfunc() returns trigger language plpgsql as
+$$
+ begin
+ raise notice 'trigger = % fired on table % during %',
+ TG_NAME, TG_TABLE_NAME, TG_OP;
+ return null;
+ end;
+$$;
+-- Triggers on root partition
+create trigger parent_delete_trig
+ after delete on range_parted for each statement execute procedure trigfunc();
+create trigger parent_update_trig
+ after update on range_parted for each statement execute procedure trigfunc();
+create trigger parent_insert_trig
+ after insert on range_parted for each statement execute procedure trigfunc();
+
+-- Triggers on leaf partition part_c_1_100
+create trigger c1_delete_trig
+ after delete on part_c_1_100 for each statement execute procedure trigfunc();
+create trigger c1_update_trig
+ after update on part_c_1_100 for each statement execute procedure trigfunc();
+create trigger c1_insert_trig
+ after insert on part_c_1_100 for each statement execute procedure trigfunc();
+
+-- Triggers on leaf partition part_c_100_200
+create trigger c100_delete_trig
+ after delete on part_c_100_200 for each statement execute procedure trigfunc();
+create trigger c100_update_trig
+ after update on part_c_100_200 for each statement execute procedure trigfunc();
+create trigger c100_insert_trig
+ after insert on part_c_100_200 for each statement execute procedure trigfunc();
+
+-- Move all rows from part_c_100_200 to part_c_1_100. None of the delete or insert statement triggers should be fired.
+update range_parted set c = c - 50 where c > 97;
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+
+drop trigger parent_delete_trig ON range_parted;
+drop trigger parent_update_trig ON range_parted;
+drop trigger parent_insert_trig ON range_parted;
+drop trigger c1_delete_trig ON part_c_1_100;
+drop trigger c1_update_trig ON part_c_1_100;
+drop trigger c1_insert_trig ON part_c_1_100;
+drop trigger c100_delete_trig ON part_c_100_200;
+drop trigger c100_update_trig ON part_c_100_200;
+drop trigger c100_insert_trig ON part_c_100_200;
+
+drop table mintab;
--- fail
-update part_a_1_a_10 set a = 'b' where a = 'a';
-update range_parted set b = b - 1 where b = 10;
--- ok
-update range_parted set b = b + 1 where b = 10;
-- Creating default partition for range
+:init_range_parted;
create table part_def partition of range_parted default;
\d+ part_def
insert into range_parted values ('c', 9);
@@ -134,6 +312,21 @@ update part_def set a = 'd' where a = 'c';
-- fail
update part_def set a = 'a' where a = 'd';
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+
+-- Update row movement from non-default to default partition.
+-- Fail, default partition is not under part_a_10_a_20.
+update part_a_10_a_20 set a = 'ad' where a = 'a';
+-- Success
+update range_parted set a = 'ad' where a = 'a';
+update range_parted set a = 'bd' where a = 'b';
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+-- Update row movement from default to non-default partitions.
+-- Success
+update range_parted set a = 'a' where a = 'ad';
+update range_parted set a = 'b' where a = 'bd';
+select tableoid::regclass::text partname, * from range_parted order by 1, 2, 3, 4;
+
create table list_parted (
a text,
b int
@@ -148,6 +341,82 @@ update list_default set a = 'a' where a = 'd';
-- ok
update list_default set a = 'x' where a = 'd';
--- cleanup
+drop table list_parted;
+
+--------------
+-- UPDATE with
+-- partition-key or non-partition columns, with different column orderings
+-- and triggers.
+--------------
+
+-- Setup
+--------
+create table list_parted (a numeric, b int, c int8) partition by list (a);
+create table sub_parted partition of list_parted for values in (1) partition by list (b);
+
+create table sub_part1(b int, c int8, a numeric);
+alter table sub_parted attach partition sub_part1 for values in (1);
+create table sub_part2(b int, c int8, a numeric);
+alter table sub_parted attach partition sub_part2 for values in (2);
+
+create table list_part1(a numeric, b int, c int8);
+alter table list_parted attach partition list_part1 for values in (2,3);
+
+insert into list_parted values (2,5,50);
+insert into list_parted values (3,6,60);
+insert into sub_parted values (1,1,60);
+insert into sub_parted values (1,2,10);
+
+-- Test partition constraint violation when intermediate ancestor is used and
+-- constraint is inherited from upper root.
+update sub_parted set a = 2 where c = 10;
+
+-- UPDATE that does not modify the partition key of the partitions chosen for update.
+select tableoid::regclass::text , * from list_parted where a = 2 order by 1;
+update list_parted set b = c + a where a = 2;
+select tableoid::regclass::text , * from list_parted where a = 2 order by 1;
+
+
+-----------
+-- Triggers can cause UPDATE row movement if they modify the partition key.
+-----------
+create function func_parted_mod_b() returns trigger as $$
+begin
+ NEW.b = 2; -- This is changing partition key column.
+ return NEW;
+end $$ language plpgsql;
+create trigger parted_mod_b before update on sub_part1
+ for each row execute procedure func_parted_mod_b();
+
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+
+-- This should do the tuple routing even though there is no explicit
+-- partition-key update, because there is a trigger on sub_part1
+update list_parted set c = 70 where b = 1 ;
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+
+drop trigger parted_mod_b ON sub_part1 ;
+
+-- If a BR DELETE trigger prevents the DELETE from happening, we should also
+-- skip the INSERT if that DELETE is part of the UPDATE=>DELETE+INSERT.
+create or replace function func_parted_mod_b() returns trigger as $$
+begin return NULL; end $$ language plpgsql;
+create trigger trig_skip_delete before delete on sub_part1
+ for each row execute procedure func_parted_mod_b();
+update list_parted set b = 1 where c = 70;
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+
+drop trigger trig_skip_delete ON sub_part1 ;
+
+-- UPDATE partition-key with FROM clause. If join produces multiple output
+-- rows for the same row to be modified, we should tuple-route the row only once.
+-- There should not be any rows inserted.
+create table non_parted (id int);
+insert into non_parted values (1), (1), (1), (2), (2), (2), (3), (3), (3);
+update list_parted t1 set a = 2 from non_parted t2 where t1.a = t2.id and a = 1;
+select tableoid::regclass::text , * from list_parted order by 1, 2, 3, 4;
+drop table non_parted;
+
+drop function func_parted_mod_b ( ) ;
drop table range_parted;
drop table list_parted;