diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index dacbe751c6..abe70097c5 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -81,7 +81,7 @@ typedef enum EolType
 } EolType;
 
 /*
- * Represents the heap insert method to be used during COPY to.
+ * Represents the heap insert method to be used during COPY FROM.
  */
 typedef enum CopyInsertMethod
 {
@@ -179,8 +179,6 @@ typedef struct CopyStateData
     bool        volatile_defexprs;  /* is any of defexprs volatile? */
     List       *range_table;
 
-    /* Tuple-routing support info */
-    PartitionTupleRouting *partition_tuple_routing;
     TransitionCaptureState *transition_capture;
 
@@ -2525,8 +2523,7 @@ CopyFrom(CopyState cstate)
      */
     if (cstate->rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
     {
-        proute = cstate->partition_tuple_routing =
-            ExecSetupPartitionTupleRouting(NULL, cstate->rel);
+        proute = ExecSetupPartitionTupleRouting(NULL, cstate->rel);
 
         /*
          * If we are capturing transition tuples, they may need to be
@@ -2773,7 +2770,8 @@ CopyFrom(CopyState cstate)
              * too often.
              */
            if (unlikely(lastPartitionSampleLineNo <= (cstate->cur_lineno -
-                        RECHECK_MULTI_INSERT_THRESHOLD)))
+                        RECHECK_MULTI_INSERT_THRESHOLD)
+                        && cstate->cur_lineno >= RECHECK_MULTI_INSERT_THRESHOLD))
            {
                avgTuplesPerPartChange =
                    (cstate->cur_lineno - lastPartitionSampleLineNo) /
@@ -2864,10 +2862,12 @@ CopyFrom(CopyState cstate)
 
            /*
             * We might need to convert from the parent rowtype to the
-            * partition rowtype.
+            * partition rowtype. Don't free the already stored tuple as it
+            * may still be required for a multi-insert batch.
             */
            tuple = ConvertPartitionTupleSlot(proute->parent_child_tupconv_maps[leaf_part_index],
                                              tuple,
+                                             false,
                                              proute->partition_tuple_slot,
                                              &slot);
 
@@ -3054,8 +3054,8 @@ CopyFrom(CopyState cstate)
     ExecCloseIndices(target_resultRelInfo);
 
     /* Close all the partitioned tables, leaf partitions, and their indices */
-    if (cstate->partition_tuple_routing)
-        ExecCleanupTupleRouting(mtstate, cstate->partition_tuple_routing);
+    if (proute)
+        ExecCleanupTupleRouting(mtstate, proute);
 
     /* Close any trigger target relations */
     ExecCleanUpTriggerState(estate);
diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c
index cd0ec08461..8d1146216d 100644
--- a/src/backend/executor/execPartition.c
+++ b/src/backend/executor/execPartition.c
@@ -773,6 +773,7 @@ TupConvMapForLeaf(PartitionTupleRouting *proute,
 HeapTuple
 ConvertPartitionTupleSlot(TupleConversionMap *map,
                           HeapTuple tuple,
+                          bool shouldFree,
                           TupleTableSlot *new_slot,
                           TupleTableSlot **p_my_slot)
 {
@@ -787,7 +788,7 @@ ConvertPartitionTupleSlot(TupleConversionMap *map,
     *p_my_slot = new_slot;
     Assert(new_slot != NULL);
     ExecSetSlotDescriptor(new_slot, map->outdesc);
-    ExecStoreTuple(tuple, new_slot, InvalidBuffer, true);
+    ExecStoreTuple(tuple, new_slot, InvalidBuffer, shouldFree);
 
     return tuple;
 }
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index f535762e2d..c5591bacc9 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1163,6 +1163,7 @@ lreplace:;
            tupconv_map = tupconv_map_for_subplan(mtstate, map_index);
            tuple = ConvertPartitionTupleSlot(tupconv_map,
                                              tuple,
+                                             true,
                                              proute->root_tuple_slot,
                                              &slot);
 
@@ -1791,6 +1792,7 @@ ExecPrepareTupleRouting(ModifyTableState *mtstate,
      */
     ConvertPartitionTupleSlot(proute->parent_child_tupconv_maps[partidx],
                               tuple,
+                              true,
                               proute->partition_tuple_slot,
                               &slot);
 
diff --git a/src/include/executor/execPartition.h b/src/include/executor/execPartition.h
index 862bf65060..0a249985bb 100644
--- a/src/include/executor/execPartition.h
+++ b/src/include/executor/execPartition.h
@@ -204,6 +204,7 @@ extern TupleConversionMap *TupConvMapForLeaf(PartitionTupleRouting *proute,
                                              ResultRelInfo *rootRelInfo, int leaf_index);
 extern HeapTuple ConvertPartitionTupleSlot(TupleConversionMap *map,
                  HeapTuple tuple,
+                 bool shouldFree,
                  TupleTableSlot *new_slot,
                  TupleTableSlot **p_my_slot);
 extern void ExecCleanupTupleRouting(ModifyTableState *mtstate,
diff --git a/src/test/regress/input/copy.source b/src/test/regress/input/copy.source
index cb13606d14..cd98e83dcd 100644
--- a/src/test/regress/input/copy.source
+++ b/src/test/regress/input/copy.source
@@ -133,3 +133,37 @@ this is just a line full of junk that would error out if parsed
 \.
 
 copy copytest3 to stdout csv header;
+
+-- test copy from with a partitioned table
+create table parted_copytest (
+    a int,
+    b int,
+    c text
+) partition by list (b);
+
+create table parted_copytest_a1 (c text, b int, a int);
+create table parted_copytest_a2 (a int, c text, b int);
+
+alter table parted_copytest attach partition parted_copytest_a1 for values in(1);
+alter table parted_copytest attach partition parted_copytest_a2 for values in(2);
+
+-- We must insert enough rows to trigger multi-inserts. These are only
+-- enabled adaptively when there are few enough partition changes.
+insert into parted_copytest select x,1,'One' from generate_series(1,1000) x;
+insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x;
+insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x;
+
+copy (select * from parted_copytest order by a) to '@abs_builddir@/results/parted_copytest.csv';
+
+truncate parted_copytest;
+
+copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv';
+
+select tableoid::regclass,count(*),sum(a) from parted_copytest
+group by tableoid order by tableoid::regclass::name;
+
+-- clear out the parted_copytest.csv file to avoid taking up space
+truncate parted_copytest;
+copy (select * from parted_copytest order by a) to '@abs_builddir@/results/parted_copytest.csv';
+
+drop table parted_copytest;
diff --git a/src/test/regress/output/copy.source b/src/test/regress/output/copy.source
index b7e372d61b..edeceafce3 100644
--- a/src/test/regress/output/copy.source
+++ b/src/test/regress/output/copy.source
@@ -95,3 +95,33 @@ copy copytest3 to stdout csv header;
 c1,"col with , comma","col with "" quote"
 1,a,1
 2,b,2
+-- test copy from with a partitioned table
+create table parted_copytest (
+    a int,
+    b int,
+    c text
+) partition by list (b);
+create table parted_copytest_a1 (c text, b int, a int);
+create table parted_copytest_a2 (a int, c text, b int);
+alter table parted_copytest attach partition parted_copytest_a1 for values in(1);
+alter table parted_copytest attach partition parted_copytest_a2 for values in(2);
+-- We must insert enough rows to trigger multi-inserts. These are only
+-- enabled adaptively when there are few enough partition changes.
+insert into parted_copytest select x,1,'One' from generate_series(1,1000) x;
+insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x;
+insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x;
+copy (select * from parted_copytest order by a) to '@abs_builddir@/results/parted_copytest.csv';
+truncate parted_copytest;
+copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv';
+select tableoid::regclass,count(*),sum(a) from parted_copytest
+group by tableoid order by tableoid::regclass::name;
+      tableoid      | count |  sum
+--------------------+-------+--------
+ parted_copytest_a1 |  1010 | 510655
+ parted_copytest_a2 |    10 |  10055
+(2 rows)
+
+-- clear out the parted_copytest.csv file to avoid taking up space
+truncate parted_copytest;
+copy (select * from parted_copytest order by a) to '@abs_builddir@/results/parted_copytest.csv';
+drop table parted_copytest;
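
For context, here is a minimal usage sketch (not part of the patch; the table name copy_demo and the /tmp file paths are made up for illustration). It shows the behaviour the regression test's comment describes: COPY FROM applies multi-inserts adaptively, so input that arrives clustered by partition benefits, while input that changes partition on nearly every row falls back to single-row inserts.

-- Illustration only, not part of the patch or the regression suite.
create table copy_demo (a int, b int) partition by list (b);
create table copy_demo_1 partition of copy_demo for values in (1);
create table copy_demo_2 partition of copy_demo for values in (2);

-- partition-clustered input: a long run of b = 1 followed by a run of b = 2
copy (select x, 1 from generate_series(1, 100000) x
      union all
      select x, 2 from generate_series(1, 100000) x)
  to '/tmp/copy_demo_clustered.csv' with (format csv);

-- interleaved input: the target partition changes on every row
copy (select x, (x % 2) + 1 from generate_series(1, 200000) x)
  to '/tmp/copy_demo_interleaved.csv' with (format csv);

-- long same-partition runs, so the multi-insert path should be used
copy copy_demo from '/tmp/copy_demo_clustered.csv' with (format csv);
truncate copy_demo;
-- frequent partition changes, so COPY should fall back to single inserts
copy copy_demo from '/tmp/copy_demo_interleaved.csv' with (format csv);

drop table copy_demo;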