From 03eb2f2b8777f8b919e64a7df10be79c0329fecf Mon Sep 17 00:00:00 2001
From: Thomas Munro
Date: Mon, 4 Dec 2017 21:32:27 +1300
Subject: [PATCH] Make sure es_query_dsa is set correctly in the leader
 process.

Commit e13029a5ce353574516c64fd1ec9c50201e705fd added es_query_dsa as a
member of EState to provide a DSA area for use by executor nodes.  That
works for worker processes where all executor nodes use the same shared
memory, but doesn't work correctly in the leader process where there may
be more than one Gather or Gather Merge node, each with its own DSA area.

Repair by installing each Gather or Gather Merge node's DSA area into
es_query_dsa only for the duration of appropriate calls into the query
tree below.

A bigger change adopting better scoping will be studied for version 11.

Author: Thomas Munro
Reviewed-By: Amit Kapila, Robert Haas
Tested-By: Alexander Voytsekhovskyy, Amit Kapila, Andreas Seltenreich
Discussion: https://postgr.es/m/CAEepm=1U6as=brnVvMNixEV2tpi8NuyQoTmO8Qef0-VV+=7MDA@mail.gmail.com
---
 src/backend/executor/execParallel.c    | 14 ++++++++------
 src/backend/executor/nodeGather.c      |  6 ++++++
 src/backend/executor/nodeGatherMerge.c |  4 ++++
 3 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 7dda399daf3..989cf5b80b1 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -543,12 +543,6 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 										pcxt->seg);
 	}
 
-	/*
-	 * Make the area available to executor nodes running in the leader.  See
-	 * also ParallelQueryMain which makes it available to workers.
-	 */
-	estate->es_query_dsa = pei->area;
-
 	/*
 	 * Give parallel-aware nodes a chance to initialize their shared data.
 	 * This also initializes the elements of instrumentation->ps_instrument,
@@ -557,7 +551,11 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 	d.pcxt = pcxt;
 	d.instrumentation = instrumentation;
 	d.nnodes = 0;
+
+	/* Install our DSA area while initializing the plan. */
+	estate->es_query_dsa = pei->area;
 	ExecParallelInitializeDSM(planstate, &d);
+	estate->es_query_dsa = NULL;
 
 	/*
 	 * Make sure that the world hasn't shifted under our feet.  This could
@@ -609,6 +607,8 @@
 void
 ExecParallelReinitialize(PlanState *planstate,
 						 ParallelExecutorInfo *pei)
 {
+	EState	   *estate = planstate->state;
+
 	/* Old workers must already be shut down */
 	Assert(pei->finished);
@@ -618,7 +618,9 @@ ExecParallelReinitialize(PlanState *planstate,
 	pei->finished = false;
 
 	/* Traverse plan tree and let each child node reset associated state. */
+	estate->es_query_dsa = pei->area;
 	ExecParallelReInitializeDSM(planstate, pei->pcxt);
+	estate->es_query_dsa = NULL;
 }
 
 /*
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index 89f592828c1..597cbfaa16d 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -278,7 +278,13 @@ gather_getnext(GatherState *gatherstate)
 
 		if (gatherstate->need_to_scan_locally)
 		{
+			EState	   *estate = gatherstate->ps.state;
+
+			/* Install our DSA area while executing the plan. */
+			estate->es_query_dsa =
+				gatherstate->pei ? gatherstate->pei->area : NULL;
 			outerTupleSlot = ExecProcNode(outerPlan);
+			estate->es_query_dsa = NULL;
 
 			if (!TupIsNull(outerTupleSlot))
 				return outerTupleSlot;
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index 6b173543564..ee98f4cf30c 100644
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -627,8 +627,12 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
 	{
 		PlanState  *outerPlan = outerPlanState(gm_state);
 		TupleTableSlot *outerTupleSlot;
+		EState	   *estate = gm_state->ps.state;
 
+		/* Install our DSA area while executing the plan. */
+		estate->es_query_dsa = gm_state->pei ? gm_state->pei->area : NULL;
 		outerTupleSlot = ExecProcNode(outerPlan);
+		estate->es_query_dsa = NULL;
 
 		if (!TupIsNull(outerTupleSlot))
 		{
-- 
2.15.0
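
For reviewers who want the scoping idea in isolation: below is a minimal standalone C sketch (hypothetical Toy* types and names, not PostgreSQL code) of the pattern the patch applies, i.e. installing a node's own area into a shared executor-state pointer only for the duration of the call into the subtree and clearing it afterwards, so that two sibling nodes with distinct areas in the same process never see each other's area.

/* toy_scoping.c -- illustration only, assumes nothing about the real executor */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct ToyArea { int id; } ToyArea;                       /* stand-in for a dsa_area */
typedef struct ToyEState { ToyArea *query_area; } ToyEState;      /* stand-in for es_query_dsa */
typedef struct ToyGather { ToyEState *estate; ToyArea *area; } ToyGather;

/* A "child node" that consumes whatever area is currently installed. */
static void
child_exec(ToyEState *estate, int expected_area)
{
	assert(estate->query_area != NULL);
	assert(estate->query_area->id == expected_area);
	printf("child sees area %d\n", estate->query_area->id);
}

/* Install this node's area only around the call below, then clear it:
 * the same shape as the Gather / Gather Merge changes in the patch. */
static void
gather_exec(ToyGather *node, int expected_area)
{
	node->estate->query_area = node->area;
	child_exec(node->estate, expected_area);
	node->estate->query_area = NULL;
}

int
main(void)
{
	ToyEState	estate = {NULL};
	ToyArea		a1 = {1},
				a2 = {2};
	/* Two sibling nodes in one leader process, each owning its own area. */
	ToyGather	g1 = {&estate, &a1};
	ToyGather	g2 = {&estate, &a2};

	/* With per-call scoping, each subtree sees its own node's area. */
	gather_exec(&g1, 1);
	gather_exec(&g2, 2);

	/* Had the area been installed once at init time (the old coding),
	 * whichever node was set up last would win for both subtrees. */
	return 0;
}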