Skip to content

Commit e13029a

Browse files
committed
Provide a DSA area for all parallel queries.
This will allow future parallel query code to dynamically allocate storage shared by all participants. Thomas Munro, with assorted changes by me.
1 parent 2604438 commit e13029a

File tree

5 files changed

+61
-2
lines changed

5 files changed

+61
-2
lines changed

doc/src/sgml/monitoring.sgml

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -818,7 +818,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
818818

819819
<tbody>
820820
<row>
821-
<entry morerows="57"><literal>LWLock</></entry>
821+
<entry morerows="58"><literal>LWLock</></entry>
822822
<entry><literal>ShmemIndexLock</></entry>
823823
<entry>Waiting to find or allocate space in shared memory.</entry>
824824
</row>
@@ -1069,6 +1069,10 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
10691069
<entry><literal>predicate_lock_manager</></entry>
10701070
<entry>Waiting to add or examine predicate lock information.</entry>
10711071
</row>
1072+
<row>
1073+
<entry><literal>parallel_query_dsa</></entry>
1074+
<entry>Waiting for parallel query dynamic shared memory allocation lock.</entry>
1075+
</row>
10721076
<row>
10731077
<entry morerows="9"><literal>Lock</></entry>
10741078
<entry><literal>relation</></entry>

src/backend/executor/execParallel.c

Lines changed: 50 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
#include "optimizer/planner.h"
3535
#include "storage/spin.h"
3636
#include "tcop/tcopprot.h"
37+
#include "utils/dsa.h"
3738
#include "utils/memutils.h"
3839
#include "utils/snapmgr.h"
3940

@@ -47,6 +48,7 @@
4748
#define PARALLEL_KEY_BUFFER_USAGE UINT64CONST(0xE000000000000003)
4849
#define PARALLEL_KEY_TUPLE_QUEUE UINT64CONST(0xE000000000000004)
4950
#define PARALLEL_KEY_INSTRUMENTATION UINT64CONST(0xE000000000000005)
51+
#define PARALLEL_KEY_DSA UINT64CONST(0xE000000000000006)
5052

5153
#define PARALLEL_TUPLE_QUEUE_SIZE 65536
5254

@@ -345,6 +347,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
345347
int param_len;
346348
int instrumentation_len = 0;
347349
int instrument_offset = 0;
350+
Size dsa_minsize = dsa_minimum_size();
348351

349352
/* Allocate object for return value. */
350353
pei = palloc0(sizeof(ParallelExecutorInfo));
@@ -413,6 +416,10 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
413416
shm_toc_estimate_keys(&pcxt->estimator, 1);
414417
}
415418

419+
/* Estimate space for DSA area. */
420+
shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);
421+
shm_toc_estimate_keys(&pcxt->estimator, 1);
422+
416423
/* Everyone's had a chance to ask for space, so now create the DSM. */
417424
InitializeParallelDSM(pcxt);
418425

@@ -466,6 +473,29 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
466473
pei->instrumentation = instrumentation;
467474
}
468475

476+
/*
477+
* Create a DSA area that can be used by the leader and all workers.
478+
* (However, if we failed to create a DSM and are using private memory
479+
* instead, then skip this.)
480+
*/
481+
if (pcxt->seg != NULL)
482+
{
483+
char *area_space;
484+
485+
area_space = shm_toc_allocate(pcxt->toc, dsa_minsize);
486+
shm_toc_insert(pcxt->toc, PARALLEL_KEY_DSA, area_space);
487+
pei->area = dsa_create_in_place(area_space, dsa_minsize,
488+
LWTRANCHE_PARALLEL_QUERY_DSA,
489+
"parallel_query_dsa",
490+
pcxt->seg);
491+
}
492+
493+
/*
494+
* Make the area available to executor nodes running in the leader. See
495+
* also ParallelQueryMain which makes it available to workers.
496+
*/
497+
estate->es_query_dsa = pei->area;
498+
469499
/*
470500
* Give parallel-aware nodes a chance to initialize their shared data.
471501
* This also initializes the elements of instrumentation->ps_instrument,
@@ -571,6 +601,11 @@ ExecParallelFinish(ParallelExecutorInfo *pei)
571601
void
572602
ExecParallelCleanup(ParallelExecutorInfo *pei)
573603
{
604+
if (pei->area != NULL)
605+
{
606+
dsa_detach(pei->area);
607+
pei->area = NULL;
608+
}
574609
if (pei->pcxt != NULL)
575610
{
576611
DestroyParallelContext(pei->pcxt);
@@ -728,6 +763,8 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
728763
QueryDesc *queryDesc;
729764
SharedExecutorInstrumentation *instrumentation;
730765
int instrument_options = 0;
766+
void *area_space;
767+
dsa_area *area;
731768

732769
/* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
733770
receiver = ExecParallelGetReceiver(seg, toc);
@@ -739,10 +776,21 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
739776
/* Prepare to track buffer usage during query execution. */
740777
InstrStartParallelQuery();
741778

742-
/* Start up the executor, have it run the plan, and then shut it down. */
779+
/* Attach to the dynamic shared memory area. */
780+
area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA);
781+
area = dsa_attach_in_place(area_space, seg);
782+
783+
/* Start up the executor */
743784
ExecutorStart(queryDesc, 0);
785+
786+
/* Special executor initialization steps for parallel workers */
787+
queryDesc->planstate->state->es_query_dsa = area;
744788
ExecParallelInitializeWorker(queryDesc->planstate, toc);
789+
790+
/* Run the plan */
745791
ExecutorRun(queryDesc, ForwardScanDirection, 0L);
792+
793+
/* Shut down the executor */
746794
ExecutorFinish(queryDesc);
747795

748796
/* Report buffer usage during parallel execution. */
@@ -758,6 +806,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
758806
ExecutorEnd(queryDesc);
759807

760808
/* Cleanup. */
809+
dsa_detach(area);
761810
FreeQueryDesc(queryDesc);
762811
(*receiver->rDestroy) (receiver);
763812
}

src/include/executor/execParallel.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#include "nodes/execnodes.h"
1818
#include "nodes/parsenodes.h"
1919
#include "nodes/plannodes.h"
20+
#include "utils/dsa.h"
2021

2122
typedef struct SharedExecutorInstrumentation SharedExecutorInstrumentation;
2223

@@ -27,6 +28,7 @@ typedef struct ParallelExecutorInfo
2728
BufferUsage *buffer_usage;
2829
SharedExecutorInstrumentation *instrumentation;
2930
shm_mq_handle **tqueue;
31+
dsa_area *area;
3032
bool finished;
3133
} ParallelExecutorInfo;
3234

src/include/nodes/execnodes.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -427,6 +427,9 @@ typedef struct EState
427427
HeapTuple *es_epqTuple; /* array of EPQ substitute tuples */
428428
bool *es_epqTupleSet; /* true if EPQ tuple is provided */
429429
bool *es_epqScanDone; /* true if EPQ tuple has been fetched */
430+
431+
/* The per-query shared memory area to use for parallel execution. */
432+
struct dsa_area *es_query_dsa;
430433
} EState;
431434

432435

src/include/storage/lwlock.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,7 @@ typedef enum BuiltinTrancheIds
210210
LWTRANCHE_BUFFER_MAPPING,
211211
LWTRANCHE_LOCK_MANAGER,
212212
LWTRANCHE_PREDICATE_LOCK_MANAGER,
213+
LWTRANCHE_PARALLEL_QUERY_DSA,
213214
LWTRANCHE_FIRST_USER_DEFINED
214215
} BuiltinTrancheIds;
215216

0 commit comments

Comments (0)
pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy