Add max_parallel_workers GUC.

Increase the default value of the existing max_worker_processes GUC
from 8 to 16, and add a new max_parallel_workers GUC with a maximum
of 8.  This way, even when parallel query is using its full allowance
of workers, there is still room for background workers that do other
things, as originally envisioned when max_worker_processes was added.

Julien Rouhaud, reviewed by Amit Kapila and revised by me.
commit b460f5d669 (parent 5714931b07)
Author: Robert Haas
Date:   2016-12-02 07:42:58 -05:00

9 changed files with 93 additions and 10 deletions
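
The headroom described in the commit message falls straight out of the two
new defaults.  A minimal standalone sketch (the variable names mirror the
GUCs for readability; nothing here is PostgreSQL API):

#include <stdio.h>

int main(void)
{
    int max_worker_processes = 16;  /* new default: total bgworker slots */
    int max_parallel_workers = 8;   /* new GUC: cap on parallel-query workers */

    /* Even when parallel query is using its full allowance, this many
     * slots remain for other background workers (extensions, etc.). */
    printf("slots left for non-parallel workers: %d\n",
           max_worker_processes - max_parallel_workers);
    return 0;
}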

doc/src/sgml/config.sgml

@@ -1982,7 +1982,7 @@ include_dir 'conf.d'
        <para>
         Sets the maximum number of background processes that the system
         can support.  This parameter can only be set at server start.  The
-        default is 8.
+        default is 16.
        </para>

        <para>
@@ -2004,8 +2004,9 @@ include_dir 'conf.d'
         Sets the maximum number of workers that can be started by a single
         <literal>Gather</literal> node.  Parallel workers are taken from the
         pool of processes established by
-        <xref linkend="guc-max-worker-processes">.  Note that the requested
-        number of workers may not actually be available at run time.  If this
+        <xref linkend="guc-max-worker-processes">, limited by
+        <xref linkend="guc-max-parallel-workers">.  Note that the requested
+        number of workers may not actually be available at runtime.  If this
         occurs, the plan will run with fewer workers than expected, which may
         be inefficient.  The default value is 2.  Setting this value to 0
         disables parallel query execution.
@@ -2034,6 +2035,22 @@ include_dir 'conf.d'
       </listitem>
      </varlistentry>

+     <varlistentry id="guc-max-parallel-workers" xreflabel="max_parallel_workers">
+      <term><varname>max_parallel_workers</varname> (<type>integer</type>)
+      <indexterm>
+       <primary><varname>max_parallel_workers</> configuration parameter</primary>
+      </indexterm>
+      </term>
+      <listitem>
+       <para>
+        Sets the maximum number of workers that the system can support for
+        parallel queries.  The default value is 8.  When increasing or
+        decreasing this value, consider also adjusting
+        <xref linkend="guc-max-parallel-workers-per-gather">.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry id="guc-backend-flush-after" xreflabel="backend_flush_after">
       <term><varname>backend_flush_after</varname> (<type>integer</type>)
       <indexterm>

src/backend/access/transam/parallel.c

@@ -454,7 +454,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
 	snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
 			 MyProcPid);
 	worker.bgw_flags =
-		BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
+		BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION
+		| BGWORKER_CLASS_PARALLEL;
 	worker.bgw_start_time = BgWorkerStart_ConsistentState;
 	worker.bgw_restart_time = BGW_NEVER_RESTART;
 	worker.bgw_main = ParallelWorkerMain;
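
For contrast with the core parallel-query path above, here is a hedged
sketch of how a third-party extension might register its own dynamic worker
at this API level (bgw_main still existed at the time).  Note the absence of
BGWORKER_CLASS_PARALLEL, so this worker is never counted against
max_parallel_workers; my_worker_main and register_my_worker are hypothetical
names, not PostgreSQL functions:

#include "postgres.h"
#include "miscadmin.h"
#include "postmaster/bgworker.h"

extern void my_worker_main(Datum main_arg);   /* hypothetical entry point */

void
register_my_worker(void)
{
    BackgroundWorker worker;
    BackgroundWorkerHandle *handle;

    memset(&worker, 0, sizeof(worker));
    snprintf(worker.bgw_name, BGW_MAXLEN, "my extension worker");
    /* No BGWORKER_CLASS_PARALLEL: only core parallel query sets that class. */
    worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
                       BGWORKER_BACKEND_DATABASE_CONNECTION;
    worker.bgw_start_time = BgWorkerStart_ConsistentState;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    worker.bgw_main = my_worker_main;
    worker.bgw_notify_pid = MyProcPid;    /* ask for a startup notification */

    if (!RegisterDynamicBackgroundWorker(&worker, &handle))
        ereport(LOG,
                (errmsg("no free background worker slots")));
}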

src/backend/postmaster/bgworker.c

@@ -80,9 +80,22 @@ typedef struct BackgroundWorkerSlot
 	BackgroundWorker worker;
 } BackgroundWorkerSlot;

+/*
+ * In order to limit the total number of parallel workers (according to
+ * max_parallel_workers GUC), we maintain the number of active parallel
+ * workers.  Since the postmaster cannot take locks, two variables are used for
+ * this purpose: the number of registered parallel workers (modified by the
+ * backends, protected by BackgroundWorkerLock) and the number of terminated
+ * parallel workers (modified only by the postmaster, lockless).  The active
+ * number of parallel workers is the number of registered workers minus the
+ * terminated ones.  These counters can of course overflow, but it's not
+ * important here since the subtraction will still give the right number.
+ */
 typedef struct BackgroundWorkerArray
 {
 	int			total_slots;
+	uint32		parallel_register_count;
+	uint32		parallel_terminate_count;
 	BackgroundWorkerSlot slot[FLEXIBLE_ARRAY_MEMBER];
 } BackgroundWorkerArray;
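
The overflow claim in that comment is worth spelling out: because both
counters are uint32 and only their difference matters, wraparound is harmless
as long as fewer than 2^32 registrations are "in flight" at once.  A
self-contained illustration in plain C (not PostgreSQL code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Pretend the register counter has wrapped past zero while the
     * terminate counter has not wrapped yet. */
    uint32_t parallel_register_count = 2;           /* "really" 2^32 + 2 */
    uint32_t parallel_terminate_count = UINT32_MAX; /* 2^32 - 1, about to wrap */

    /* Unsigned subtraction is modulo 2^32, so the difference is still the
     * true number of active workers: (2^32 + 2) - (2^32 - 1) = 3. */
    uint32_t active = parallel_register_count - parallel_terminate_count;

    printf("active parallel workers: %u\n", active);  /* prints 3 */
    return 0;
}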
@@ -127,6 +140,8 @@ BackgroundWorkerShmemInit(void)
 		int			slotno = 0;

 		BackgroundWorkerData->total_slots = max_worker_processes;
+		BackgroundWorkerData->parallel_register_count = 0;
+		BackgroundWorkerData->parallel_terminate_count = 0;

 		/*
 		 * Copy contents of worker list into shared memory.  Record the shared
@@ -267,9 +282,12 @@ BackgroundWorkerStateChange(void)

 			/*
 			 * We need a memory barrier here to make sure that the load of
-			 * bgw_notify_pid completes before the store to in_use.
+			 * bgw_notify_pid and the update of parallel_terminate_count
+			 * complete before the store to in_use.
 			 */
 			notify_pid = slot->worker.bgw_notify_pid;
+			if ((slot->worker.bgw_flags & BGWORKER_CLASS_PARALLEL) != 0)
+				BackgroundWorkerData->parallel_terminate_count++;
 			pg_memory_barrier();
 			slot->pid = 0;
 			slot->in_use = false;
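
The reasoning in that comment is an instance of a standard publication
pattern: everything written into the slot (including the counter bump) must
become visible before in_use is cleared, because a backend that observes
in_use == false may immediately recycle the slot.  A hedged analogy using
C11 atomics rather than pg_memory_barrier(), which is a full barrier; the
Slot type here is illustrative, not PostgreSQL's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct
{
    uint32_t terminate_count;   /* ordinary data, written before publishing */
    atomic_bool in_use;         /* the "published" flag */
} Slot;

/* Releaser side: update the data, then publish the slot as free. */
static void release_slot(Slot *slot)
{
    slot->terminate_count++;    /* must be visible before in_use = false */
    atomic_store_explicit(&slot->in_use, false, memory_order_release);
}

/* Reuser side: only trust the data after observing in_use == false. */
static bool try_reuse_slot(Slot *slot)
{
    if (atomic_load_explicit(&slot->in_use, memory_order_acquire))
        return false;           /* still owned by someone else */
    /* Safe: this acquire load pairs with the release store above. */
    return true;
}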
@@ -370,6 +388,9 @@ ForgetBackgroundWorker(slist_mutable_iter *cur)
 	Assert(rw->rw_shmem_slot < max_worker_processes);
 	slot = &BackgroundWorkerData->slot[rw->rw_shmem_slot];
+	if ((rw->rw_worker.bgw_flags & BGWORKER_CLASS_PARALLEL) != 0)
+		BackgroundWorkerData->parallel_terminate_count++;
+
 	slot->in_use = false;

 	ereport(DEBUG1,
@@ -824,6 +845,7 @@ RegisterDynamicBackgroundWorker(BackgroundWorker *worker,
 {
 	int			slotno;
 	bool		success = false;
+	bool		parallel;
 	uint64		generation = 0;

 	/*
@@ -840,8 +862,27 @@ RegisterDynamicBackgroundWorker(BackgroundWorker *worker,
 	if (!SanityCheckBackgroundWorker(worker, ERROR))
 		return false;

+	parallel = (worker->bgw_flags & BGWORKER_CLASS_PARALLEL) != 0;
+
 	LWLockAcquire(BackgroundWorkerLock, LW_EXCLUSIVE);

+	/*
+	 * If this is a parallel worker, check whether there are already too many
+	 * parallel workers; if so, don't register another one.  Our view of
+	 * parallel_terminate_count may be slightly stale, but that doesn't really
+	 * matter: we would have gotten the same result if we'd arrived here
+	 * slightly earlier anyway.  There's no help for it, either, since the
+	 * postmaster must not take locks; a memory barrier wouldn't guarantee
+	 * anything useful.
+	 */
+	if (parallel && (BackgroundWorkerData->parallel_register_count -
+					 BackgroundWorkerData->parallel_terminate_count) >=
+		max_parallel_workers)
+	{
+		LWLockRelease(BackgroundWorkerLock);
+		return false;
+	}
+
 	/*
 	 * Look for an unused slot.  If we find one, grab it.
 	 */
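
One consequence of returning false here is that parallel query degrades
gracefully instead of erroring out.  A hedged sketch of the caller's side,
loosely modeled on LaunchParallelWorkers() in parallel.c (error handling
elided; launch_with_fallback is an illustrative name, and "worker" is
assumed to be filled in as in the parallel.c hunk earlier):

static void
launch_with_fallback(ParallelContext *pcxt, BackgroundWorker *worker)
{
    int i;

    pcxt->nworkers_launched = 0;
    for (i = 0; i < pcxt->nworkers; ++i)
    {
        /* Fails once max_parallel_workers (or the bgworker pool itself)
         * is exhausted; the Gather node then runs with fewer workers. */
        if (!RegisterDynamicBackgroundWorker(worker,
                                             &pcxt->worker[i].bgwhandle))
            break;
        pcxt->nworkers_launched++;
    }
}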
@@ -856,6 +897,8 @@ RegisterDynamicBackgroundWorker(BackgroundWorker *worker,
 			slot->generation++;
 			slot->terminate = false;
 			generation = slot->generation;
+			if (parallel)
+				BackgroundWorkerData->parallel_register_count++;

 			/*
 			 * Make sure postmaster doesn't see the slot as in use before it

src/backend/utils/init/globals.c

@@ -121,7 +121,8 @@ int			replacement_sort_tuples = 150000;
  */
 int			NBuffers = 1000;
 int			MaxConnections = 90;
-int			max_worker_processes = 8;
+int			max_worker_processes = 16;
+int			max_parallel_workers = 8;
 int			MaxBackends = 0;

 int			VacuumCostPageHit = 1;		/* GUC parameters for vacuum */

src/backend/utils/misc/guc.c

@@ -2477,7 +2477,7 @@ static struct config_int ConfigureNamesInt[] =
 			NULL,
 		},
 		&max_worker_processes,
-		8, 0, MAX_BACKENDS,
+		16, 0, MAX_BACKENDS,
 		check_max_worker_processes, NULL, NULL
 	},
@@ -2664,6 +2664,16 @@ static struct config_int ConfigureNamesInt[] =
 		NULL, NULL, NULL
 	},

+	{
+		{"max_parallel_workers", PGC_USERSET, RESOURCES_ASYNCHRONOUS,
+			gettext_noop("Sets the maximum number of parallel workers that can be active at one time."),
+			NULL
+		},
+		&max_parallel_workers,
+		8, 0, 1024,
+		NULL, NULL, NULL
+	},
+
 	{
 		{"autovacuum_work_mem", PGC_SIGHUP, RESOURCES_MEM,
 			gettext_noop("Sets the maximum memory to be used by each autovacuum worker process."),

src/backend/utils/misc/postgresql.conf.sample

@@ -161,8 +161,9 @@
 # - Asynchronous Behavior -

 #effective_io_concurrency = 1		# 1-1000; 0 disables prefetching
-#max_worker_processes = 8		# (change requires restart)
+#max_worker_processes = 16		# (change requires restart)
 #max_parallel_workers_per_gather = 2	# taken from max_worker_processes
+#max_parallel_workers = 8		# total maximum number of worker_processes
 #old_snapshot_threshold = -1		# 1min-60d; -1 disables; 0 is immediate
 					# (change requires restart)
 #backend_flush_after = 0		# measured in pages, 0 disables

src/bin/pg_resetxlog/pg_resetxlog.c

@@ -584,7 +584,7 @@ GuessControlValues(void)
 	ControlFile.wal_log_hints = false;
 	ControlFile.track_commit_timestamp = false;
 	ControlFile.MaxConnections = 100;
-	ControlFile.max_worker_processes = 8;
+	ControlFile.max_worker_processes = 16;
 	ControlFile.max_prepared_xacts = 0;
 	ControlFile.max_locks_per_xact = 64;

@@ -800,7 +800,7 @@ RewriteControlFile(void)
 	ControlFile.wal_log_hints = false;
 	ControlFile.track_commit_timestamp = false;
 	ControlFile.MaxConnections = 100;
-	ControlFile.max_worker_processes = 8;
+	ControlFile.max_worker_processes = 16;
 	ControlFile.max_prepared_xacts = 0;
 	ControlFile.max_locks_per_xact = 64;

src/include/miscadmin.h

@@ -157,6 +157,7 @@ extern PGDLLIMPORT int NBuffers;
 extern int	MaxBackends;
 extern int	MaxConnections;
 extern int	max_worker_processes;
+extern int	max_parallel_workers;

 extern PGDLLIMPORT int MyProcPid;
 extern PGDLLIMPORT pg_time_t MyStartTime;

src/include/postmaster/bgworker.h

@@ -58,6 +58,15 @@
  */
 #define BGWORKER_BACKEND_DATABASE_CONNECTION		0x0002

+/*
+ * This class is used internally for parallel queries, to keep track of the
+ * number of active parallel workers and make sure we never launch more than
+ * max_parallel_workers parallel workers at the same time.  Third party
+ * background workers should not use this class.
+ */
+#define BGWORKER_CLASS_PARALLEL					0x0010
+
+/* add additional bgworker classes here */

 typedef void (*bgworker_main_type) (Datum main_arg);
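
Finally, a compact sketch of how the new class bit is meant to be consulted,
mirroring the tests added in bgworker.c above.  The flag values are copied
from bgworker.h; is_parallel_worker() is illustrative, not a PostgreSQL
function:

#include <stdbool.h>

#define BGWORKER_SHMEM_ACCESS                  0x0001
#define BGWORKER_BACKEND_DATABASE_CONNECTION   0x0002
#define BGWORKER_CLASS_PARALLEL                0x0010

static bool
is_parallel_worker(int bgw_flags)
{
    /* Class bits live above the capability bits, so a mask test suffices. */
    return (bgw_flags & BGWORKER_CLASS_PARALLEL) != 0;
}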