Increase hash_mem_multiplier default to 2.0.

Double the default setting for hash_mem_multiplier, from 1.0 to 2.0.
This setting makes hash-based executor nodes use twice the usual
work_mem limit.

The PostgreSQL 15 release notes should have a compatibility note about
this change.

Author: Peter Geoghegan <pg@bowt.ie>
Discussion: https://postgr.es/m/CAH2-Wzndc_ROk6CY-bC6p9O53q974Y0Ey4WX8jcPbuTZYM4Q3A@mail.gmail.com
Peter Geoghegan 2022-02-16 18:41:52 -08:00
parent 74388a1ac3
commit 8f388f6f55
10 changed files with 52 additions and 9 deletions
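As a quick illustration of what the new default means in practice (an example psql session; 4MB is the stock work_mem default, and the final SET is only for sessions that want the old behavior back):

    SHOW work_mem;                  -- 4MB with stock settings
    SHOW hash_mem_multiplier;       -- now 2.0 by default
    -- Hash-based nodes may now use work_mem * hash_mem_multiplier = 8MB
    -- before spilling, while sort-based nodes remain capped at work_mem.
    SET hash_mem_multiplier = 1.0;  -- per-session opt-out, as in the updated tests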

doc/src/sgml/config.sgml

@@ -1849,9 +1849,8 @@ include_dir 'conf.d'
         operations can use. The final limit is determined by
         multiplying <varname>work_mem</varname> by
         <varname>hash_mem_multiplier</varname>. The default value is
-        1.0, which makes hash-based operations subject to the same
-        simple <varname>work_mem</varname> maximum as sort-based
-        operations.
+        2.0, which makes hash-based operations use twice the usual
+        <varname>work_mem</varname> base amount.
        </para>
        <para>
         Consider increasing <varname>hash_mem_multiplier</varname> in
@@ -1859,7 +1858,7 @@ include_dir 'conf.d'
         occurrence, especially when simply increasing
         <varname>work_mem</varname> results in memory pressure (memory
         pressure typically takes the form of intermittent out of
-        memory errors). A setting of 1.5 or 2.0 may be effective with
+        memory errors). The default setting of 2.0 is often effective with
         mixed workloads. Higher settings in the range of 2.0 - 8.0 or
         more may be effective in environments where
         <varname>work_mem</varname> has already been increased to 40MB
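To make the documented arithmetic concrete (an illustrative sketch; the 40MB figure comes from the paragraph above, and 4.0 is just one value from the suggested 2.0 - 8.0 range):

    SET work_mem = '40MB';
    SET hash_mem_multiplier = 4.0;
    -- Each hash-based node may now use up to 40MB * 4.0 = 160MB before
    -- spilling; sort-based nodes still stop at 40MB.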

src/backend/utils/init/globals.c

@@ -122,7 +122,7 @@ int IntervalStyle = INTSTYLE_POSTGRES;
 bool enableFsync = true;
 bool allowSystemTableMods = false;
 int work_mem = 4096;
-double hash_mem_multiplier = 1.0;
+double hash_mem_multiplier = 2.0;
 int maintenance_work_mem = 65536;
 int max_parallel_maintenance_workers = 2;

src/backend/utils/misc/guc.c

@@ -3762,7 +3762,7 @@ static struct config_real ConfigureNamesReal[] =
 			GUC_EXPLAIN
 		},
 		&hash_mem_multiplier,
-		1.0, 1.0, 1000.0,
+		2.0, 1.0, 1000.0,
 		NULL, NULL, NULL
 	},

src/backend/utils/misc/postgresql.conf.sample

@@ -136,7 +136,7 @@
 # Caution: it is not advisable to set max_prepared_transactions nonzero unless
 # you actively intend to use prepared transactions.
 #work_mem = 4MB				# min 64kB
-#hash_mem_multiplier = 1.0		# 1-1000.0 multiplier on hash table work_mem
+#hash_mem_multiplier = 2.0		# 1-1000.0 multiplier on hash table work_mem
 #maintenance_work_mem = 64MB		# min 1MB
 #autovacuum_work_mem = -1		# min 1MB, or -1 to use maintenance_work_mem
 #logical_decoding_work_mem = 64MB	# min 64kB
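The sample file only documents the default; as a sketch, a non-default value can also be applied cluster-wide without hand-editing postgresql.conf. hash_mem_multiplier takes effect on reload, so no restart is needed:

    ALTER SYSTEM SET hash_mem_multiplier = 4.0;
    SELECT pg_reload_conf();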

src/test/regress/expected/groupingsets.out

@@ -1574,6 +1574,7 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou
 -- test the knapsack
 set enable_indexscan = false;
+set hash_mem_multiplier = 1.0;
 set work_mem = '64kB';
 explain (costs off)
 select unique1,
@@ -1919,6 +1920,7 @@ select g100, g10, sum(g::numeric), count(*), max(g::text)
 from gs_data_1 group by cube (g1000, g100,g10);
 set enable_sort = true;
 set work_mem to default;
+set hash_mem_multiplier to default;
 -- Compare results
 (select * from gs_hash_1 except select * from gs_group_1)
 union all

src/test/regress/expected/join_hash.out

@@ -86,6 +86,7 @@ alter table wide set (parallel_workers = 2);
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
 set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
 QUERY PLAN
@@ -119,6 +120,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = off;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
@@ -156,6 +158,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = on;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
@@ -196,6 +199,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
 QUERY PLAN
@@ -229,6 +233,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = off;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
@@ -266,6 +271,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '192kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = on;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
@@ -307,6 +313,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
 QUERY PLAN
@@ -340,6 +347,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = off;
 explain (costs off)
 select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -377,6 +385,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
 set local work_mem = '192kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = on;
 explain (costs off)
 select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -419,6 +428,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from simple r join extremely_skewed s using (id);
 QUERY PLAN
@@ -451,6 +461,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = off;
 explain (costs off)
 select count(*) from simple r join extremely_skewed s using (id);
@@ -486,6 +497,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = on;
 explain (costs off)
 select count(*) from simple r join extremely_skewed s using (id);
@@ -523,6 +535,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
 set local parallel_leader_participation = off;
 select * from hash_join_batches(
 $$
@@ -551,6 +564,7 @@ set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
 set work_mem = '64kB';
+set hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from join_foo
 left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -602,6 +616,7 @@ set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
 set work_mem = '4MB';
+set hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from join_foo
 left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -653,6 +668,7 @@ set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
 set work_mem = '64kB';
+set hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from join_foo
 left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -704,6 +720,7 @@ set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
 set work_mem = '4MB';
+set hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from join_foo
 left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -843,6 +860,7 @@ savepoint settings;
 set max_parallel_workers_per_gather = 2;
 set enable_parallel_hash = on;
 set work_mem = '128kB';
+set hash_mem_multiplier = 1.0;
 explain (costs off)
 select length(max(s.t))
 from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id);
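A note on the pattern above: the join_hash tests apply their overrides with SET LOCAL under a savepoint, so each subtest's settings vanish on rollback. A minimal sketch of that idiom (table names as in the test):

    begin;
    savepoint settings;
    set local work_mem = '128kB';
    set local hash_mem_multiplier = 1.0;  -- pin the pre-15 spill behavior
    explain (costs off)
    select count(*) from simple r join simple s using (id);
    rollback to settings;                 -- both overrides revert here
    commit;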

src/test/regress/expected/memoize.out

@@ -90,8 +90,9 @@ WHERE t1.unique1 < 1000;
  1000 | 9.5000000000000000
 (1 row)
--- Reduce work_mem so that we see some cache evictions
+-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions
 SET work_mem TO '64kB';
+SET hash_mem_multiplier TO 1.0;
 SET enable_mergejoin TO off;
 -- Ensure we get some evictions. We're unable to validate the hits and misses
 -- here as the number of entries that fit in the cache at once will vary
@@ -238,6 +239,7 @@ WHERE unique1 < 3
 RESET enable_seqscan;
 RESET enable_mergejoin;
 RESET work_mem;
+RESET hash_mem_multiplier;
 RESET enable_bitmapscan;
 RESET enable_hashjoin;
 -- Test parallel plans with Memoize

src/test/regress/sql/groupingsets.sql

@@ -424,6 +424,7 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou
 -- test the knapsack
 set enable_indexscan = false;
+set hash_mem_multiplier = 1.0;
 set work_mem = '64kB';
 explain (costs off)
 select unique1,
@@ -519,6 +520,7 @@ from gs_data_1 group by cube (g1000, g100,g10);
 set enable_sort = true;
 set work_mem to default;
+set hash_mem_multiplier to default;
 -- Compare results

src/test/regress/sql/join_hash.sql

@@ -95,6 +95,7 @@ alter table wide set (parallel_workers = 2);
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
 set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
 select count(*) from simple r join simple s using (id);
@@ -109,6 +110,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = off;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
@@ -124,6 +126,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = on;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
@@ -143,6 +146,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
 select count(*) from simple r join simple s using (id);
@@ -157,6 +161,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = off;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
@@ -172,6 +177,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '192kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = on;
 explain (costs off)
 select count(*) from simple r join simple s using (id);
@@ -192,6 +198,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
 select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
@@ -206,6 +213,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = off;
 explain (costs off)
 select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -221,6 +229,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
 set local work_mem = '192kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = on;
 explain (costs off)
 select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -242,6 +251,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from simple r join extremely_skewed s using (id);
 select count(*) from simple r join extremely_skewed s using (id);
@@ -255,6 +265,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = off;
 explain (costs off)
 select count(*) from simple r join extremely_skewed s using (id);
@@ -269,6 +280,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
 set local work_mem = '128kB';
+set local hash_mem_multiplier = 1.0;
 set local enable_parallel_hash = on;
 explain (costs off)
 select count(*) from simple r join extremely_skewed s using (id);
@@ -285,6 +297,7 @@ rollback to settings;
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '4MB';
+set local hash_mem_multiplier = 1.0;
 set local parallel_leader_participation = off;
 select * from hash_join_batches(
 $$
@@ -311,6 +324,7 @@ set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
 set work_mem = '64kB';
+set hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from join_foo
 left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -338,6 +352,7 @@ set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
 set work_mem = '4MB';
+set hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from join_foo
 left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -365,6 +380,7 @@ set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
 set work_mem = '64kB';
+set hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from join_foo
 left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -392,6 +408,7 @@ set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
 set work_mem = '4MB';
+set hash_mem_multiplier = 1.0;
 explain (costs off)
 select count(*) from join_foo
 left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -454,6 +471,7 @@ savepoint settings;
 set max_parallel_workers_per_gather = 2;
 set enable_parallel_hash = on;
 set work_mem = '128kB';
+set hash_mem_multiplier = 1.0;
 explain (costs off)
 select length(max(s.t))
 from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id);

src/test/regress/sql/memoize.sql

@@ -55,8 +55,9 @@ SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
 LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
 WHERE t1.unique1 < 1000;
--- Reduce work_mem so that we see some cache evictions
+-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions
 SET work_mem TO '64kB';
+SET hash_mem_multiplier TO 1.0;
 SET enable_mergejoin TO off;
 -- Ensure we get some evictions. We're unable to validate the hits and misses
 -- here as the number of entries that fit in the cache at once will vary
@@ -126,6 +127,7 @@ WHERE unique1 < 3
 RESET enable_seqscan;
 RESET enable_mergejoin;
 RESET work_mem;
+RESET hash_mem_multiplier;
 RESET enable_bitmapscan;
 RESET enable_hashjoin;