2003-09-13 18:44:49 +02:00
|
|
|
--
|
2022-04-06 22:56:06 +02:00
|
|
|
-- Test cumulative stats system
|
2003-09-13 18:44:49 +02:00
|
|
|
--
|
|
|
|
-- Must be run after tenk2 has been created (by create_table),
|
|
|
|
-- populated (by create_misc) and indexed (by create_index).
|
|
|
|
--
|
|
|
|
-- conditio sine qua non
|
2007-09-24 05:12:23 +02:00
|
|
|
SHOW track_counts; -- must be on
|
|
|
|
track_counts
|
|
|
|
--------------
|
2003-09-13 18:44:49 +02:00
|
|
|
on
|
|
|
|
(1 row)
|
|
|
|
|
2011-10-09 05:45:58 +02:00
|
|
|
-- ensure that both seqscan and indexscan plans are allowed
|
|
|
|
SET enable_seqscan TO on;
|
|
|
|
SET enable_indexscan TO on;
|
|
|
|
-- for the moment, we don't want index-only scans here
|
|
|
|
SET enable_indexonlyscan TO off;
|
2022-04-07 09:03:58 +02:00
|
|
|
-- not enabled by default, but we want to test it...
|
|
|
|
SET track_functions TO 'all';
|
2022-09-23 22:00:55 +02:00
|
|
|
-- record dboid for later use
|
|
|
|
SELECT oid AS dboid from pg_database where datname = current_database() \gset
|
2003-09-13 18:44:49 +02:00
|
|
|
-- save counters
|
2022-04-07 06:29:46 +02:00
|
|
|
BEGIN;
|
|
|
|
SET LOCAL stats_fetch_consistency = snapshot;
|
2017-05-13 15:42:12 +02:00
|
|
|
CREATE TABLE prevstats AS
|
2003-09-13 18:44:49 +02:00
|
|
|
SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch,
|
|
|
|
(b.heap_blks_read + b.heap_blks_hit) AS heap_blks,
|
2015-02-20 03:36:50 +01:00
|
|
|
(b.idx_blks_read + b.idx_blks_hit) AS idx_blks,
|
|
|
|
pg_stat_get_snapshot_timestamp() as snap_ts
|
2003-09-13 18:44:49 +02:00
|
|
|
FROM pg_catalog.pg_stat_user_tables AS t,
|
|
|
|
pg_catalog.pg_statio_user_tables AS b
|
|
|
|
WHERE t.relname='tenk2' AND b.relname='tenk2';
|
2022-04-07 06:29:46 +02:00
|
|
|
COMMIT;
|
2015-02-20 16:10:01 +01:00
|
|
|
-- test effects of TRUNCATE on n_live_tup/n_dead_tup counters
|
|
|
|
CREATE TABLE trunc_stats_test(id serial);
|
2018-12-07 18:11:59 +01:00
|
|
|
CREATE TABLE trunc_stats_test1(id serial, stuff text);
|
2015-02-20 16:10:01 +01:00
|
|
|
CREATE TABLE trunc_stats_test2(id serial);
|
2018-12-07 18:11:59 +01:00
|
|
|
CREATE TABLE trunc_stats_test3(id serial, stuff text);
|
2015-02-20 16:10:01 +01:00
|
|
|
CREATE TABLE trunc_stats_test4(id serial);
|
|
|
|
-- check that n_live_tup is reset to 0 after truncate
|
|
|
|
INSERT INTO trunc_stats_test DEFAULT VALUES;
|
|
|
|
INSERT INTO trunc_stats_test DEFAULT VALUES;
|
|
|
|
INSERT INTO trunc_stats_test DEFAULT VALUES;
|
|
|
|
TRUNCATE trunc_stats_test;
|
|
|
|
-- test involving a truncate in a transaction; 4 ins but only 1 live
|
|
|
|
INSERT INTO trunc_stats_test1 DEFAULT VALUES;
|
|
|
|
INSERT INTO trunc_stats_test1 DEFAULT VALUES;
|
|
|
|
INSERT INTO trunc_stats_test1 DEFAULT VALUES;
|
|
|
|
UPDATE trunc_stats_test1 SET id = id + 10 WHERE id IN (1, 2);
|
|
|
|
DELETE FROM trunc_stats_test1 WHERE id = 3;
|
|
|
|
BEGIN;
|
|
|
|
UPDATE trunc_stats_test1 SET id = id + 100;
|
|
|
|
TRUNCATE trunc_stats_test1;
|
|
|
|
INSERT INTO trunc_stats_test1 DEFAULT VALUES;
|
|
|
|
COMMIT;
|
|
|
|
-- use a savepoint: 1 insert, 1 live
|
|
|
|
BEGIN;
|
|
|
|
INSERT INTO trunc_stats_test2 DEFAULT VALUES;
|
|
|
|
INSERT INTO trunc_stats_test2 DEFAULT VALUES;
|
|
|
|
SAVEPOINT p1;
|
|
|
|
INSERT INTO trunc_stats_test2 DEFAULT VALUES;
|
|
|
|
TRUNCATE trunc_stats_test2;
|
|
|
|
INSERT INTO trunc_stats_test2 DEFAULT VALUES;
|
|
|
|
RELEASE SAVEPOINT p1;
|
|
|
|
COMMIT;
|
|
|
|
-- rollback a savepoint: this should count 4 inserts and have 2
|
|
|
|
-- live tuples after commit (and 2 dead ones due to aborted subxact)
|
|
|
|
BEGIN;
|
|
|
|
INSERT INTO trunc_stats_test3 DEFAULT VALUES;
|
|
|
|
INSERT INTO trunc_stats_test3 DEFAULT VALUES;
|
|
|
|
SAVEPOINT p1;
|
|
|
|
INSERT INTO trunc_stats_test3 DEFAULT VALUES;
|
|
|
|
INSERT INTO trunc_stats_test3 DEFAULT VALUES;
|
|
|
|
TRUNCATE trunc_stats_test3;
|
|
|
|
INSERT INTO trunc_stats_test3 DEFAULT VALUES;
|
|
|
|
ROLLBACK TO SAVEPOINT p1;
|
|
|
|
COMMIT;
|
|
|
|
-- rollback a truncate: this should count 2 inserts and produce 2 dead tuples
|
|
|
|
BEGIN;
|
|
|
|
INSERT INTO trunc_stats_test4 DEFAULT VALUES;
|
|
|
|
INSERT INTO trunc_stats_test4 DEFAULT VALUES;
|
|
|
|
TRUNCATE trunc_stats_test4;
|
|
|
|
INSERT INTO trunc_stats_test4 DEFAULT VALUES;
|
|
|
|
ROLLBACK;
|
2007-02-08 00:11:30 +01:00
|
|
|
-- do a seqscan
|
|
|
|
SELECT count(*) FROM tenk2;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
10000
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- do an indexscan
|
Allow bitmap scans to operate as index-only scans when possible.
If we don't have to return any columns from heap tuples, and there's
no need to recheck qual conditions, and the heap page is all-visible,
then we can skip fetching the heap page altogether.
Skip prefetching pages too, when possible, on the assumption that the
recheck flag will remain the same from one page to the next. While that
assumption is hardly bulletproof, it seems like a good bet most of the
time, and better than prefetching pages we don't need.
This commit installs the executor infrastructure, but doesn't change
any planner cost estimates, thus possibly causing bitmap scans to
not be chosen in cases where this change renders them the best choice.
I (tgl) am not entirely convinced that we need to account for this
behavior in the planner, because I think typically the bitmap scan would
get chosen anyway if it's the best bet. In any case the submitted patch
took way too many shortcuts, resulting in too many clearly-bad choices,
to be committable.
Alexander Kuzmenkov, reviewed by Alexey Chernyshov, and whacked around
rather heavily by me.
Discussion: https://postgr.es/m/239a8955-c0fc-f506-026d-c837e86c827b@postgrespro.ru
2017-11-01 22:38:12 +01:00
|
|
|
-- make sure it is not a bitmap scan, which might skip fetching heap tuples
|
|
|
|
SET enable_bitmapscan TO off;
|
2007-02-08 00:11:30 +01:00
|
|
|
SELECT count(*) FROM tenk2 WHERE unique1 = 1;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
Allow bitmap scans to operate as index-only scans when possible.
If we don't have to return any columns from heap tuples, and there's
no need to recheck qual conditions, and the heap page is all-visible,
then we can skip fetching the heap page altogether.
Skip prefetching pages too, when possible, on the assumption that the
recheck flag will remain the same from one page to the next. While that
assumption is hardly bulletproof, it seems like a good bet most of the
time, and better than prefetching pages we don't need.
This commit installs the executor infrastructure, but doesn't change
any planner cost estimates, thus possibly causing bitmap scans to
not be chosen in cases where this change renders them the best choice.
I (tgl) am not entirely convinced that we need to account for this
behavior in the planner, because I think typically the bitmap scan would
get chosen anyway if it's the best bet. In any case the submitted patch
took way too many shortcuts, resulting in too many clearly-bad choices,
to be committable.
Alexander Kuzmenkov, reviewed by Alexey Chernyshov, and whacked around
rather heavily by me.
Discussion: https://postgr.es/m/239a8955-c0fc-f506-026d-c837e86c827b@postgrespro.ru
2017-11-01 22:38:12 +01:00
|
|
|
RESET enable_bitmapscan;
|
2022-04-07 08:35:56 +02:00
|
|
|
-- ensure pending stats are flushed
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
2005-07-23 16:18:57 +02:00
|
|
|
|
2003-09-13 18:44:49 +02:00
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- check effects
|
2022-04-07 06:29:46 +02:00
|
|
|
BEGIN;
|
|
|
|
SET LOCAL stats_fetch_consistency = snapshot;
|
2015-02-20 16:10:01 +01:00
|
|
|
SELECT relname, n_tup_ins, n_tup_upd, n_tup_del, n_live_tup, n_dead_tup
|
|
|
|
FROM pg_stat_user_tables
|
|
|
|
WHERE relname like 'trunc_stats_test%' order by relname;
|
|
|
|
relname | n_tup_ins | n_tup_upd | n_tup_del | n_live_tup | n_dead_tup
|
|
|
|
-------------------+-----------+-----------+-----------+------------+------------
|
|
|
|
trunc_stats_test | 3 | 0 | 0 | 0 | 0
|
|
|
|
trunc_stats_test1 | 4 | 2 | 1 | 1 | 0
|
|
|
|
trunc_stats_test2 | 1 | 0 | 0 | 1 | 0
|
|
|
|
trunc_stats_test3 | 4 | 0 | 0 | 2 | 2
|
|
|
|
trunc_stats_test4 | 2 | 0 | 0 | 0 | 2
|
|
|
|
(5 rows)
|
|
|
|
|
2003-09-13 18:44:49 +02:00
|
|
|
SELECT st.seq_scan >= pr.seq_scan + 1,
|
|
|
|
st.seq_tup_read >= pr.seq_tup_read + cl.reltuples,
|
|
|
|
st.idx_scan >= pr.idx_scan + 1,
|
|
|
|
st.idx_tup_fetch >= pr.idx_tup_fetch + 1
|
|
|
|
FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
|
|
|
|
WHERE st.relname='tenk2' AND cl.relname='tenk2';
|
2003-11-01 04:18:20 +01:00
|
|
|
?column? | ?column? | ?column? | ?column?
|
2003-09-13 18:44:49 +02:00
|
|
|
----------+----------+----------+----------
|
|
|
|
t | t | t | t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT st.heap_blks_read + st.heap_blks_hit >= pr.heap_blks + cl.relpages,
|
|
|
|
st.idx_blks_read + st.idx_blks_hit >= pr.idx_blks + 1
|
|
|
|
FROM pg_statio_user_tables AS st, pg_class AS cl, prevstats AS pr
|
|
|
|
WHERE st.relname='tenk2' AND cl.relname='tenk2';
|
2003-11-01 04:18:20 +01:00
|
|
|
?column? | ?column?
|
2003-09-13 18:44:49 +02:00
|
|
|
----------+----------
|
|
|
|
t | t
|
|
|
|
(1 row)
|
|
|
|
|
2015-02-20 03:36:50 +01:00
|
|
|
SELECT pr.snap_ts < pg_stat_get_snapshot_timestamp() as snapshot_newer
|
|
|
|
FROM prevstats AS pr;
|
|
|
|
snapshot_newer
|
|
|
|
----------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
2022-04-07 06:29:46 +02:00
|
|
|
COMMIT;
|
2022-04-07 09:03:58 +02:00
|
|
|
-----
|
|
|
|
-- Basic tests for track_functions
|
|
|
|
-----
|
|
|
|
CREATE FUNCTION stats_test_func1() RETURNS VOID LANGUAGE plpgsql AS $$BEGIN END;$$;
|
|
|
|
SELECT 'stats_test_func1()'::regprocedure::oid AS stats_test_func1_oid \gset
|
|
|
|
CREATE FUNCTION stats_test_func2() RETURNS VOID LANGUAGE plpgsql AS $$BEGIN END;$$;
|
|
|
|
SELECT 'stats_test_func2()'::regprocedure::oid AS stats_test_func2_oid \gset
|
|
|
|
-- test that stats are accumulated
|
|
|
|
BEGIN;
|
|
|
|
SET LOCAL stats_fetch_consistency = none;
|
|
|
|
SELECT pg_stat_get_function_calls(:stats_test_func1_oid);
|
|
|
|
pg_stat_get_function_calls
|
|
|
|
----------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_xact_function_calls(:stats_test_func1_oid);
|
|
|
|
pg_stat_get_xact_function_calls
|
|
|
|
---------------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_test_func1();
|
|
|
|
stats_test_func1
|
|
|
|
------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_xact_function_calls(:stats_test_func1_oid);
|
|
|
|
pg_stat_get_xact_function_calls
|
|
|
|
---------------------------------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_test_func1();
|
|
|
|
stats_test_func1
|
|
|
|
------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_xact_function_calls(:stats_test_func1_oid);
|
|
|
|
pg_stat_get_xact_function_calls
|
|
|
|
---------------------------------
|
|
|
|
2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_function_calls(:stats_test_func1_oid);
|
|
|
|
pg_stat_get_function_calls
|
|
|
|
----------------------------
|
|
|
|
0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
COMMIT;
|
|
|
|
-- Verify that function stats are not transactional
|
|
|
|
-- rolled back savepoint in committing transaction
|
|
|
|
BEGIN;
|
|
|
|
SELECT stats_test_func2();
|
|
|
|
stats_test_func2
|
|
|
|
------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SAVEPOINT foo;
|
|
|
|
SELECT stats_test_func2();
|
|
|
|
stats_test_func2
|
|
|
|
------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
ROLLBACK TO SAVEPOINT foo;
|
|
|
|
SELECT pg_stat_get_xact_function_calls(:stats_test_func2_oid);
|
|
|
|
pg_stat_get_xact_function_calls
|
|
|
|
---------------------------------
|
|
|
|
2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_test_func2();
|
|
|
|
stats_test_func2
|
|
|
|
------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
COMMIT;
|
|
|
|
-- rolled back transaction
|
|
|
|
BEGIN;
|
|
|
|
SELECT stats_test_func2();
|
|
|
|
stats_test_func2
|
|
|
|
------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
ROLLBACK;
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- check collected stats
|
|
|
|
SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid;
|
|
|
|
funcname | calls
|
|
|
|
------------------+-------
|
|
|
|
stats_test_func1 | 2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func2_oid;
|
|
|
|
funcname | calls
|
|
|
|
------------------+-------
|
|
|
|
stats_test_func2 | 4
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- check that a rolled back drop function stats leaves stats alive
|
|
|
|
BEGIN;
|
|
|
|
SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid;
|
|
|
|
funcname | calls
|
|
|
|
------------------+-------
|
|
|
|
stats_test_func1 | 2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
DROP FUNCTION stats_test_func1();
|
|
|
|
-- shouldn't be visible via view
|
|
|
|
SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid;
|
|
|
|
funcname | calls
|
|
|
|
----------+-------
|
|
|
|
(0 rows)
|
|
|
|
|
|
|
|
-- but still via oid access
|
|
|
|
SELECT pg_stat_get_function_calls(:stats_test_func1_oid);
|
|
|
|
pg_stat_get_function_calls
|
|
|
|
----------------------------
|
|
|
|
2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
ROLLBACK;
|
|
|
|
SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid;
|
|
|
|
funcname | calls
|
|
|
|
------------------+-------
|
|
|
|
stats_test_func1 | 2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_function_calls(:stats_test_func1_oid);
|
|
|
|
pg_stat_get_function_calls
|
|
|
|
----------------------------
|
|
|
|
2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- check that function dropped in main transaction leaves no stats behind
|
|
|
|
BEGIN;
|
|
|
|
DROP FUNCTION stats_test_func1();
|
|
|
|
COMMIT;
|
|
|
|
SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid;
|
|
|
|
funcname | calls
|
|
|
|
----------+-------
|
|
|
|
(0 rows)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_function_calls(:stats_test_func1_oid);
|
|
|
|
pg_stat_get_function_calls
|
|
|
|
----------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- check that function dropped in a subtransaction leaves no stats behind
|
|
|
|
BEGIN;
|
|
|
|
SELECT stats_test_func2();
|
|
|
|
stats_test_func2
|
|
|
|
------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SAVEPOINT a;
|
|
|
|
SELECT stats_test_func2();
|
|
|
|
stats_test_func2
|
|
|
|
------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SAVEPOINT b;
|
|
|
|
DROP FUNCTION stats_test_func2();
|
|
|
|
COMMIT;
|
|
|
|
SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func2_oid;
|
|
|
|
funcname | calls
|
|
|
|
----------+-------
|
|
|
|
(0 rows)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_function_calls(:stats_test_func2_oid);
|
|
|
|
pg_stat_get_function_calls
|
|
|
|
----------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- Check that stats for relations are dropped. For that we need to access stats
|
|
|
|
-- by oid after the DROP TABLE. Save oids.
|
|
|
|
CREATE TABLE drop_stats_test();
|
|
|
|
INSERT INTO drop_stats_test DEFAULT VALUES;
|
|
|
|
SELECT 'drop_stats_test'::regclass::oid AS drop_stats_test_oid \gset
|
|
|
|
CREATE TABLE drop_stats_test_xact();
|
|
|
|
INSERT INTO drop_stats_test_xact DEFAULT VALUES;
|
|
|
|
SELECT 'drop_stats_test_xact'::regclass::oid AS drop_stats_test_xact_oid \gset
|
|
|
|
CREATE TABLE drop_stats_test_subxact();
|
|
|
|
INSERT INTO drop_stats_test_subxact DEFAULT VALUES;
|
|
|
|
SELECT 'drop_stats_test_subxact'::regclass::oid AS drop_stats_test_subxact_oid \gset
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
DROP TABLE drop_stats_test;
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_oid);
|
|
|
|
pg_stat_get_xact_tuples_inserted
|
|
|
|
----------------------------------
|
|
|
|
0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- check that rollback protects against having stats dropped and that local
|
|
|
|
-- modifications don't pose a problem
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_tuples_inserted
|
|
|
|
-----------------------------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_xact_tuples_inserted
|
|
|
|
----------------------------------
|
|
|
|
0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
BEGIN;
|
|
|
|
INSERT INTO drop_stats_test_xact DEFAULT VALUES;
|
|
|
|
SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_xact_tuples_inserted
|
|
|
|
----------------------------------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
DROP TABLE drop_stats_test_xact;
|
|
|
|
SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_xact_tuples_inserted
|
|
|
|
----------------------------------
|
|
|
|
0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
ROLLBACK;
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_tuples_inserted
|
|
|
|
-----------------------------
|
|
|
|
2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- transactional drop
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_tuples_inserted
|
|
|
|
-----------------------------
|
|
|
|
2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
BEGIN;
|
|
|
|
INSERT INTO drop_stats_test_xact DEFAULT VALUES;
|
|
|
|
SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_xact_tuples_inserted
|
|
|
|
----------------------------------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
DROP TABLE drop_stats_test_xact;
|
|
|
|
SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_xact_tuples_inserted
|
|
|
|
----------------------------------
|
|
|
|
0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
COMMIT;
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid);
|
|
|
|
pg_stat_get_tuples_inserted
|
|
|
|
-----------------------------
|
|
|
|
0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- savepoint rollback (2 levels)
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
BEGIN;
|
|
|
|
INSERT INTO drop_stats_test_subxact DEFAULT VALUES;
|
|
|
|
SAVEPOINT sp1;
|
|
|
|
INSERT INTO drop_stats_test_subxact DEFAULT VALUES;
|
|
|
|
SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_subxact_oid);
|
|
|
|
pg_stat_get_xact_tuples_inserted
|
|
|
|
----------------------------------
|
|
|
|
2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SAVEPOINT sp2;
|
|
|
|
DROP TABLE drop_stats_test_subxact;
|
|
|
|
ROLLBACK TO SAVEPOINT sp2;
|
|
|
|
SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_subxact_oid);
|
|
|
|
pg_stat_get_xact_tuples_inserted
|
|
|
|
----------------------------------
|
|
|
|
2
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
COMMIT;
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
3
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- savepoint rollback (1 level)
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
3
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
BEGIN;
|
|
|
|
SAVEPOINT sp1;
|
|
|
|
DROP TABLE drop_stats_test_subxact;
|
|
|
|
SAVEPOINT sp2;
|
|
|
|
ROLLBACK TO SAVEPOINT sp1;
|
|
|
|
COMMIT;
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
3
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- and now actually drop
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
3
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
BEGIN;
|
|
|
|
SAVEPOINT sp1;
|
|
|
|
DROP TABLE drop_stats_test_subxact;
|
|
|
|
SAVEPOINT sp2;
|
|
|
|
RELEASE SAVEPOINT sp1;
|
|
|
|
COMMIT;
|
|
|
|
SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid);
|
|
|
|
pg_stat_get_live_tuples
|
|
|
|
-------------------------
|
|
|
|
0
|
|
|
|
(1 row)
|
|
|
|
|
2015-02-20 16:10:01 +01:00
|
|
|
DROP TABLE trunc_stats_test, trunc_stats_test1, trunc_stats_test2, trunc_stats_test3, trunc_stats_test4;
|
2017-05-13 15:42:12 +02:00
|
|
|
DROP TABLE prevstats;
|
2022-10-14 20:11:34 +02:00
|
|
|
-----
|
|
|
|
-- Test that last_seq_scan, last_idx_scan are correctly maintained
|
|
|
|
--
|
|
|
|
-- Perform test using a temporary table. That way autovacuum etc won't
|
|
|
|
-- interfere. To be able to check that timestamps increase, we sleep for 100ms
|
|
|
|
-- between tests, assuming that there aren't systems with a coarser timestamp
|
|
|
|
-- granularity.
|
|
|
|
-----
|
|
|
|
BEGIN;
|
|
|
|
CREATE TEMPORARY TABLE test_last_scan(idx_col int primary key, noidx_col int);
|
|
|
|
INSERT INTO test_last_scan(idx_col, noidx_col) VALUES(1, 1);
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
2022-11-08 02:50:09 +01:00
|
|
|
SELECT last_seq_scan, last_idx_scan FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass;
|
|
|
|
last_seq_scan | last_idx_scan
|
|
|
|
---------------+---------------
|
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
2022-10-14 20:11:34 +02:00
|
|
|
COMMIT;
|
|
|
|
SELECT pg_stat_reset_single_table_counters('test_last_scan'::regclass);
|
|
|
|
pg_stat_reset_single_table_counters
|
|
|
|
-------------------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT seq_scan, idx_scan FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass;
|
|
|
|
seq_scan | idx_scan
|
|
|
|
----------+----------
|
|
|
|
0 | 0
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- ensure we start out with exactly one index and sequential scan
|
|
|
|
BEGIN;
|
|
|
|
SET LOCAL enable_seqscan TO on;
|
|
|
|
SET LOCAL enable_indexscan TO on;
|
|
|
|
SET LOCAL enable_bitmapscan TO off;
|
|
|
|
EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE noidx_col = 1;
|
|
|
|
QUERY PLAN
|
|
|
|
----------------------------------
|
|
|
|
Aggregate
|
|
|
|
-> Seq Scan on test_last_scan
|
|
|
|
Filter: (noidx_col = 1)
|
|
|
|
(3 rows)
|
|
|
|
|
|
|
|
SELECT count(*) FROM test_last_scan WHERE noidx_col = 1;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SET LOCAL enable_seqscan TO off;
|
|
|
|
EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
|
|
|
|
QUERY PLAN
|
|
|
|
--------------------------------------------------------------
|
|
|
|
Aggregate
|
|
|
|
-> Index Scan using test_last_scan_pkey on test_last_scan
|
|
|
|
Index Cond: (idx_col = 1)
|
|
|
|
(3 rows)
|
|
|
|
|
|
|
|
SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
COMMIT;
|
|
|
|
-- fetch timestamps from before the next test
|
|
|
|
SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx
|
|
|
|
FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass \gset
|
|
|
|
SELECT pg_sleep(0.1); -- assume a minimum timestamp granularity of 100ms
|
|
|
|
pg_sleep
|
|
|
|
----------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- cause one sequential scan
|
|
|
|
BEGIN;
|
|
|
|
SET LOCAL enable_seqscan TO on;
|
|
|
|
SET LOCAL enable_indexscan TO off;
|
|
|
|
SET LOCAL enable_bitmapscan TO off;
|
|
|
|
EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE noidx_col = 1;
|
|
|
|
QUERY PLAN
|
|
|
|
----------------------------------
|
|
|
|
Aggregate
|
|
|
|
-> Seq Scan on test_last_scan
|
|
|
|
Filter: (noidx_col = 1)
|
|
|
|
(3 rows)
|
|
|
|
|
|
|
|
SELECT count(*) FROM test_last_scan WHERE noidx_col = 1;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
COMMIT;
|
|
|
|
-- check that just sequential scan stats were incremented
|
|
|
|
SELECT seq_scan, :'test_last_seq' < last_seq_scan AS seq_ok, idx_scan, :'test_last_idx' = last_idx_scan AS idx_ok
|
|
|
|
FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass;
|
|
|
|
seq_scan | seq_ok | idx_scan | idx_ok
|
|
|
|
----------+--------+----------+--------
|
|
|
|
2 | t | 1 | t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- fetch timestamps from before the next test
|
|
|
|
SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx
|
|
|
|
FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass \gset
|
|
|
|
SELECT pg_sleep(0.1);
|
|
|
|
pg_sleep
|
|
|
|
----------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- cause one index scan
|
|
|
|
BEGIN;
|
|
|
|
SET LOCAL enable_seqscan TO off;
|
|
|
|
SET LOCAL enable_indexscan TO on;
|
|
|
|
SET LOCAL enable_bitmapscan TO off;
|
|
|
|
EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
|
|
|
|
QUERY PLAN
|
|
|
|
--------------------------------------------------------------
|
|
|
|
Aggregate
|
|
|
|
-> Index Scan using test_last_scan_pkey on test_last_scan
|
|
|
|
Index Cond: (idx_col = 1)
|
|
|
|
(3 rows)
|
|
|
|
|
|
|
|
SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
COMMIT;
|
|
|
|
-- check that just index scan stats were incremented
|
|
|
|
SELECT seq_scan, :'test_last_seq' = last_seq_scan AS seq_ok, idx_scan, :'test_last_idx' < last_idx_scan AS idx_ok
|
|
|
|
FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass;
|
|
|
|
seq_scan | seq_ok | idx_scan | idx_ok
|
|
|
|
----------+--------+----------+--------
|
|
|
|
2 | t | 2 | t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- fetch timestamps from before the next test
|
|
|
|
SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx
|
|
|
|
FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass \gset
|
|
|
|
SELECT pg_sleep(0.1);
|
|
|
|
pg_sleep
|
|
|
|
----------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- cause one bitmap index scan
|
|
|
|
BEGIN;
|
|
|
|
SET LOCAL enable_seqscan TO off;
|
|
|
|
SET LOCAL enable_indexscan TO off;
|
|
|
|
SET LOCAL enable_bitmapscan TO on;
|
|
|
|
EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
|
|
|
|
QUERY PLAN
|
|
|
|
------------------------------------------------------
|
|
|
|
Aggregate
|
|
|
|
-> Bitmap Heap Scan on test_last_scan
|
|
|
|
Recheck Cond: (idx_col = 1)
|
|
|
|
-> Bitmap Index Scan on test_last_scan_pkey
|
|
|
|
Index Cond: (idx_col = 1)
|
|
|
|
(5 rows)
|
|
|
|
|
|
|
|
SELECT count(*) FROM test_last_scan WHERE idx_col = 1;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
COMMIT;
|
|
|
|
-- check that just index scan stats were incremented
|
|
|
|
SELECT seq_scan, :'test_last_seq' = last_seq_scan AS seq_ok, idx_scan, :'test_last_idx' < last_idx_scan AS idx_ok
|
|
|
|
FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass;
|
|
|
|
seq_scan | seq_ok | idx_scan | idx_ok
|
|
|
|
----------+--------+----------+--------
|
|
|
|
2 | t | 3 | t
|
|
|
|
(1 row)
|
|
|
|
|
2022-07-18 19:16:23 +02:00
|
|
|
-----
|
|
|
|
-- Test that various stats views are being properly populated
|
|
|
|
-----
|
|
|
|
-- Test that sessions is incremented when a new session is started in pg_stat_database
|
|
|
|
SELECT sessions AS db_stat_sessions FROM pg_stat_database WHERE datname = (SELECT current_database()) \gset
|
|
|
|
\c
|
2022-09-16 19:07:12 +02:00
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
2022-07-18 19:16:23 +02:00
|
|
|
SELECT sessions > :db_stat_sessions FROM pg_stat_database WHERE datname = (SELECT current_database());
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- Test pg_stat_bgwriter checkpointer-related stats, together with pg_stat_wal
|
|
|
|
SELECT checkpoints_req AS rqst_ckpts_before FROM pg_stat_bgwriter \gset
|
2022-09-29 18:14:39 +02:00
|
|
|
-- Test pg_stat_wal (and make a temp table so our temp schema exists)
|
2022-07-18 19:16:23 +02:00
|
|
|
SELECT wal_bytes AS wal_bytes_before FROM pg_stat_wal \gset
|
2022-09-29 18:14:39 +02:00
|
|
|
CREATE TEMP TABLE test_stats_temp AS SELECT 17;
|
2022-07-18 19:16:23 +02:00
|
|
|
DROP TABLE test_stats_temp;
|
|
|
|
-- Checkpoint twice: The checkpointer reports stats after reporting completion
|
|
|
|
-- of the checkpoint. But after a second checkpoint we'll see at least the
|
|
|
|
-- results of the first.
|
|
|
|
CHECKPOINT;
|
|
|
|
CHECKPOINT;
|
|
|
|
SELECT checkpoints_req > :rqst_ckpts_before FROM pg_stat_bgwriter;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT wal_bytes > :wal_bytes_before FROM pg_stat_wal;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
2022-09-29 18:14:39 +02:00
|
|
|
-- Test pg_stat_get_backend_idset() and some allied functions.
|
|
|
|
-- In particular, verify that their notion of backend ID matches
|
|
|
|
-- our temp schema index.
|
|
|
|
SELECT (current_schemas(true))[1] = ('pg_temp_' || beid::text) AS match
|
|
|
|
FROM pg_stat_get_backend_idset() beid
|
|
|
|
WHERE pg_stat_get_backend_pid(beid) = pg_backend_pid();
|
|
|
|
match
|
|
|
|
-------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
2022-07-18 19:16:23 +02:00
|
|
|
-----
|
|
|
|
-- Test that resetting stats works for reset timestamp
|
|
|
|
-----
|
|
|
|
-- Test that reset_slru with a specified SLRU works.
|
|
|
|
SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'CommitTs' \gset
|
|
|
|
SELECT stats_reset AS slru_notify_reset_ts FROM pg_stat_slru WHERE name = 'Notify' \gset
|
|
|
|
SELECT pg_stat_reset_slru('CommitTs');
|
|
|
|
pg_stat_reset_slru
|
|
|
|
--------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset > :'slru_commit_ts_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'CommitTs';
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'CommitTs' \gset
|
|
|
|
-- Test that multiple SLRUs are reset when no specific SLRU provided to reset function
|
|
|
|
SELECT pg_stat_reset_slru(NULL);
|
|
|
|
pg_stat_reset_slru
|
|
|
|
--------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset > :'slru_commit_ts_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'CommitTs';
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset > :'slru_notify_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'Notify';
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- Test that reset_shared with archiver specified as the stats type works
|
|
|
|
SELECT stats_reset AS archiver_reset_ts FROM pg_stat_archiver \gset
|
|
|
|
SELECT pg_stat_reset_shared('archiver');
|
|
|
|
pg_stat_reset_shared
|
|
|
|
----------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset > :'archiver_reset_ts'::timestamptz FROM pg_stat_archiver;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset AS archiver_reset_ts FROM pg_stat_archiver \gset
|
|
|
|
-- Test that reset_shared with bgwriter specified as the stats type works
|
|
|
|
SELECT stats_reset AS bgwriter_reset_ts FROM pg_stat_bgwriter \gset
|
|
|
|
SELECT pg_stat_reset_shared('bgwriter');
|
|
|
|
pg_stat_reset_shared
|
|
|
|
----------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset > :'bgwriter_reset_ts'::timestamptz FROM pg_stat_bgwriter;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset AS bgwriter_reset_ts FROM pg_stat_bgwriter \gset
|
|
|
|
-- Test that reset_shared with wal specified as the stats type works
|
|
|
|
SELECT stats_reset AS wal_reset_ts FROM pg_stat_wal \gset
|
|
|
|
SELECT pg_stat_reset_shared('wal');
|
|
|
|
pg_stat_reset_shared
|
|
|
|
----------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset > :'wal_reset_ts'::timestamptz FROM pg_stat_wal;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset AS wal_reset_ts FROM pg_stat_wal \gset
|
|
|
|
-- Test that reset_shared with no specified stats type doesn't reset anything
|
|
|
|
SELECT pg_stat_reset_shared(NULL);
|
|
|
|
pg_stat_reset_shared
|
|
|
|
----------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset = :'archiver_reset_ts'::timestamptz FROM pg_stat_archiver;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset = :'bgwriter_reset_ts'::timestamptz FROM pg_stat_bgwriter;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset = :'wal_reset_ts'::timestamptz FROM pg_stat_wal;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- Test that reset works for pg_stat_database
|
|
|
|
-- Since pg_stat_database stats_reset starts out as NULL, reset it once first so we have something to compare it to
|
|
|
|
SELECT pg_stat_reset();
|
|
|
|
pg_stat_reset
|
|
|
|
---------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset AS db_reset_ts FROM pg_stat_database WHERE datname = (SELECT current_database()) \gset
|
|
|
|
SELECT pg_stat_reset();
|
|
|
|
pg_stat_reset
|
|
|
|
---------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT stats_reset > :'db_reset_ts'::timestamptz FROM pg_stat_database WHERE datname = (SELECT current_database());
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
----
|
|
|
|
-- pg_stat_get_snapshot_timestamp behavior
|
|
|
|
----
|
|
|
|
BEGIN;
|
|
|
|
SET LOCAL stats_fetch_consistency = snapshot;
|
|
|
|
-- no snapshot yet, return NULL
|
|
|
|
SELECT pg_stat_get_snapshot_timestamp();
|
|
|
|
pg_stat_get_snapshot_timestamp
|
|
|
|
--------------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- any attempt at accessing stats will build snapshot
|
|
|
|
SELECT pg_stat_get_function_calls(0);
|
|
|
|
pg_stat_get_function_calls
|
|
|
|
----------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_snapshot_timestamp() >= NOW();
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- shows NULL again after clearing
|
|
|
|
SELECT pg_stat_clear_snapshot();
|
|
|
|
pg_stat_clear_snapshot
|
|
|
|
------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_snapshot_timestamp();
|
|
|
|
pg_stat_get_snapshot_timestamp
|
|
|
|
--------------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
COMMIT;
|
|
|
|
----
|
|
|
|
-- pg_stat_have_stats behavior
|
|
|
|
----
|
|
|
|
-- fixed-numbered stats exist
|
|
|
|
SELECT pg_stat_have_stats('bgwriter', 0, 0);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- unknown stats kinds error out
|
|
|
|
SELECT pg_stat_have_stats('zaphod', 0, 0);
|
|
|
|
ERROR: invalid statistics kind: "zaphod"
|
|
|
|
-- db stats have objoid 0
|
2022-09-23 22:00:55 +02:00
|
|
|
SELECT pg_stat_have_stats('database', :dboid, 1);
|
2022-07-18 19:16:23 +02:00
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
f
|
|
|
|
(1 row)
|
|
|
|
|
2022-09-23 22:00:55 +02:00
|
|
|
SELECT pg_stat_have_stats('database', :dboid, 0);
|
2022-07-18 19:16:23 +02:00
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
2022-09-23 22:00:55 +02:00
|
|
|
-- pg_stat_have_stats returns true for committed index creation
|
|
|
|
CREATE table stats_test_tab1 as select generate_series(1,10) a;
|
|
|
|
CREATE index stats_test_idx1 on stats_test_tab1(a);
|
|
|
|
SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset
|
|
|
|
SET enable_seqscan TO off;
|
|
|
|
select a from stats_test_tab1 where a = 3;
|
|
|
|
a
|
|
|
|
---
|
|
|
|
3
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- pg_stat_have_stats returns false for dropped index with stats
|
|
|
|
SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
DROP index stats_test_idx1;
|
|
|
|
SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
f
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- pg_stat_have_stats returns false for rolled back index creation
|
|
|
|
BEGIN;
|
|
|
|
CREATE index stats_test_idx1 on stats_test_tab1(a);
|
|
|
|
SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset
|
|
|
|
select a from stats_test_tab1 where a = 3;
|
|
|
|
a
|
|
|
|
---
|
|
|
|
3
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
ROLLBACK;
|
|
|
|
SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
f
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- pg_stat_have_stats returns true for reindex CONCURRENTLY
|
|
|
|
CREATE index stats_test_idx1 on stats_test_tab1(a);
|
|
|
|
SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset
|
|
|
|
select a from stats_test_tab1 where a = 3;
|
|
|
|
a
|
|
|
|
---
|
|
|
|
3
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
REINDEX index CONCURRENTLY stats_test_idx1;
|
|
|
|
-- false for previous oid
|
|
|
|
SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
f
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- true for new oid
|
|
|
|
SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset
|
|
|
|
SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- pg_stat_have_stats returns true for a rolled back drop index with stats
|
|
|
|
BEGIN;
|
|
|
|
SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
DROP index stats_test_idx1;
|
|
|
|
ROLLBACK;
|
|
|
|
SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- put enable_seqscan back to on
|
|
|
|
SET enable_seqscan TO on;
|
2022-07-18 19:16:23 +02:00
|
|
|
-- ensure that stats accessors handle NULL input correctly
|
|
|
|
SELECT pg_stat_get_replication_slot(NULL);
|
|
|
|
pg_stat_get_replication_slot
|
|
|
|
------------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_subscription_stats(NULL);
|
|
|
|
pg_stat_get_subscription_stats
|
|
|
|
--------------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
2023-02-11 18:54:44 +01:00
|
|
|
-- Test that the following operations are tracked in pg_stat_io:
|
|
|
|
-- - reads of target blocks into shared buffers
|
|
|
|
-- - writes of shared buffers to permanent storage
|
|
|
|
-- - extends of relations using shared buffers
|
|
|
|
-- - fsyncs done to ensure the durability of data dirtying shared buffers
|
2023-03-31 04:22:40 +02:00
|
|
|
-- - shared buffer hits
|
2023-02-11 18:54:44 +01:00
|
|
|
-- There is no test for blocks evicted from shared buffers, because we cannot
|
|
|
|
-- be sure of the state of shared buffers at the point the test is run.
|
|
|
|
-- Create a regular table and insert some data to generate IOCONTEXT_NORMAL
|
|
|
|
-- extends.
|
|
|
|
SELECT sum(extends) AS io_sum_shared_before_extends
|
|
|
|
FROM pg_stat_io WHERE io_context = 'normal' AND io_object = 'relation' \gset
|
2023-03-21 02:16:06 +01:00
|
|
|
SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs
|
|
|
|
FROM pg_stat_io
|
|
|
|
WHERE io_object = 'relation' \gset io_sum_shared_before_
|
2023-02-11 18:54:44 +01:00
|
|
|
CREATE TABLE test_io_shared(a int);
|
|
|
|
INSERT INTO test_io_shared SELECT i FROM generate_series(1,100)i;
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT sum(extends) AS io_sum_shared_after_extends
|
|
|
|
FROM pg_stat_io WHERE io_context = 'normal' AND io_object = 'relation' \gset
|
|
|
|
SELECT :io_sum_shared_after_extends > :io_sum_shared_before_extends;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- After a checkpoint, there should be some additional IOCONTEXT_NORMAL writes
|
|
|
|
-- and fsyncs.
|
|
|
|
-- See comment above for rationale for two explicit CHECKPOINTs.
|
|
|
|
CHECKPOINT;
|
|
|
|
CHECKPOINT;
|
|
|
|
SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs
|
|
|
|
FROM pg_stat_io
|
2023-03-21 02:16:06 +01:00
|
|
|
WHERE io_object = 'relation' \gset io_sum_shared_after_
|
2023-02-11 18:54:44 +01:00
|
|
|
SELECT :io_sum_shared_after_writes > :io_sum_shared_before_writes;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT current_setting('fsync') = 'off'
|
|
|
|
OR :io_sum_shared_after_fsyncs > :io_sum_shared_before_fsyncs;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- Change the tablespace so that the table is rewritten directly, then SELECT
|
|
|
|
-- from it to cause it to be read back into shared buffers.
|
|
|
|
SELECT sum(reads) AS io_sum_shared_before_reads
|
|
|
|
FROM pg_stat_io WHERE io_context = 'normal' AND io_object = 'relation' \gset
|
2023-03-07 19:03:42 +01:00
|
|
|
-- Do this in a transaction to prevent spurious failures due to concurrent accesses to our newly
|
|
|
|
-- rewritten table, e.g. by autovacuum.
|
|
|
|
BEGIN;
|
2023-02-11 18:54:44 +01:00
|
|
|
ALTER TABLE test_io_shared SET TABLESPACE regress_tblspace;
|
|
|
|
-- SELECT from the table so that the data is read into shared buffers and
|
|
|
|
-- io_context 'normal', io_object 'relation' reads are counted.
|
|
|
|
SELECT COUNT(*) FROM test_io_shared;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
100
|
|
|
|
(1 row)
|
|
|
|
|
2023-03-07 19:03:42 +01:00
|
|
|
COMMIT;
|
2023-02-11 18:54:44 +01:00
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT sum(reads) AS io_sum_shared_after_reads
|
|
|
|
FROM pg_stat_io WHERE io_context = 'normal' AND io_object = 'relation' \gset
|
|
|
|
SELECT :io_sum_shared_after_reads > :io_sum_shared_before_reads;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
2023-03-31 04:22:40 +02:00
|
|
|
SELECT sum(hits) AS io_sum_shared_before_hits
|
|
|
|
FROM pg_stat_io WHERE io_context = 'normal' AND io_object = 'relation' \gset
|
|
|
|
-- Select from the table again to count hits.
|
|
|
|
-- Ensure we generate hits by forcing a nested loop self-join with no
|
|
|
|
-- materialize node. The outer side's buffer will stay pinned, preventing its
|
|
|
|
-- eviction, while we loop through the inner side and generate hits.
|
|
|
|
BEGIN;
|
|
|
|
SET LOCAL enable_nestloop TO on; SET LOCAL enable_mergejoin TO off;
|
|
|
|
SET LOCAL enable_hashjoin TO off; SET LOCAL enable_material TO off;
|
|
|
|
-- ensure plan stays as we expect it to
|
|
|
|
EXPLAIN (COSTS OFF) SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a);
|
|
|
|
QUERY PLAN
|
|
|
|
-------------------------------------------
|
|
|
|
Aggregate
|
|
|
|
-> Nested Loop
|
|
|
|
Join Filter: (t1.a = t2.a)
|
|
|
|
-> Seq Scan on test_io_shared t1
|
|
|
|
-> Seq Scan on test_io_shared t2
|
|
|
|
(5 rows)
|
|
|
|
|
|
|
|
SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a);
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
100
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
COMMIT;
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT sum(hits) AS io_sum_shared_after_hits
|
|
|
|
FROM pg_stat_io WHERE io_context = 'normal' AND io_object = 'relation' \gset
|
|
|
|
SELECT :io_sum_shared_after_hits > :io_sum_shared_before_hits;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
2023-02-11 18:54:44 +01:00
|
|
|
DROP TABLE test_io_shared;
|
|
|
|
-- Test that the follow IOCONTEXT_LOCAL IOOps are tracked in pg_stat_io:
|
|
|
|
-- - eviction of local buffers in order to reuse them
|
|
|
|
-- - reads of temporary table blocks into local buffers
|
|
|
|
-- - writes of local buffers to permanent storage
|
|
|
|
-- - extends of temporary tables
|
|
|
|
-- Set temp_buffers to its minimum so that we can trigger writes with fewer
|
|
|
|
-- inserted tuples. Do so in a new session in case temporary tables have been
|
|
|
|
-- accessed by previous tests in this session.
|
|
|
|
\c
|
|
|
|
SET temp_buffers TO 100;
|
|
|
|
CREATE TEMPORARY TABLE test_io_local(a int, b TEXT);
|
|
|
|
SELECT sum(extends) AS extends, sum(evictions) AS evictions, sum(writes) AS writes
|
|
|
|
FROM pg_stat_io
|
|
|
|
WHERE io_context = 'normal' AND io_object = 'temp relation' \gset io_sum_local_before_
|
|
|
|
-- Insert tuples into the temporary table, generating extends in the stats.
|
|
|
|
-- Insert enough values that we need to reuse and write out dirty local
|
|
|
|
-- buffers, generating evictions and writes.
|
|
|
|
INSERT INTO test_io_local SELECT generate_series(1, 5000) as id, repeat('a', 200);
|
|
|
|
-- Ensure the table is large enough to exceed our temp_buffers setting.
|
|
|
|
SELECT pg_relation_size('test_io_local') / current_setting('block_size')::int8 > 100;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT sum(reads) AS io_sum_local_before_reads
|
|
|
|
FROM pg_stat_io WHERE io_context = 'normal' AND io_object = 'temp relation' \gset
|
|
|
|
-- Read in evicted buffers, generating reads.
|
|
|
|
SELECT COUNT(*) FROM test_io_local;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
5000
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT sum(evictions) AS evictions,
|
|
|
|
sum(reads) AS reads,
|
|
|
|
sum(writes) AS writes,
|
|
|
|
sum(extends) AS extends
|
|
|
|
FROM pg_stat_io
|
|
|
|
WHERE io_context = 'normal' AND io_object = 'temp relation' \gset io_sum_local_after_
|
|
|
|
SELECT :io_sum_local_after_evictions > :io_sum_local_before_evictions,
|
|
|
|
:io_sum_local_after_reads > :io_sum_local_before_reads,
|
|
|
|
:io_sum_local_after_writes > :io_sum_local_before_writes,
|
|
|
|
:io_sum_local_after_extends > :io_sum_local_before_extends;
|
|
|
|
?column? | ?column? | ?column? | ?column?
|
|
|
|
----------+----------+----------+----------
|
|
|
|
t | t | t | t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- Change the tablespaces so that the temporary table is rewritten to other
|
|
|
|
-- local buffers, exercising a different codepath than standard local buffer
|
|
|
|
-- writes.
|
|
|
|
ALTER TABLE test_io_local SET TABLESPACE regress_tblspace;
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT sum(writes) AS io_sum_local_new_tblspc_writes
|
|
|
|
FROM pg_stat_io WHERE io_context = 'normal' AND io_object = 'temp relation' \gset
|
|
|
|
SELECT :io_sum_local_new_tblspc_writes > :io_sum_local_after_writes;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
RESET temp_buffers;
|
|
|
|
-- Test that reuse of strategy buffers and reads of blocks into these reused
|
|
|
|
-- buffers while VACUUMing are tracked in pg_stat_io.
|
|
|
|
-- Set wal_skip_threshold smaller than the expected size of
|
|
|
|
-- test_io_vac_strategy so that, even if wal_level is minimal, VACUUM FULL will
|
|
|
|
-- fsync the newly rewritten test_io_vac_strategy instead of writing it to WAL.
|
|
|
|
-- Writing it to WAL will result in the newly written relation pages being in
|
|
|
|
-- shared buffers -- preventing us from testing BAS_VACUUM BufferAccessStrategy
|
|
|
|
-- reads.
|
|
|
|
SET wal_skip_threshold = '1 kB';
|
|
|
|
SELECT sum(reuses) AS reuses, sum(reads) AS reads
|
|
|
|
FROM pg_stat_io WHERE io_context = 'vacuum' \gset io_sum_vac_strategy_before_
|
|
|
|
CREATE TABLE test_io_vac_strategy(a int, b int) WITH (autovacuum_enabled = 'false');
|
|
|
|
INSERT INTO test_io_vac_strategy SELECT i, i from generate_series(1, 8000)i;
|
|
|
|
-- Ensure that the next VACUUM will need to perform IO by rewriting the table
|
|
|
|
-- first with VACUUM (FULL).
|
|
|
|
VACUUM (FULL) test_io_vac_strategy;
|
|
|
|
VACUUM (PARALLEL 0) test_io_vac_strategy;
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT sum(reuses) AS reuses, sum(reads) AS reads
|
|
|
|
FROM pg_stat_io WHERE io_context = 'vacuum' \gset io_sum_vac_strategy_after_
|
|
|
|
SELECT :io_sum_vac_strategy_after_reads > :io_sum_vac_strategy_before_reads,
|
|
|
|
:io_sum_vac_strategy_after_reuses > :io_sum_vac_strategy_before_reuses;
|
|
|
|
?column? | ?column?
|
|
|
|
----------+----------
|
|
|
|
t | t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
RESET wal_skip_threshold;
|
|
|
|
-- Test that extends done by a CTAS, which uses a BAS_BULKWRITE
|
|
|
|
-- BufferAccessStrategy, are tracked in pg_stat_io.
|
|
|
|
SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_before
|
|
|
|
FROM pg_stat_io WHERE io_context = 'bulkwrite' \gset
|
|
|
|
CREATE TABLE test_io_bulkwrite_strategy AS SELECT i FROM generate_series(1,100)i;
|
|
|
|
SELECT pg_stat_force_next_flush();
|
|
|
|
pg_stat_force_next_flush
|
|
|
|
--------------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_after
|
|
|
|
FROM pg_stat_io WHERE io_context = 'bulkwrite' \gset
|
|
|
|
SELECT :io_sum_bulkwrite_strategy_extends_after > :io_sum_bulkwrite_strategy_extends_before;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
-- Test IO stats reset
|
|
|
|
SELECT pg_stat_have_stats('io', 0, 0);
|
|
|
|
pg_stat_have_stats
|
|
|
|
--------------------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
2023-03-31 04:22:40 +02:00
|
|
|
SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(hits) AS io_stats_pre_reset
|
2023-02-11 18:54:44 +01:00
|
|
|
FROM pg_stat_io \gset
|
|
|
|
SELECT pg_stat_reset_shared('io');
|
|
|
|
pg_stat_reset_shared
|
|
|
|
----------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
2023-03-31 04:22:40 +02:00
|
|
|
SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(hits) AS io_stats_post_reset
|
2023-02-11 18:54:44 +01:00
|
|
|
FROM pg_stat_io \gset
|
|
|
|
SELECT :io_stats_post_reset < :io_stats_pre_reset;
|
|
|
|
?column?
|
|
|
|
----------
|
|
|
|
t
|
|
|
|
(1 row)
|
|
|
|
|
Ignore BRIN indexes when checking for HOT updates
When determining whether an index update may be skipped by using HOT, we
can ignore attributes indexed by block summarizing indexes without
references to individual tuples that need to be cleaned up.
A new type TU_UpdateIndexes provides a signal to the executor to
determine which indexes to update - no indexes, all indexes, or only the
summarizing indexes.
This also removes rd_indexattr list, and replaces it with rd_attrsvalid
flag. The list was not used anywhere, and a simple flag is sufficient.
This was originally committed as 5753d4ee32, but then got reverted by
e3fcca0d0d because of correctness issues.
Original patch by Josef Simanek, various fixes and improvements by Tomas
Vondra and me.
Authors: Matthias van de Meent, Josef Simanek, Tomas Vondra
Reviewed-by: Tomas Vondra, Alvaro Herrera
Discussion: https://postgr.es/m/05ebcb44-f383-86e3-4f31-0a97a55634cf@enterprisedb.com
Discussion: https://postgr.es/m/CAFp7QwpMRGcDAQumN7onN9HjrJ3u4X3ZRXdGFT0K5G2JWvnbWg%40mail.gmail.com
2023-03-20 10:34:07 +01:00
|
|
|
-- test BRIN index doesn't block HOT update
|
|
|
|
CREATE TABLE brin_hot (
|
|
|
|
id integer PRIMARY KEY,
|
|
|
|
val integer NOT NULL
|
|
|
|
) WITH (autovacuum_enabled = off, fillfactor = 70);
|
|
|
|
INSERT INTO brin_hot SELECT *, 0 FROM generate_series(1, 235);
|
|
|
|
CREATE INDEX val_brin ON brin_hot using brin(val);
|
|
|
|
CREATE FUNCTION wait_for_hot_stats() RETURNS void AS $$
|
|
|
|
DECLARE
|
|
|
|
start_time timestamptz := clock_timestamp();
|
|
|
|
updated bool;
|
|
|
|
BEGIN
|
|
|
|
-- we don't want to wait forever; loop will exit after 30 seconds
|
|
|
|
FOR i IN 1 .. 300 LOOP
|
|
|
|
SELECT (pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid) > 0) INTO updated;
|
|
|
|
EXIT WHEN updated;
|
|
|
|
|
|
|
|
-- wait a little
|
|
|
|
PERFORM pg_sleep_for('100 milliseconds');
|
|
|
|
-- reset stats snapshot so we can test again
|
|
|
|
PERFORM pg_stat_clear_snapshot();
|
|
|
|
END LOOP;
|
|
|
|
-- report time waited in postmaster log (where it won't change test output)
|
|
|
|
RAISE log 'wait_for_hot_stats delayed % seconds',
|
|
|
|
EXTRACT(epoch FROM clock_timestamp() - start_time);
|
|
|
|
END
|
|
|
|
$$ LANGUAGE plpgsql;
|
|
|
|
UPDATE brin_hot SET val = -3 WHERE id = 42;
|
|
|
|
-- We can't just call wait_for_hot_stats() at this point, because we only
|
|
|
|
-- transmit stats when the session goes idle, and we probably didn't
|
|
|
|
-- transmit the last couple of counts yet thanks to the rate-limiting logic
|
|
|
|
-- in pgstat_report_stat(). But instead of waiting for the rate limiter's
|
|
|
|
-- timeout to elapse, let's just start a new session. The old one will
|
|
|
|
-- then send its stats before dying.
|
|
|
|
\c -
|
|
|
|
SELECT wait_for_hot_stats();
|
|
|
|
wait_for_hot_stats
|
|
|
|
--------------------
|
|
|
|
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid);
|
|
|
|
pg_stat_get_tuples_hot_updated
|
|
|
|
--------------------------------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
DROP TABLE brin_hot;
|
|
|
|
DROP FUNCTION wait_for_hot_stats();
|
|
|
|
-- Test handling of index predicates - updating attributes in precicates
|
|
|
|
-- should not block HOT when summarizing indexes are involved. We update
|
|
|
|
-- a row that was not indexed due to the index predicate, and becomes
|
|
|
|
-- indexable - the HOT-updated tuple is forwarded to the BRIN index.
|
|
|
|
CREATE TABLE brin_hot_2 (a int, b int);
|
|
|
|
INSERT INTO brin_hot_2 VALUES (1, 100);
|
|
|
|
CREATE INDEX ON brin_hot_2 USING brin (b) WHERE a = 2;
|
|
|
|
UPDATE brin_hot_2 SET a = 2;
|
|
|
|
EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100;
|
|
|
|
QUERY PLAN
|
|
|
|
-----------------------------------
|
|
|
|
Seq Scan on brin_hot_2
|
|
|
|
Filter: ((a = 2) AND (b = 100))
|
|
|
|
(2 rows)
|
|
|
|
|
|
|
|
SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
SET enable_seqscan = off;
|
|
|
|
EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100;
|
|
|
|
QUERY PLAN
|
|
|
|
---------------------------------------------
|
|
|
|
Bitmap Heap Scan on brin_hot_2
|
|
|
|
Recheck Cond: ((b = 100) AND (a = 2))
|
|
|
|
-> Bitmap Index Scan on brin_hot_2_b_idx
|
|
|
|
Index Cond: (b = 100)
|
|
|
|
(4 rows)
|
|
|
|
|
|
|
|
SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
1
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
DROP TABLE brin_hot_2;
|
|
|
|
-- Test that updates to indexed columns are still propagated to the
|
|
|
|
-- BRIN column.
|
|
|
|
-- https://postgr.es/m/05ebcb44-f383-86e3-4f31-0a97a55634cf@enterprisedb.com
|
|
|
|
CREATE TABLE brin_hot_3 (a int, filler text) WITH (fillfactor = 10);
|
|
|
|
INSERT INTO brin_hot_3 SELECT 1, repeat(' ', 500) FROM generate_series(1, 20);
|
|
|
|
CREATE INDEX ON brin_hot_3 USING brin (a) WITH (pages_per_range = 1);
|
|
|
|
UPDATE brin_hot_3 SET a = 2;
|
|
|
|
EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_3 WHERE a = 2;
|
|
|
|
QUERY PLAN
|
|
|
|
---------------------------------------------
|
|
|
|
Bitmap Heap Scan on brin_hot_3
|
|
|
|
Recheck Cond: (a = 2)
|
|
|
|
-> Bitmap Index Scan on brin_hot_3_a_idx
|
|
|
|
Index Cond: (a = 2)
|
|
|
|
(4 rows)
|
|
|
|
|
|
|
|
SELECT COUNT(*) FROM brin_hot_3 WHERE a = 2;
|
|
|
|
count
|
|
|
|
-------
|
|
|
|
20
|
|
|
|
(1 row)
|
|
|
|
|
|
|
|
DROP TABLE brin_hot_3;
|
|
|
|
SET enable_seqscan = on;
|
2003-09-13 18:44:49 +02:00
|
|
|
-- End of Stats Test
|