Use REGBUF_NO_CHANGE at one more place in the hash index.

Commit 00d7fb5e2e started to use REGBUF_NO_CHANGE at a few places in the
code where we register the buffer before marking it dirty, but missed
updating one of the code flows in the hash index where we free the
overflow page without any live tuples on it.

Author: Amit Kapila and Hayato Kuroda
Discussion: http://postgr.es/m/f045c8f7-ee24-ead6-3679-c04a43d21351@gmail.com
This commit is contained in:
Amit Kapila 2023-11-13 14:08:26 +05:30
parent 7606175991
commit 861f86beea
4 changed files with 91 additions and 2 deletions

View File

@ -655,7 +655,10 @@ hash_xlog_squeeze_page(XLogReaderState *record)
*/
(void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
action = XLogReadBufferForRedo(record, 1, &writebuf);
if (xldata->ntups > 0 || xldata->is_prev_bucket_same_wrt)
action = XLogReadBufferForRedo(record, 1, &writebuf);
else
action = BLK_NOTFOUND;
}
/* replay the record for adding entries in overflow buffer */

View File

@ -668,14 +668,31 @@ _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf,
XLogRegisterBuffer(0, bucketbuf, flags);
}
XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD);
if (xlrec.ntups > 0)
{
XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD);
XLogRegisterBufData(1, (char *) itup_offsets,
nitups * sizeof(OffsetNumber));
for (i = 0; i < nitups; i++)
XLogRegisterBufData(1, (char *) itups[i], tups_size[i]);
}
else if (xlrec.is_prim_bucket_same_wrt || xlrec.is_prev_bucket_same_wrt)
{
uint8 wbuf_flags;
/*
* A write buffer needs to be registered even if no tuples are
* added to it to ensure that we can acquire a cleanup lock on it
* if it is the same as primary bucket buffer or update the
* nextblkno if it is same as the previous bucket buffer.
*/
Assert(xlrec.ntups == 0);
wbuf_flags = REGBUF_STANDARD;
if (!xlrec.is_prev_bucket_same_wrt)
wbuf_flags |= REGBUF_NO_CHANGE;
XLogRegisterBuffer(1, wbuf, wbuf_flags);
}
XLogRegisterBuffer(2, ovflbuf, REGBUF_STANDARD);

View File

@ -271,6 +271,35 @@ ALTER INDEX hash_split_index SET (fillfactor = 10);
REINDEX INDEX hash_split_index;
-- Clean up.
DROP TABLE hash_split_heap;
-- Testcases for removing overflow pages.
CREATE TABLE hash_cleanup_heap(keycol INT);
CREATE INDEX hash_cleanup_index on hash_cleanup_heap USING HASH (keycol);
-- Insert tuples to both the primary bucket page and overflow pages.
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
-- Fill overflow pages by "dead" tuples.
BEGIN;
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
ROLLBACK;
-- Checkpoint will ensure that all hash buffers are cleaned before we try
-- to remove overflow pages.
CHECKPOINT;
-- This will squeeze the bucket and remove overflow pages.
VACUUM hash_cleanup_heap;
TRUNCATE hash_cleanup_heap;
-- Insert a few tuples so that the primary bucket page doesn't get full and
-- tuples can be moved to it.
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
-- Fill overflow pages by "dead" tuples.
BEGIN;
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
ROLLBACK;
-- And insert some tuples again. During squeeze operation, these will be moved
-- to the primary bucket allowing to test freeing intermediate overflow pages.
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
CHECKPOINT;
VACUUM hash_cleanup_heap;
-- Clean up.
DROP TABLE hash_cleanup_heap;
-- Index on temp table.
CREATE TEMP TABLE hash_temp_heap (x int, y int);
INSERT INTO hash_temp_heap VALUES (1,1);

View File

@ -247,6 +247,46 @@ REINDEX INDEX hash_split_index;
-- Clean up.
DROP TABLE hash_split_heap;
-- Testcases for removing overflow pages.
CREATE TABLE hash_cleanup_heap(keycol INT);
CREATE INDEX hash_cleanup_index on hash_cleanup_heap USING HASH (keycol);
-- Insert tuples to both the primary bucket page and overflow pages.
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
-- Fill overflow pages by "dead" tuples.
BEGIN;
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
ROLLBACK;
-- Checkpoint will ensure that all hash buffers are cleaned before we try
-- to remove overflow pages.
CHECKPOINT;
-- This will squeeze the bucket and remove overflow pages.
VACUUM hash_cleanup_heap;
TRUNCATE hash_cleanup_heap;
-- Insert a few tuples so that the primary bucket page doesn't get full and
-- tuples can be moved to it.
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
-- Fill overflow pages by "dead" tuples.
BEGIN;
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
ROLLBACK;
-- And insert some tuples again. During squeeze operation, these will be moved
-- to the primary bucket allowing to test freeing intermediate overflow pages.
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
CHECKPOINT;
VACUUM hash_cleanup_heap;
-- Clean up.
DROP TABLE hash_cleanup_heap;
-- Index on temp table.
CREATE TEMP TABLE hash_temp_heap (x int, y int);
INSERT INTO hash_temp_heap VALUES (1,1);