Remove lsn from HashScanPosData.

This was intended as infrastructure for weakening VACUUM's locking
requirements, similar to what was done for btree indexes in commit
2ed5b87f96.  However, for hash indexes,
it seems that the improvements which are possible are actually
extremely marginal.  Furthermore, performing the LSN cross-check will
end up skipping cleanup far more often than is necessary; we only care
about page modifications due to a VACUUM, but the LSN check will fail
if ANY modification has occurred.  So, rather than pressing forward
with that "optimization", just rip the LSN field out.

Patch by me, reviewed by Ashutosh Sharma and Amit Kapila

Discussion: http://postgr.es/m/CAA4eK1JxqqcuC5Un7YLQVhOYSZBS+t=3xqZuEkt5RyquyuxpwQ@mail.gmail.com
commit 22c5e73562
parent 79a4a665c0
Robert Haas 2017-09-26 09:16:45 -04:00
3 changed files with 7 additions and 30 deletions
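
For reference, the cross-check this commit removes (see the _hash_kill_items hunk below) is reproduced here with annotations on why it is overly conservative: PageGetLSN() reflects the last WAL record that touched the page, so an unrelated insert or a bucket split advances it just as surely as a VACUUM does.

	/*
	 * Annotated copy of the check removed by this commit (an excerpt,
	 * not standalone code; buf, havePin, rel and so are locals of
	 * _hash_kill_items).
	 */
	page = BufferGetPage(buf);
	if (PageGetLSN(page) != so->currPos.lsn)
	{
		/* Page changed since we read it: killed-item offsets may be stale. */
		if (havePin)
			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
		else
			_hash_relbuf(rel, buf);
		return;				/* give up; no LP_DEAD hints are set */
	}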

@@ -463,12 +463,6 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
 	opaque = (HashPageOpaque) PageGetSpecialPointer(page);
 	so->currPos.buf = buf;
-
-	/*
-	 * We save the LSN of the page as we read it, so that we know whether it
-	 * is safe to apply LP_DEAD hints to the page later.
-	 */
-	so->currPos.lsn = PageGetLSN(page);
 	so->currPos.currPage = BufferGetBlockNumber(buf);
 	if (ScanDirectionIsForward(dir))
@@ -508,7 +502,6 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
 		{
 			so->currPos.buf = buf;
 			so->currPos.currPage = BufferGetBlockNumber(buf);
-			so->currPos.lsn = PageGetLSN(page);
 		}
 		else
 		{
@@ -562,7 +555,6 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
 		{
 			so->currPos.buf = buf;
 			so->currPos.currPage = BufferGetBlockNumber(buf);
-			so->currPos.lsn = PageGetLSN(page);
 		}
 		else
 		{

@@ -532,12 +532,13 @@ _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket,
  * We match items by heap TID before assuming they are the right ones to
  * delete.
  *
- * Note that we keep the pin on the bucket page throughout the scan. Hence,
- * there is no chance of VACUUM deleting any items from that page. However,
- * having pin on the overflow page doesn't guarantee that vacuum won't delete
- * any items.
- *
- * See _bt_killitems() for more details.
+ * There are never any scans active in a bucket at the time VACUUM begins,
+ * because VACUUM takes a cleanup lock on the primary bucket page and scans
+ * hold a pin. A scan can begin after VACUUM leaves the primary bucket page
+ * but before it finishes the entire bucket, but it can never pass VACUUM,
+ * because VACUUM always locks the next page before releasing the lock on
+ * the previous one. Therefore, we don't have to worry about accidentally
+ * killing a TID that has been reused for an unrelated tuple.
  */
 void
 _hash_kill_items(IndexScanDesc scan)
@@ -579,21 +580,7 @@ _hash_kill_items(IndexScanDesc scan)
 	else
 		buf = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
 
-	/*
-	 * If page LSN differs it means that the page was modified since the last
-	 * read. killedItems could be not valid so applying LP_DEAD hints is not
-	 * safe.
-	 */
 	page = BufferGetPage(buf);
-	if (PageGetLSN(page) != so->currPos.lsn)
-	{
-		if (havePin)
-			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-		else
-			_hash_relbuf(rel, buf);
-		return;
-	}
-
 	opaque = (HashPageOpaque) PageGetSpecialPointer(page);
 	maxoff = PageGetMaxOffsetNumber(page);
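
The ordering argument in the new comment is a lock-coupling pattern: VACUUM never releases its lock on one page of the bucket chain before locking the next, so a scan trailing behind can never overtake it. A minimal sketch of that traversal, assuming rel and bucket_buf are in scope and using a hypothetical next_blkno() helper for following the overflow link; the real logic lives in hashbucketcleanup():

	/*
	 * Simplified sketch, not the actual PostgreSQL code.  next_blkno()
	 * stands in for reading the overflow link from the page's opaque area.
	 */
	Buffer		buf = bucket_buf;	/* primary page, locked and pinned */
	BlockNumber blkno = next_blkno(buf);

	for (;;)
	{
		Buffer		nextbuf;

		/* ... remove dead tuples from buf here ... */

		if (!BlockNumberIsValid(blkno))
			break;				/* end of the bucket chain */

		/*
		 * Lock the next page before releasing the current one; a scan
		 * queued behind VACUUM therefore cannot pass it mid-bucket.
		 */
		nextbuf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_OVERFLOW_PAGE);
		if (buf == bucket_buf)
			LockBuffer(buf, BUFFER_LOCK_UNLOCK);	/* keep the pin */
		else
			_hash_relbuf(rel, buf);
		buf = nextbuf;
		blkno = next_blkno(buf);
	}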

@@ -117,7 +117,6 @@ typedef struct HashScanPosItem /* what we remember about each match */
 typedef struct HashScanPosData
 {
 	Buffer		buf;		/* if valid, the buffer is pinned */
-	XLogRecPtr	lsn;		/* pos in the WAL stream when page was read */
 	BlockNumber currPage;	/* current hash index page */
 	BlockNumber nextPage;	/* next overflow page */
 	BlockNumber prevPage;	/* prev overflow or bucket page */
@@ -153,7 +152,6 @@ typedef struct HashScanPosData
 #define HashScanPosInvalidate(scanpos) \
 	do { \
 		(scanpos).buf = InvalidBuffer; \
-		(scanpos).lsn = InvalidXLogRecPtr; \
 		(scanpos).currPage = InvalidBlockNumber; \
 		(scanpos).nextPage = InvalidBlockNumber; \
 		(scanpos).prevPage = InvalidBlockNumber; \
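
Net effect on the header: scan position is now tracked purely by a pinned buffer plus block numbers. A self-contained miniature, with toy typedefs standing in for the real PostgreSQL ones, showing the post-commit shape of the struct and reset macro:

	#include <assert.h>

	/* Toy stand-ins so the post-commit HashScanPosData compiles alone. */
	typedef int Buffer;
	typedef unsigned BlockNumber;
	#define InvalidBuffer		((Buffer) 0)
	#define InvalidBlockNumber	((BlockNumber) 0xFFFFFFFF)

	typedef struct HashScanPosData
	{
		Buffer		buf;		/* if valid, the buffer is pinned */
		BlockNumber currPage;	/* current hash index page */
		BlockNumber nextPage;	/* next overflow page */
		BlockNumber prevPage;	/* prev overflow or bucket page */
	} HashScanPosData;

	/* As in hash.h after this commit: no lsn field left to reset. */
	#define HashScanPosInvalidate(scanpos) \
		do { \
			(scanpos).buf = InvalidBuffer; \
			(scanpos).currPage = InvalidBlockNumber; \
			(scanpos).nextPage = InvalidBlockNumber; \
			(scanpos).prevPage = InvalidBlockNumber; \
		} while (0)

	int
	main(void)
	{
		HashScanPosData pos;

		HashScanPosInvalidate(pos);
		assert(pos.buf == InvalidBuffer);
		assert(pos.currPage == InvalidBlockNumber);
		return 0;
	}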