Remove _hash_chgbufaccess().

This is basically for the same reasons I got rid of _hash_wrtbuf()
in commit 25216c98938495fd741bf585dcbef45b3a9ffd40: it's not
convenient to have a function which encapsulates MarkBufferDirty(),
especially as we move towards having hash indexes be WAL-logged.

Patch by me, reviewed (but not entirely endorsed) by Amit Kapila.
Robert Haas 2016-12-23 07:14:37 -05:00
parent 0a85c10225
commit 7819ba1ef6
6 changed files with 66 additions and 85 deletions
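
Every hunk below applies the same mechanical translation: each _hash_chgbufaccess() call is replaced by the underlying bufmgr call(s) it wrapped. A minimal sketch of the four recurring patterns, with buf standing in for whichever buffer the call site actually holds:

	/* Release a held lock, keep the pin. */
	_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);	/* before */
	LockBuffer(buf, BUFFER_LOCK_UNLOCK);			/* after */

	/* Lock an already-pinned buffer, shared or exclusive. */
	_hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_READ);	/* before */
	LockBuffer(buf, BUFFER_LOCK_SHARE);			/* after */
	_hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE);	/* before */
	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);			/* after */

	/* Dirty the page, then release the write lock, keeping the pin. */
	_hash_chgbufaccess(rel, buf, HASH_WRITE, HASH_NOLOCK);	/* before */
	MarkBufferDirty(buf);					/* after */
	LockBuffer(buf, BUFFER_LOCK_UNLOCK);

Spelling MarkBufferDirty() out at each call site is the point of the change: WAL-logged code must emit its XLOG record after the page is modified and marked dirty but before the buffer lock is released, and a wrapper that hides the dirtying step leaves no room for that.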

src/backend/access/hash/hash.c

@@ -274,7 +274,7 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
 		 * Reacquire the read lock here.
 		 */
 		if (BufferIsValid(so->hashso_curbuf))
-			_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
+			LockBuffer(so->hashso_curbuf, BUFFER_LOCK_SHARE);
 
 	/*
 	 * If we've already initialized this scan, we can just advance it in the
@@ -354,7 +354,7 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
 
 	/* Release read lock on current buffer, but keep it pinned */
 	if (BufferIsValid(so->hashso_curbuf))
-		_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(so->hashso_curbuf, BUFFER_LOCK_UNLOCK);
 
 	/* Return current heap TID on success */
 	scan->xs_ctup.t_self = so->hashso_heappos;
@@ -524,7 +524,7 @@ hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	orig_ntuples = metap->hashm_ntuples;
 	memcpy(&local_metapage, metap, sizeof(local_metapage));
 	/* release the lock, but keep pin */
-	_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 
 	/* Scan the buckets that we know exist */
 	cur_bucket = 0;
@@ -576,9 +576,9 @@ loop_top:
 			 * (and thus can't be further split), update our cached metapage
 			 * data.
			 */
-			_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
+			LockBuffer(metabuf, BUFFER_LOCK_SHARE);
 			memcpy(&local_metapage, metap, sizeof(local_metapage));
-			_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+			LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 		}
 
 		bucket_buf = buf;
@@ -597,7 +597,7 @@ loop_top:
 	}
 
 	/* Write-lock metapage and check for split since we started */
-	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 	metap = HashPageGetMeta(BufferGetPage(metabuf));
 
 	if (cur_maxbucket != metap->hashm_maxbucket)
@@ -605,7 +605,7 @@
 		/* There's been a split, so process the additional bucket(s) */
 		cur_maxbucket = metap->hashm_maxbucket;
 		memcpy(&local_metapage, metap, sizeof(local_metapage));
-		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 		goto loop_top;
 	}
@@ -821,7 +821,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 		 * page
 		 */
 		if (retain_pin)
-			_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 		else
 			_hash_relbuf(rel, buf);
@@ -836,7 +836,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 	if (buf != bucket_buf)
 	{
 		_hash_relbuf(rel, buf);
-		_hash_chgbufaccess(rel, bucket_buf, HASH_NOLOCK, HASH_WRITE);
+		LockBuffer(bucket_buf, BUFFER_LOCK_EXCLUSIVE);
 	}
 
 	/*
@@ -866,7 +866,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 		_hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf,
 							bstrategy);
 	else
-		_hash_chgbufaccess(rel, bucket_buf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(bucket_buf, BUFFER_LOCK_UNLOCK);
 }
 
 void

src/backend/access/hash/hashinsert.c

@@ -104,7 +104,7 @@ restart_insert:
 		lowmask = metap->hashm_lowmask;
 
 		/* Release metapage lock, but keep pin. */
-		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 
 		/*
 		 * If the previous iteration of this loop locked the primary page of
@@ -125,7 +125,7 @@ restart_insert:
 			 * Reacquire metapage lock and check that no bucket split has taken
 			 * place while we were awaiting the bucket lock.
 			 */
-			_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
+			LockBuffer(metabuf, BUFFER_LOCK_SHARE);
 			oldblkno = blkno;
 			retry = true;
 		}
@@ -149,7 +149,7 @@ restart_insert:
 	if (H_BUCKET_BEING_SPLIT(pageopaque) && IsBufferCleanupOK(buf))
 	{
 		/* release the lock on bucket buffer, before completing the split. */
-		_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 
 		_hash_finish_split(rel, metabuf, buf, pageopaque->hasho_bucket,
 						   maxbucket, highmask, lowmask);
@@ -180,7 +180,7 @@ restart_insert:
 			if (buf != bucket_buf)
 				_hash_relbuf(rel, buf);
 			else
-				_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+				LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 			buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
 			page = BufferGetPage(buf);
 		}
@@ -192,7 +192,7 @@ restart_insert:
 			 */
 
 			/* release our write lock without modifying buffer */
-			_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 
 			/* chain to a new overflow page */
 			buf = _hash_addovflpage(rel, metabuf, buf, (buf == bucket_buf) ? true : false);
@@ -223,7 +223,7 @@ restart_insert:
 	 * Write-lock the metapage so we can increment the tuple count. After
 	 * incrementing it, check to see if it's time for a split.
 	 */
-	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 
 	metap->hashm_ntuples += 1;
 
@@ -232,7 +232,8 @@ restart_insert:
 		(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1);
 
 	/* Write out the metapage and drop lock, but keep pin */
-	_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+	MarkBufferDirty(metabuf);
+	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 
 	/* Attempt to split if a split is needed */
 	if (do_expand)

src/backend/access/hash/hashovfl.c

@@ -110,7 +110,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
 	 * Write-lock the tail page. It is okay to hold two buffer locks here
 	 * since there cannot be anyone else contending for access to ovflbuf.
 	 */
-	_hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE);
+	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 
 	/* probably redundant... */
 	_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
@@ -129,7 +129,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
 
 		/* we assume we do not need to write the unmodified page */
 		if ((pageopaque->hasho_flag & LH_BUCKET_PAGE) && retain_pin)
-			_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 		else
 			_hash_relbuf(rel, buf);
@@ -151,7 +151,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
 	pageopaque->hasho_nextblkno = BufferGetBlockNumber(ovflbuf);
 	MarkBufferDirty(buf);
 	if ((pageopaque->hasho_flag & LH_BUCKET_PAGE) && retain_pin)
-		_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 	else
 		_hash_relbuf(rel, buf);
@@ -187,7 +187,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
 				j;
 
 	/* Get exclusive lock on the meta page */
-	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 
 	_hash_checkpage(rel, metabuf, LH_META_PAGE);
 	metap = HashPageGetMeta(BufferGetPage(metabuf));
@@ -225,7 +225,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
 		last_inpage = BMPGSZ_BIT(metap) - 1;
 
 		/* Release exclusive lock on metapage while reading bitmap page */
-		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 
 		mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE);
 		mappage = BufferGetPage(mapbuf);
@@ -244,7 +244,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
 		bit = 0;
 
 		/* Reacquire exclusive lock on the meta page */
-		_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+		LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 	}
 
 	/*
@@ -295,7 +295,8 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
 		metap->hashm_firstfree = bit + 1;
 
 	/* Write updated metapage and release lock, but not pin */
-	_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+	MarkBufferDirty(metabuf);
+	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 
 	return newbuf;
 
@@ -309,7 +310,7 @@ found:
 	_hash_relbuf(rel, mapbuf);
 
 	/* Reacquire exclusive lock on the meta page */
-	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 
 	/* convert bit to absolute bit number */
 	bit += (i << BMPG_SHIFT(metap));
@@ -326,12 +327,13 @@ found:
 		metap->hashm_firstfree = bit + 1;
 
 		/* Write updated metapage and release lock, but not pin */
-		_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+		MarkBufferDirty(metabuf);
+		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 	}
 	else
 	{
 		/* We didn't change the metapage, so no need to write */
-		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 	}
 
 	/* Fetch, init, and return the recycled page */
@@ -483,7 +485,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
 	blkno = metap->hashm_mapp[bitmappage];
 
 	/* Release metapage lock while we access the bitmap page */
-	_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 
 	/* Clear the bitmap bit to indicate that this overflow page is free */
 	mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BITMAP_PAGE);
@@ -495,7 +497,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
 	_hash_relbuf(rel, mapbuf);
 
 	/* Get write-lock on metapage to update firstfree */
-	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 
 	/* if this is now the first free page, update hashm_firstfree */
 	if (ovflbitno < metap->hashm_firstfree)
@@ -633,7 +635,7 @@ _hash_squeezebucket(Relation rel,
 	 */
 	if (!BlockNumberIsValid(wopaque->hasho_nextblkno))
 	{
-		_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(wbuf, BUFFER_LOCK_UNLOCK);
 		return;
 	}
@@ -721,7 +723,7 @@ _hash_squeezebucket(Relation rel,
 				if (wbuf_dirty)
 					MarkBufferDirty(wbuf);
 				if (retain_pin)
-					_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
+					LockBuffer(wbuf, BUFFER_LOCK_UNLOCK);
 				else
 					_hash_relbuf(rel, wbuf);
@@ -784,7 +786,7 @@ _hash_squeezebucket(Relation rel,
 			{
 				/* retain the pin on primary bucket page till end of bucket scan */
 				if (wblkno == bucket_blkno)
-					_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
+					LockBuffer(wbuf, BUFFER_LOCK_UNLOCK);
 				else
 					_hash_relbuf(rel, wbuf);
 				return;

src/backend/access/hash/hashpage.c

@@ -289,32 +289,6 @@ _hash_dropscanbuf(Relation rel, HashScanOpaque so)
 	so->hashso_buc_split = false;
 }
 
-/*
- *	_hash_chgbufaccess() -- Change the lock type on a buffer, without
- *			dropping our pin on it.
- *
- * from_access and to_access may be HASH_READ, HASH_WRITE, or HASH_NOLOCK,
- * the last indicating that no buffer-level lock is held or wanted.
- *
- * When from_access == HASH_WRITE, we assume the buffer is dirty and tell
- * bufmgr it must be written out. If the caller wants to release a write
- * lock on a page that's not been modified, it's okay to pass from_access
- * as HASH_READ (a bit ugly, but handy in some places).
- */
-void
-_hash_chgbufaccess(Relation rel,
-				   Buffer buf,
-				   int from_access,
-				   int to_access)
-{
-	if (from_access == HASH_WRITE)
-		MarkBufferDirty(buf);
-	if (from_access != HASH_NOLOCK)
-		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-	if (to_access != HASH_NOLOCK)
-		LockBuffer(buf, to_access);
-}
-
 /*
  *	_hash_metapinit() -- Initialize the metadata page of a hash index,
@@ -446,7 +420,8 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
 	 * won't accomplish anything. It's a bad idea to hold buffer locks for
 	 * long intervals in any case, since that can block the bgwriter.
 	 */
-	_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+	MarkBufferDirty(metabuf);
+	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 
 	/*
 	 * Initialize the first N buckets
@@ -469,7 +444,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
 	}
 
 	/* Now reacquire buffer lock on metapage */
-	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 
 	/*
 	 * Initialize first bitmap page
@@ -528,7 +503,7 @@ restart_expand:
 	 * Write-lock the meta page. It used to be necessary to acquire a
 	 * heavyweight lock to begin a split, but that is no longer required.
 	 */
-	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
+	LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 
 	_hash_checkpage(rel, metabuf, LH_META_PAGE);
 	metap = HashPageGetMeta(BufferGetPage(metabuf));
@@ -609,8 +584,8 @@ restart_expand:
 		 * Release the lock on metapage and old_bucket, before completing the
 		 * split.
 		 */
-		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
-		_hash_chgbufaccess(rel, buf_oblkno, HASH_READ, HASH_NOLOCK);
+		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
+		LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);
 
 		_hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
 						   highmask, lowmask);
@@ -646,7 +621,7 @@ restart_expand:
 		lowmask = metap->hashm_lowmask;
 
 		/* Release the metapage lock. */
-		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 
 		hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
 						  maxbucket, highmask, lowmask, NULL, NULL, true,
@@ -753,7 +728,8 @@ restart_expand:
 	lowmask = metap->hashm_lowmask;
 
 	/* Write out the metapage and drop lock, but keep pin */
-	_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
+	MarkBufferDirty(metabuf);
+	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 
 	/* Relocate records to the new bucket */
 	_hash_splitbucket(rel, metabuf,
@@ -767,7 +743,7 @@
 fail:
 
 	/* We didn't write the metapage, so just drop lock */
-	_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+	LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 }
@@ -1001,7 +977,8 @@ _hash_splitbucket_guts(Relation rel,
 			if (PageGetFreeSpace(npage) < itemsz)
 			{
 				/* write out nbuf and drop lock, but keep pin */
-				_hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
+				MarkBufferDirty(nbuf);
+				LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
 				/* chain to a new overflow page */
 				nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false);
 				npage = BufferGetPage(nbuf);
@@ -1033,7 +1010,7 @@ _hash_splitbucket_guts(Relation rel,
 
 		/* retain the pin on the old primary bucket */
 		if (obuf == bucket_obuf)
-			_hash_chgbufaccess(rel, obuf, HASH_READ, HASH_NOLOCK);
+			LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
 		else
 			_hash_relbuf(rel, obuf);
@@ -1056,18 +1033,21 @@ _hash_splitbucket_guts(Relation rel,
 	 * bucket and then the new bucket.
 	 */
 	if (nbuf == bucket_nbuf)
-		_hash_chgbufaccess(rel, bucket_nbuf, HASH_WRITE, HASH_NOLOCK);
+	{
+		MarkBufferDirty(bucket_nbuf);
+		LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
+	}
 	else
 	{
 		MarkBufferDirty(nbuf);
 		_hash_relbuf(rel, nbuf);
 	}
 
-	_hash_chgbufaccess(rel, bucket_obuf, HASH_NOLOCK, HASH_WRITE);
+	LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
 	opage = BufferGetPage(bucket_obuf);
 	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
 
-	_hash_chgbufaccess(rel, bucket_nbuf, HASH_NOLOCK, HASH_WRITE);
+	LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
 	npage = BufferGetPage(bucket_nbuf);
 	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
@@ -1172,7 +1152,7 @@ _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
 			 * retain the pin on primary bucket.
 			 */
 			if (nbuf == bucket_nbuf)
-				_hash_chgbufaccess(rel, nbuf, HASH_READ, HASH_NOLOCK);
+				LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
 			else
 				_hash_relbuf(rel, nbuf);
@@ -1194,7 +1174,7 @@ _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
 	}
 	if (!ConditionalLockBufferForCleanup(bucket_nbuf))
 	{
-		_hash_chgbufaccess(rel, obuf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
 		hash_destroy(tidhtab);
 		return;
 	}
@@ -1208,6 +1188,6 @@ _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
 					  maxbucket, highmask, lowmask);
 
 	_hash_relbuf(rel, bucket_nbuf);
-	_hash_chgbufaccess(rel, obuf, HASH_READ, HASH_NOLOCK);
+	LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
 	hash_destroy(tidhtab);
 }

src/backend/access/hash/hashsearch.c

@@ -83,7 +83,7 @@ _hash_readnext(IndexScanDesc scan,
 	 * comments in _hash_first to know the reason of retaining pin.
 	 */
 	if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
-		_hash_chgbufaccess(rel, *bufp, HASH_READ, HASH_NOLOCK);
+		LockBuffer(*bufp, BUFFER_LOCK_UNLOCK);
 	else
 		_hash_relbuf(rel, *bufp);
@@ -109,7 +109,7 @@ _hash_readnext(IndexScanDesc scan,
 		 */
 		Assert(BufferIsValid(*bufp));
 
-		_hash_chgbufaccess(rel, *bufp, HASH_NOLOCK, HASH_READ);
+		LockBuffer(*bufp, BUFFER_LOCK_SHARE);
 
 		/*
 		 * setting hashso_buc_split to true indicates that we are scanning
@@ -147,7 +147,7 @@ _hash_readprev(IndexScanDesc scan,
 	 * comments in _hash_first to know the reason of retaining pin.
 	 */
 	if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
-		_hash_chgbufaccess(rel, *bufp, HASH_READ, HASH_NOLOCK);
+		LockBuffer(*bufp, BUFFER_LOCK_UNLOCK);
 	else
 		_hash_relbuf(rel, *bufp);
@@ -182,7 +182,7 @@ _hash_readprev(IndexScanDesc scan,
 		 */
 		Assert(BufferIsValid(*bufp));
 
-		_hash_chgbufaccess(rel, *bufp, HASH_NOLOCK, HASH_READ);
+		LockBuffer(*bufp, BUFFER_LOCK_SHARE);
 		*pagep = BufferGetPage(*bufp);
 		*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
@@ -298,7 +298,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 		blkno = BUCKET_TO_BLKNO(metap, bucket);
 
 		/* Release metapage lock, but keep pin. */
-		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 
 		/*
 		 * If the previous iteration of this loop locked what is still the
@@ -319,7 +319,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 			 * Reacquire metapage lock and check that no bucket split has taken
 			 * place while we were awaiting the bucket lock.
 			 */
-			_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
+			LockBuffer(metabuf, BUFFER_LOCK_SHARE);
 			oldblkno = blkno;
 			retry = true;
 		}
@@ -359,7 +359,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 		 * release the lock on new bucket and re-acquire it after acquiring
 		 * the lock on old bucket.
 		 */
-		_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 
 		old_buf = _hash_getbuf(rel, old_blkno, HASH_READ, LH_BUCKET_PAGE);
@@ -368,9 +368,9 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 		 * scanning.
 		 */
 		so->hashso_split_bucket_buf = old_buf;
-		_hash_chgbufaccess(rel, old_buf, HASH_READ, HASH_NOLOCK);
+		LockBuffer(old_buf, BUFFER_LOCK_UNLOCK);
 
-		_hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_READ);
+		LockBuffer(buf, BUFFER_LOCK_SHARE);
 		page = BufferGetPage(buf);
 		opaque = (HashPageOpaque) PageGetSpecialPointer(page);
 		Assert(opaque->hasho_bucket == bucket);

src/include/access/hash.h

@@ -336,8 +336,6 @@ extern Buffer _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
 extern void _hash_relbuf(Relation rel, Buffer buf);
 extern void _hash_dropbuf(Relation rel, Buffer buf);
 extern void _hash_dropscanbuf(Relation rel, HashScanOpaque so);
-extern void _hash_chgbufaccess(Relation rel, Buffer buf, int from_access,
-			   int to_access);
 extern uint32 _hash_metapinit(Relation rel, double num_tuples,
 			   ForkNumber forkNum);
 extern void _hash_pageinit(Page page, Size size);