Tweak hash index AM to use the new ReadOrZeroBuffer bufmgr API when fetching
pages it intends to zero immediately.  Just to show there is some use for that
function besides WAL recovery :-).

Along the way, fold _hash_checkpage and _hash_pageinit calls into _hash_getbuf
and friends, instead of expecting callers to do that separately.
Tom Lane 2007-05-03 16:45:58 +00:00
parent 1aefa0489f
commit 0fef38da21
8 changed files with 110 additions and 85 deletions
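
In outline, the patch replaces the old two-step pattern at each call site
(fetch a buffer, then validate or initialize the page by hand) with a single
call.  A caller-side sketch of the before and after (identifiers as in the
diffs below; exact call sites vary):

    /* Before: fetch, then check the page type separately */
    buf = _hash_getbuf(rel, blkno, HASH_WRITE);
    _hash_checkpage(rel, buf, LH_BUCKET_PAGE);

    /* After: the check is folded into the fetch */
    buf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BUCKET_PAGE);

    /* After, for a page about to be rewritten from scratch: fetched with
     * ReadOrZeroBuffer (no physical read needed) and pre-initialized */
    buf = _hash_getinitbuf(rel, blkno);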

contrib/pgstattuple/pgstattuple.c

@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/pgstattuple/pgstattuple.c,v 1.26 2007/03/25 19:45:13 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/pgstattuple/pgstattuple.c,v 1.27 2007/05/03 16:45:58 tgl Exp $
 *
 * Copyright (c) 2001,2002 Tatsuo Ishii
 *
@@ -360,7 +360,7 @@ pgstat_hash_page(pgstattuple_type * stat, Relation rel, BlockNumber blkno)
 	Page		page;

 	_hash_getlock(rel, blkno, HASH_SHARE);
-	buf = _hash_getbuf(rel, blkno, HASH_READ);
+	buf = _hash_getbuf(rel, blkno, HASH_READ, 0);
 	page = BufferGetPage(buf);

 	if (PageGetSpecialSize(page) == MAXALIGN(sizeof(HashPageOpaqueData)))

src/backend/access/hash/hash.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.93 2007/01/20 18:43:35 neilc Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.94 2007/05/03 16:45:58 tgl Exp $
 *
 * NOTES
 *	  This file contains only the public interface routines.
@@ -506,8 +506,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
 	 * array cannot change under us; and it beats rereading the metapage for
 	 * each bucket.
 	 */
-	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
-	_hash_checkpage(rel, metabuf, LH_META_PAGE);
+	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
 	metap = (HashMetaPage) BufferGetPage(metabuf);
 	orig_maxbucket = metap->hashm_maxbucket;
 	orig_ntuples = metap->hashm_ntuples;
@@ -548,8 +547,8 @@ loop_top:
 		vacuum_delay_point();

-		buf = _hash_getbuf(rel, blkno, HASH_WRITE);
-		_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
+		buf = _hash_getbuf(rel, blkno, HASH_WRITE,
+						   LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
 		page = BufferGetPage(buf);
 		opaque = (HashPageOpaque) PageGetSpecialPointer(page);
 		Assert(opaque->hasho_bucket == cur_bucket);
@@ -607,8 +606,7 @@ loop_top:
 	}

 	/* Write-lock metapage and check for split since we started */
-	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE);
-	_hash_checkpage(rel, metabuf, LH_META_PAGE);
+	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE, LH_META_PAGE);
 	metap = (HashMetaPage) BufferGetPage(metabuf);

 	if (cur_maxbucket != metap->hashm_maxbucket)

src/backend/access/hash/hashinsert.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.44 2007/01/05 22:19:22 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.45 2007/05/03 16:45:58 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -66,8 +66,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
 	_hash_getlock(rel, 0, HASH_SHARE);

 	/* Read the metapage */
-	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
-	_hash_checkpage(rel, metabuf, LH_META_PAGE);
+	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
 	metap = (HashMetaPage) BufferGetPage(metabuf);

 	/*
@@ -104,8 +103,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
 	_hash_droplock(rel, 0, HASH_SHARE);

 	/* Fetch the primary bucket page for the bucket */
-	buf = _hash_getbuf(rel, blkno, HASH_WRITE);
-	_hash_checkpage(rel, buf, LH_BUCKET_PAGE);
+	buf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BUCKET_PAGE);
 	page = BufferGetPage(buf);
 	pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 	Assert(pageopaque->hasho_bucket == bucket);
@@ -125,7 +123,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
 			 * find out next pass through the loop test above.
 			 */
 			_hash_relbuf(rel, buf);
-			buf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
+			buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
 			page = BufferGetPage(buf);
 		}
 		else
@@ -145,8 +143,8 @@ _hash_doinsert(Relation rel, IndexTuple itup)
 			/* should fit now, given test above */
 			Assert(PageGetFreeSpace(page) >= itemsz);
 		}
-		_hash_checkpage(rel, buf, LH_OVERFLOW_PAGE);
 		pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
+		Assert(pageopaque->hasho_flag == LH_OVERFLOW_PAGE);
 		Assert(pageopaque->hasho_bucket == bucket);
 	}

src/backend/access/hash/hashovfl.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.56 2007/04/19 20:24:04 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.57 2007/05/03 16:45:58 tgl Exp $
 *
 * NOTES
 *	  Overflow pages look like ordinary relation pages.
@@ -107,7 +107,6 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
 	/* allocate and lock an empty overflow page */
 	ovflbuf = _hash_getovflpage(rel, metabuf);
-	ovflpage = BufferGetPage(ovflbuf);

 	/*
 	 * Write-lock the tail page.  It is okay to hold two buffer locks here
@@ -115,12 +114,14 @@
 	 */
 	_hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE);

-	/* probably redundant... */
-	_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
-
 	/* loop to find current tail page, in case someone else inserted too */
 	for (;;)
 	{
 		BlockNumber nextblkno;

+		_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
+
 		page = BufferGetPage(buf);
 		pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 		nextblkno = pageopaque->hasho_nextblkno;
@@ -131,11 +132,11 @@
 		/* we assume we do not need to write the unmodified page */
 		_hash_relbuf(rel, buf);

-		buf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
+		buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
 	}

 	/* now that we have correct backlink, initialize new overflow page */
-	_hash_pageinit(ovflpage, BufferGetPageSize(ovflbuf));
+	ovflpage = BufferGetPage(ovflbuf);
 	ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
 	ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf);
 	ovflopaque->hasho_nextblkno = InvalidBlockNumber;
@@ -156,7 +157,8 @@
 *	_hash_getovflpage()
 *
 *	Find an available overflow page and return it.  The returned buffer
-*	is pinned and write-locked, but its contents are not initialized.
+*	is pinned and write-locked, and has had _hash_pageinit() applied,
+*	but it is caller's responsibility to fill the special space.
 *
 *	The caller must hold a pin, but no lock, on the metapage buffer.
 *	That buffer is left in the same state at exit.
@@ -220,8 +222,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
 		/* Release exclusive lock on metapage while reading bitmap page */
 		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);

-		mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE);
-		_hash_checkpage(rel, mapbuf, LH_BITMAP_PAGE);
+		mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE);
 		mappage = BufferGetPage(mapbuf);
 		freep = HashPageGetBitmap(mappage);
@@ -277,7 +278,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
 	 * with metapage write lock held; would be better to use a lock that
 	 * doesn't block incoming searches.
 	 */
-	newbuf = _hash_getnewbuf(rel, blkno, HASH_WRITE);
+	newbuf = _hash_getnewbuf(rel, blkno);

 	metap->hashm_spares[splitnum]++;
@@ -327,8 +328,8 @@ found:
 		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
 	}

-	/* Fetch and return the recycled page */
-	return _hash_getbuf(rel, blkno, HASH_WRITE);
+	/* Fetch, init, and return the recycled page */
+	return _hash_getinitbuf(rel, blkno);
 }
@@ -412,30 +413,29 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
 	 */
 	if (BlockNumberIsValid(prevblkno))
 	{
-		Buffer		prevbuf = _hash_getbuf(rel, prevblkno, HASH_WRITE);
+		Buffer		prevbuf = _hash_getbuf(rel, prevblkno, HASH_WRITE,
+										   LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
 		Page		prevpage = BufferGetPage(prevbuf);
 		HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);

-		_hash_checkpage(rel, prevbuf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
 		Assert(prevopaque->hasho_bucket == bucket);
 		prevopaque->hasho_nextblkno = nextblkno;
 		_hash_wrtbuf(rel, prevbuf);
 	}
 	if (BlockNumberIsValid(nextblkno))
 	{
-		Buffer		nextbuf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
+		Buffer		nextbuf = _hash_getbuf(rel, nextblkno, HASH_WRITE,
+										   LH_OVERFLOW_PAGE);
 		Page		nextpage = BufferGetPage(nextbuf);
 		HashPageOpaque nextopaque = (HashPageOpaque) PageGetSpecialPointer(nextpage);

-		_hash_checkpage(rel, nextbuf, LH_OVERFLOW_PAGE);
 		Assert(nextopaque->hasho_bucket == bucket);
 		nextopaque->hasho_prevblkno = prevblkno;
 		_hash_wrtbuf(rel, nextbuf);
 	}

 	/* Read the metapage so we can determine which bitmap page to use */
-	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
-	_hash_checkpage(rel, metabuf, LH_META_PAGE);
+	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
 	metap = (HashMetaPage) BufferGetPage(metabuf);

 	/* Identify which bit to set */
@@ -452,8 +452,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
 	_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);

 	/* Clear the bitmap bit to indicate that this overflow page is free */
-	mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE);
-	_hash_checkpage(rel, mapbuf, LH_BITMAP_PAGE);
+	mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BITMAP_PAGE);
 	mappage = BufferGetPage(mapbuf);
 	freep = HashPageGetBitmap(mappage);
 	Assert(ISSET(freep, bitmapbit));
@@ -507,11 +506,10 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
 	 * page while holding the metapage lock, but this path is taken so seldom
 	 * that it's not worth worrying about.
 	 */
-	buf = _hash_getnewbuf(rel, blkno, HASH_WRITE);
+	buf = _hash_getnewbuf(rel, blkno);
 	pg = BufferGetPage(buf);

-	/* initialize the page */
-	_hash_pageinit(pg, BufferGetPageSize(buf));
+	/* initialize the page's special space */
 	op = (HashPageOpaque) PageGetSpecialPointer(pg);
 	op->hasho_prevblkno = InvalidBlockNumber;
 	op->hasho_nextblkno = InvalidBlockNumber;
@@ -583,8 +581,7 @@ _hash_squeezebucket(Relation rel,
 	 * start squeezing into the base bucket page.
 	 */
 	wblkno = bucket_blkno;
-	wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE);
-	_hash_checkpage(rel, wbuf, LH_BUCKET_PAGE);
+	wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE, LH_BUCKET_PAGE);
 	wpage = BufferGetPage(wbuf);
 	wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
@@ -607,8 +604,7 @@ _hash_squeezebucket(Relation rel,
 		rblkno = ropaque->hasho_nextblkno;
 		if (ropaque != wopaque)
 			_hash_relbuf(rel, rbuf);
-		rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
-		_hash_checkpage(rel, rbuf, LH_OVERFLOW_PAGE);
+		rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
 		rpage = BufferGetPage(rbuf);
 		ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage);
 		Assert(ropaque->hasho_bucket == bucket);
@@ -648,8 +644,7 @@ _hash_squeezebucket(Relation rel,
 				return;
 			}

-			wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE);
-			_hash_checkpage(rel, wbuf, LH_OVERFLOW_PAGE);
+			wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
 			wpage = BufferGetPage(wbuf);
 			wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
 			Assert(wopaque->hasho_bucket == bucket);
@@ -701,8 +696,7 @@ _hash_squeezebucket(Relation rel,
 		/* free this overflow page, then get the previous one */
 		_hash_freeovflpage(rel, rbuf);

-		rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
-		_hash_checkpage(rel, rbuf, LH_OVERFLOW_PAGE);
+		rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
 		rpage = BufferGetPage(rbuf);
 		ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage);
 		Assert(ropaque->hasho_bucket == bucket);

src/backend/access/hash/hashpage.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.66 2007/04/19 20:24:04 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.67 2007/05/03 16:45:58 tgl Exp $
 *
 * NOTES
 *	  Postgres hash pages look like ordinary relation pages.  The opaque
@@ -100,21 +100,21 @@ _hash_droplock(Relation rel, BlockNumber whichlock, int access)
 *	_hash_getbuf() -- Get a buffer by block number for read or write.
 *
 *		'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
+*		'flags' is a bitwise OR of the allowed page types.
+*
+*		This must be used only to fetch pages that are expected to be valid
+*		already.  _hash_checkpage() is applied using the given flags.
 *
 *		When this routine returns, the appropriate lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned").
 *
-*		P_NEW is disallowed because this routine should only be used
+*		P_NEW is disallowed because this routine can only be used
 *		to access pages that are known to be before the filesystem EOF.
 *		Extending the index should be done with _hash_getnewbuf.
-*
-*		All call sites should call either _hash_checkpage or _hash_pageinit
-*		on the returned page, depending on whether the block is expected
-*		to be valid or not.
 */
 Buffer
-_hash_getbuf(Relation rel, BlockNumber blkno, int access)
+_hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
 {
 	Buffer		buf;
@@ -127,13 +127,52 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access)
 	if (access != HASH_NOLOCK)
 		LockBuffer(buf, access);

 	/* ref count and lock type are correct */
+
+	_hash_checkpage(rel, buf, flags);
+
 	return buf;
 }

 /*
+ *	_hash_getinitbuf() -- Get and initialize a buffer by block number.
+ *
+ *		This must be used only to fetch pages that are known to be before
+ *		the index's filesystem EOF, but are to be filled from scratch.
+ *		_hash_pageinit() is applied automatically.  Otherwise it has
+ *		effects similar to _hash_getbuf() with access = HASH_WRITE.
+ *
+ *		When this routine returns, a write lock is set on the
+ *		requested buffer and its reference count has been incremented
+ *		(ie, the buffer is "locked and pinned").
+ *
+ *		P_NEW is disallowed because this routine can only be used
+ *		to access pages that are known to be before the filesystem EOF.
+ *		Extending the index should be done with _hash_getnewbuf.
+ */
+Buffer
+_hash_getinitbuf(Relation rel, BlockNumber blkno)
+{
+	Buffer		buf;
+
+	if (blkno == P_NEW)
+		elog(ERROR, "hash AM does not use P_NEW");
+
+	buf = ReadOrZeroBuffer(rel, blkno);
+
+	LockBuffer(buf, HASH_WRITE);
+
+	/* ref count and lock type are correct */
+
+	/* initialize the page */
+	_hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
+
+	return buf;
+}
+
+/*
 *	_hash_getnewbuf() -- Get a new page at the end of the index.
 *
-*		This has the same API as _hash_getbuf, except that we are adding
+*		This has the same API as _hash_getinitbuf, except that we are adding
 *		a page to the index, and hence expect the page to be past the
 *		logical EOF.  (However, we have to support the case where it isn't,
 *		since a prior try might have crashed after extending the filesystem
@@ -141,12 +180,9 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access)
 *
 *		It is caller's responsibility to ensure that only one process can
 *		extend the index at a time.
-*
-*		All call sites should call _hash_pageinit on the returned page.
-*		Also, it's difficult to imagine why access would not be HASH_WRITE.
 */
 Buffer
-_hash_getnewbuf(Relation rel, BlockNumber blkno, int access)
+_hash_getnewbuf(Relation rel, BlockNumber blkno)
 {
 	BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
 	Buffer		buf;
@@ -166,12 +202,15 @@ _hash_getnewbuf(Relation rel, BlockNumber blkno, int access)
 				 BufferGetBlockNumber(buf), blkno);
 	}
 	else
-		buf = ReadBuffer(rel, blkno);
+		buf = ReadOrZeroBuffer(rel, blkno);

-	if (access != HASH_NOLOCK)
-		LockBuffer(buf, access);
+	LockBuffer(buf, HASH_WRITE);

 	/* ref count and lock type are correct */

+	/* initialize the page */
+	_hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
+
 	return buf;
 }
@@ -292,9 +331,8 @@ _hash_metapinit(Relation rel)
 	 * smgrextend() calls to occur.  This ensures that the smgr level
 	 * has the right idea of the physical index length.
 	 */
-	metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, HASH_WRITE);
+	metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
 	pg = BufferGetPage(metabuf);
-	_hash_pageinit(pg, BufferGetPageSize(metabuf));

 	pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
 	pageopaque->hasho_prevblkno = InvalidBlockNumber;
@@ -350,9 +388,8 @@ _hash_metapinit(Relation rel)
 	 */
 	for (i = 0; i <= 1; i++)
 	{
-		buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i), HASH_WRITE);
+		buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i));
 		pg = BufferGetPage(buf);
-		_hash_pageinit(pg, BufferGetPageSize(buf));
 		pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
 		pageopaque->hasho_prevblkno = InvalidBlockNumber;
 		pageopaque->hasho_nextblkno = InvalidBlockNumber;
@@ -679,17 +716,15 @@ _hash_splitbucket(Relation rel,
 	 * either bucket.
 	 */
 	oblkno = start_oblkno;
-	obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
-	_hash_checkpage(rel, obuf, LH_BUCKET_PAGE);
+	obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_BUCKET_PAGE);
 	opage = BufferGetPage(obuf);
 	oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

 	nblkno = start_nblkno;
-	nbuf = _hash_getbuf(rel, nblkno, HASH_WRITE);
+	nbuf = _hash_getnewbuf(rel, nblkno);
 	npage = BufferGetPage(nbuf);

 	/* initialize the new bucket's primary page */
-	_hash_pageinit(npage, BufferGetPageSize(nbuf));
 	nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
 	nopaque->hasho_prevblkno = InvalidBlockNumber;
 	nopaque->hasho_nextblkno = InvalidBlockNumber;
@@ -725,8 +760,7 @@ _hash_splitbucket(Relation rel,
 			 */
 			_hash_wrtbuf(rel, obuf);

-			obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
-			_hash_checkpage(rel, obuf, LH_OVERFLOW_PAGE);
+			obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
 			opage = BufferGetPage(obuf);
 			oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
 			ooffnum = FirstOffsetNumber;
@@ -763,7 +797,6 @@ _hash_splitbucket(Relation rel,
 				_hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);

 				/* chain to a new overflow page */
 				nbuf = _hash_addovflpage(rel, metabuf, nbuf);
-				_hash_checkpage(rel, nbuf, LH_OVERFLOW_PAGE);
 				npage = BufferGetPage(nbuf);

 				/* we don't need nopaque within the loop */
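
A note on why ReadOrZeroBuffer is safe in _hash_getinitbuf and _hash_getnewbuf
above: both routines run _hash_pageinit on the buffer before returning, so the
caller never sees the prior on-disk contents, and it cannot matter whether the
buffer came back zeroed or was actually read.  A hedged sketch of the contract
a ReadOrZeroBuffer caller must uphold (illustration, not PostgreSQL source):

    /* Valid only if every byte of the page is rewritten before the buffer
     * is unlocked; otherwise the old contents could be silently lost. */
    buf = ReadOrZeroBuffer(rel, blkno);		/* may skip the physical read */
    LockBuffer(buf, HASH_WRITE);
    _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));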

src/backend/access/hash/hashsearch.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.48 2007/01/30 01:33:36 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.49 2007/05/03 16:45:58 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -73,8 +73,7 @@ _hash_readnext(Relation rel,
 	*bufp = InvalidBuffer;
 	if (BlockNumberIsValid(blkno))
 	{
-		*bufp = _hash_getbuf(rel, blkno, HASH_READ);
-		_hash_checkpage(rel, *bufp, LH_OVERFLOW_PAGE);
+		*bufp = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
 		*pagep = BufferGetPage(*bufp);
 		*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
 	}
@@ -94,8 +93,8 @@ _hash_readprev(Relation rel,
 	*bufp = InvalidBuffer;
 	if (BlockNumberIsValid(blkno))
 	{
-		*bufp = _hash_getbuf(rel, blkno, HASH_READ);
-		_hash_checkpage(rel, *bufp, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
+		*bufp = _hash_getbuf(rel, blkno, HASH_READ,
+							 LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
 		*pagep = BufferGetPage(*bufp);
 		*opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
 	}
@@ -183,8 +182,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 	_hash_getlock(rel, 0, HASH_SHARE);

 	/* Read the metapage */
-	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
-	_hash_checkpage(rel, metabuf, LH_META_PAGE);
+	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
 	metap = (HashMetaPage) BufferGetPage(metabuf);

 	/*
@@ -213,8 +211,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 	so->hashso_bucket_blkno = blkno;

 	/* Fetch the primary bucket page for the bucket */
-	buf = _hash_getbuf(rel, blkno, HASH_READ);
-	_hash_checkpage(rel, buf, LH_BUCKET_PAGE);
+	buf = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE);
 	page = BufferGetPage(buf);
 	opaque = (HashPageOpaque) PageGetSpecialPointer(page);
 	Assert(opaque->hasho_bucket == bucket);

src/backend/access/hash/hashutil.c

@@ -8,7 +8,7 @@
 *
 *
 * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.51 2007/01/30 01:33:36 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.52 2007/05/03 16:45:58 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -137,6 +137,9 @@ _hash_log2(uint32 num)
 /*
  * _hash_checkpage -- sanity checks on the format of all hash pages
+ *
+ * If flags is not zero, it is a bitwise OR of the acceptable values of
+ * hasho_flag.
  */
 void
 _hash_checkpage(Relation rel, Buffer buf, int flags)

src/include/access/hash.h

@@ -7,7 +7,7 @@
 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $PostgreSQL: pgsql/src/include/access/hash.h,v 1.79 2007/04/19 20:24:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/hash.h,v 1.80 2007/05/03 16:45:58 tgl Exp $
 *
 * NOTES
 *		modeled after Margo Seltzer's hash implementation for unix.
@@ -283,8 +283,10 @@ extern void _hash_squeezebucket(Relation rel,
 extern void _hash_getlock(Relation rel, BlockNumber whichlock, int access);
 extern bool _hash_try_getlock(Relation rel, BlockNumber whichlock, int access);
 extern void _hash_droplock(Relation rel, BlockNumber whichlock, int access);
-extern Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access);
-extern Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno, int access);
+extern Buffer _hash_getbuf(Relation rel, BlockNumber blkno,
+						   int access, int flags);
+extern Buffer _hash_getinitbuf(Relation rel, BlockNumber blkno);
+extern Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno);
 extern void _hash_relbuf(Relation rel, Buffer buf);
 extern void _hash_dropbuf(Relation rel, Buffer buf);
 extern void _hash_wrtbuf(Relation rel, Buffer buf);
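
As the pgstattuple hunk at the top shows, a caller that does not know in
advance what kind of hash page it is fetching can pass zero for the new
flags argument, which makes _hash_checkpage apply only its generic sanity
checks.  A sketch of that usage (illustrative only):

    /* read-only fetch of a hash page of unknown type */
    buf = _hash_getbuf(rel, blkno, HASH_READ, 0);	/* 0 = any page type */
    page = BufferGetPage(buf);
    /* ... examine the page ... */
    _hash_relbuf(rel, buf);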