/*-------------------------------------------------------------------------
 *
 * hashinsert.c
 *    Item insertion in hash tables for Postgres.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/access/hash/hashinsert.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/hash.h"
#include "utils/rel.h"

/*
 * _hash_doinsert() -- Handle insertion of a single index tuple.
 *
 * This routine is called by the public interface routines, hashbuild
 * and hashinsert.  By here, itup is completely filled in.
 */
void
_hash_doinsert(Relation rel, IndexTuple itup)
{
    Buffer      buf = InvalidBuffer;
    Buffer      bucket_buf;
    Buffer      metabuf;
    HashMetaPage metap;
    HashMetaPage usedmetap = NULL;
    Page        metapage;
    Page        page;
    HashPageOpaque pageopaque;
    Size        itemsz;
    bool        do_expand;
    uint32      hashkey;
    Bucket      bucket;

    /*
     * Get the hash key for the item (it's stored in the index tuple itself).
     */
    hashkey = _hash_get_indextuple_hashkey(itup);

    /* compute item size too */
    itemsz = IndexTupleDSize(*itup);
    itemsz = MAXALIGN(itemsz);  /* be safe, PageAddItem will do this but we
                                 * need to be consistent */

restart_insert:

    /*
     * Read the metapage.  We don't lock it yet; HashMaxItemSize() will
     * examine pd_pagesize_version, but that can't change so we can examine
     * it without a lock.
     */
    metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE);
    metapage = BufferGetPage(metabuf);

    /*
     * Check whether the item can fit on a hash page at all.  (Eventually, we
     * ought to try to apply TOAST methods if not.)  Note that at this point,
     * itemsz doesn't include the ItemId.
     *
     * XXX this is useless code if we are only storing hash keys.
     */
    if (itemsz > HashMaxItemSize(metapage))
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("index row size %zu exceeds hash maximum %zu",
                        itemsz, HashMaxItemSize(metapage)),
                 errhint("Values larger than a buffer page cannot be indexed.")));

    /* Lock the primary bucket page for the target bucket. */
    buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_WRITE,
                                          &usedmetap);
    Assert(usedmetap != NULL);
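
    /*
     * Note, summarized from the commit history (an assumption about
     * behavior implemented elsewhere, not verified in this file): the
     * lookup above may use metapage data cached in rel->rd_amcache rather
     * than locking the metapage.  A bucket split can make that cache
     * stale, so staleness is detected while locking the primary bucket
     * page, using its hasho_prevblkno field, and the lookup is redone
     * with fresh metapage data when needed.
     */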

    /* remember the primary bucket buffer to release the pin on it at end. */
    bucket_buf = buf;

    page = BufferGetPage(buf);
    pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
    bucket = pageopaque->hasho_bucket;

    /*
     * If this bucket is in the process of being split, try to finish the
     * split before inserting, because that might create room for the
     * insertion to proceed without allocating an additional overflow page.
     * It's only interesting to finish the split if we're trying to insert
     * into the bucket from which we're removing tuples (the "old" bucket),
     * not if we're trying to insert into the bucket into which tuples are
     * being moved (the "new" bucket).
     */
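    /*
     * (Per the bucket-split redesign notes: the right to begin a split is
     * represented by a cleanup lock on the primary bucket page, so
     * IsBufferCleanupOK() tells us whether we could complete the split
     * ourselves.  Splits are performed as a series of small steps that can
     * be resumed after an interruption, which is why an insert may find a
     * bucket with a split still in progress.)
     */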
    if (H_BUCKET_BEING_SPLIT(pageopaque) && IsBufferCleanupOK(buf))
    {
        /* release the lock on bucket buffer, before completing the split. */
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);

        _hash_finish_split(rel, metabuf, buf, bucket,
                           usedmetap->hashm_maxbucket,
                           usedmetap->hashm_highmask,
                           usedmetap->hashm_lowmask);

        /* release the pin on old and meta buffer.  retry for insert. */
        _hash_dropbuf(rel, buf);
        _hash_dropbuf(rel, metabuf);
        goto restart_insert;
    }

    /* Do the insertion */
    while (PageGetFreeSpace(page) < itemsz)
    {
        /*
         * no space on this page; check for an overflow page
         */
        BlockNumber nextblkno = pageopaque->hasho_nextblkno;

        if (BlockNumberIsValid(nextblkno))
        {
            /*
             * ovfl page exists; go get it.  if it doesn't have room, we'll
             * find out next pass through the loop test above.  we always
             * release both the lock and pin if this is an overflow page,
             * but only the lock if this is the primary bucket page, since
             * the pin on the primary bucket must be retained throughout
             * the scan.
             */
            if (buf != bucket_buf)
                _hash_relbuf(rel, buf);
            else
                LockBuffer(buf, BUFFER_LOCK_UNLOCK);
            buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
            page = BufferGetPage(buf);
        }
        else
        {
            /*
             * we're at the end of the bucket chain and we haven't found a
             * page with enough room.  allocate a new overflow page.
             */

            /* release our write lock without modifying buffer */
            LockBuffer(buf, BUFFER_LOCK_UNLOCK);

            /* chain to a new overflow page */
            buf = _hash_addovflpage(rel, metabuf, buf, (buf == bucket_buf));
            page = BufferGetPage(buf);

            /* should fit now, given test above */
            Assert(PageGetFreeSpace(page) >= itemsz);
        }
        pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
        Assert(pageopaque->hasho_flag == LH_OVERFLOW_PAGE);
        Assert(pageopaque->hasho_bucket == bucket);
    }

    /* found page with enough space, so add the item here */
    (void) _hash_pgaddtup(rel, buf, itemsz, itup);

    /*
     * dirty and release the modified page.  if the page we modified was an
     * overflow page, we also need to separately drop the pin we retained on
     * the primary bucket page.
     */
    MarkBufferDirty(buf);
    _hash_relbuf(rel, buf);
    if (buf != bucket_buf)
        _hash_dropbuf(rel, bucket_buf);

    /*
     * Write-lock the metapage so we can increment the tuple count.  After
     * incrementing it, check to see if it's time for a split.
     */
    LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);

    metap = HashPageGetMeta(metapage);
    metap->hashm_ntuples += 1;

    /* Make sure this stays in sync with _hash_expandtable() */
    do_expand = metap->hashm_ntuples >
        (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1);
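
    /*
     * Worked example with hypothetical numbers: if hashm_ffactor is 75 and
     * hashm_maxbucket is 3 (i.e. four buckets exist), do_expand becomes
     * true once the index holds more than 75 * 4 = 300 tuples.
     */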

    /* Write out the metapage and drop lock, but keep pin */
    MarkBufferDirty(metabuf);
    LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

    /* Attempt to split if a split is needed */
    if (do_expand)
        _hash_expandtable(rel, metabuf);

    /* Finally drop our pin on the metapage */
    _hash_dropbuf(rel, metabuf);
}

/*
 * _hash_pgaddtup() -- add a tuple to a particular page in the index.
 *
 * This routine adds the tuple to the page as requested; it does not write
 * out the page.  It is an error to call pgaddtup() without pin and write
 * lock on the target buffer.
 *
 * Returns the offset number at which the tuple was inserted.  This function
 * is responsible for preserving the condition that tuples in a hash index
 * page are sorted by hashkey value.
 */
OffsetNumber
_hash_pgaddtup(Relation rel, Buffer buf, Size itemsize, IndexTuple itup)
{
    OffsetNumber itup_off;
    Page        page;
    uint32      hashkey;

    _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
    page = BufferGetPage(buf);

    /* Find where to insert the tuple (preserving page's hashkey ordering) */
    hashkey = _hash_get_indextuple_hashkey(itup);
    itup_off = _hash_binsearch(page, hashkey);
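
    /*
     * _hash_binsearch is assumed here to return the offset at which the
     * new tuple belongs, i.e. the first offset whose stored hash key is
     * >= hashkey; inserting at that offset preserves the sorted-by-hashkey
     * invariant described in the header comment.
     */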

    if (PageAddItem(page, (Item) itup, itemsize, itup_off, false, false)
        == InvalidOffsetNumber)
        elog(ERROR, "failed to add index item to \"%s\"",
             RelationGetRelationName(rel));

    return itup_off;
}