1996-07-09 08:22:35 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
2003-11-09 22:30:38 +01:00
|
|
|
* nbtinsert.c
|
1997-09-07 07:04:48 +02:00
|
|
|
* Item insertion in Lehman and Yao btrees for Postgres.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2019-01-02 18:44:25 +01:00
|
|
|
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
|
2000-01-26 06:58:53 +01:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/backend/access/nbtree/nbtinsert.c
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
1999-07-16 01:04:24 +02:00
|
|
|
#include "postgres.h"
|
1996-10-23 09:42:13 +02:00
|
|
|
|
1999-07-16 07:00:38 +02:00
|
|
|
#include "access/nbtree.h"
|
2017-02-14 21:37:59 +01:00
|
|
|
#include "access/nbtxlog.h"
|
2019-03-26 00:52:55 +01:00
|
|
|
#include "access/tableam.h"
|
2006-07-13 18:49:20 +02:00
|
|
|
#include "access/transam.h"
|
2014-11-06 12:52:08 +01:00
|
|
|
#include "access/xloginsert.h"
|
2001-01-14 06:08:17 +01:00
|
|
|
#include "miscadmin.h"
|
2008-05-12 02:00:54 +02:00
|
|
|
#include "storage/lmgr.h"
|
Implement genuine serializable isolation level.
Until now, our Serializable mode has in fact been what's called Snapshot
Isolation, which allows some anomalies that could not occur in any
serialized ordering of the transactions. This patch fixes that using a
method called Serializable Snapshot Isolation, based on research papers by
Michael J. Cahill (see README-SSI for full references). In Serializable
Snapshot Isolation, transactions run like they do in Snapshot Isolation,
but a predicate lock manager observes the reads and writes performed and
aborts transactions if it detects that an anomaly might occur. This method
produces some false positives, ie. it sometimes aborts transactions even
though there is no anomaly.
To track reads we implement predicate locking, see storage/lmgr/predicate.c.
Whenever a tuple is read, a predicate lock is acquired on the tuple. Shared
memory is finite, so when a transaction takes many tuple-level locks on a
page, the locks are promoted to a single page-level lock, and further to a
single relation level lock if necessary. To lock key values with no matching
tuple, a sequential scan always takes a relation-level lock, and an index
scan acquires a page-level lock that covers the search key, whether or not
there are any matching keys at the moment.
A predicate lock doesn't conflict with any regular locks or with another
predicate locks in the normal sense. They're only used by the predicate lock
manager to detect the danger of anomalies. Only serializable transactions
participate in predicate locking, so there should be no extra overhead
for other transactions.
Predicate locks can't be released at commit, but must be remembered until
all the transactions that overlapped with it have completed. That means that
we need to remember an unbounded amount of predicate locks, so we apply a
lossy but conservative method of tracking locks for committed transactions.
If we run short of shared memory, we overflow to a new "pg_serial" SLRU
pool.
We don't currently allow Serializable transactions in Hot Standby mode.
That would be hard, because even read-only transactions can cause anomalies
that wouldn't otherwise occur.
Serializable isolation mode now means the new fully serializable level.
Repeatable Read gives you the old Snapshot Isolation level that we have
always had.
Kevin Grittner and Dan Ports, reviewed by Jeff Davis, Heikki Linnakangas and
Anssi Kääriäinen
2011-02-07 22:46:51 +01:00
|
|
|
#include "storage/predicate.h"
|
2018-03-26 14:09:24 +02:00
|
|
|
#include "storage/smgr.h"
|
1996-10-20 12:53:18 +02:00
|
|
|
|
2018-04-11 00:21:03 +02:00
|
|
|
/* Minimum tree height for application of fastpath optimization */
|
|
|
|
#define BTREE_FASTPATH_MIN_LEVEL 2
|
1996-11-03 13:35:27 +01:00
|
|
|
|
2001-01-26 02:24:31 +01:00
|
|
|
|
|
|
|
static Buffer _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf);
|
2000-07-21 08:42:39 +02:00
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
static TransactionId _bt_check_unique(Relation rel, BTInsertState insertstate,
|
2019-05-22 19:04:48 +02:00
|
|
|
Relation heapRel,
|
|
|
|
IndexUniqueCheck checkUnique, bool *is_unique,
|
|
|
|
uint32 *speculativeToken);
|
2019-03-20 17:30:57 +01:00
|
|
|
static OffsetNumber _bt_findinsertloc(Relation rel,
|
2019-05-22 19:04:48 +02:00
|
|
|
BTInsertState insertstate,
|
|
|
|
bool checkingunique,
|
|
|
|
BTStack stack,
|
|
|
|
Relation heapRel);
|
2019-03-20 17:30:57 +01:00
|
|
|
static void _bt_stepright(Relation rel, BTInsertState insertstate, BTStack stack);
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
static void _bt_insertonpg(Relation rel, BTScanInsert itup_key,
|
2019-05-22 19:04:48 +02:00
|
|
|
Buffer buf,
|
|
|
|
Buffer cbuf,
|
|
|
|
BTStack stack,
|
|
|
|
IndexTuple itup,
|
|
|
|
OffsetNumber newitemoff,
|
|
|
|
bool split_only_page);
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
static Buffer _bt_split(Relation rel, BTScanInsert itup_key, Buffer buf,
|
2019-05-22 19:04:48 +02:00
|
|
|
Buffer cbuf, OffsetNumber newitemoff, Size newitemsz,
|
|
|
|
IndexTuple newitem);
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
static void _bt_insert_parent(Relation rel, Buffer buf, Buffer rbuf,
|
2019-05-22 19:04:48 +02:00
|
|
|
BTStack stack, bool is_root, bool is_only);
|
2016-04-08 20:52:13 +02:00
|
|
|
static bool _bt_pgaddtup(Page page, Size itemsize, IndexTuple itup,
|
2019-05-22 19:04:48 +02:00
|
|
|
OffsetNumber itup_off);
|
2010-03-28 11:27:02 +02:00
|
|
|
static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel);
|
1996-07-09 08:22:35 +02:00
|
|
|
|
|
|
|
/*
|
2006-01-26 00:04:21 +01:00
|
|
|
* _bt_doinsert() -- Handle insertion of a single index tuple in the tree.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2014-02-26 17:48:21 +01:00
|
|
|
* This routine is called by the public interface routine, btinsert.
|
|
|
|
* By here, itup is filled in, including the TID.
|
2009-07-29 22:56:21 +02:00
|
|
|
*
|
|
|
|
* If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
|
2014-05-06 18:12:18 +02:00
|
|
|
* will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
|
2009-07-29 22:56:21 +02:00
|
|
|
* UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
|
|
|
|
* For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
|
|
|
|
* don't actually insert.
|
|
|
|
*
|
|
|
|
* The result value is only significant for UNIQUE_CHECK_PARTIAL:
|
2017-08-16 06:22:32 +02:00
|
|
|
* it must be true if the entry is known unique, else false.
|
|
|
|
* (In the current implementation we'll also return true after a
|
2009-07-29 22:56:21 +02:00
|
|
|
* successful UNIQUE_CHECK_YES or UNIQUE_CHECK_EXISTING call, but
|
|
|
|
* that's just a coding artifact.)
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2009-07-29 22:56:21 +02:00
|
|
|
bool
|
2006-01-26 00:04:21 +01:00
|
|
|
_bt_doinsert(Relation rel, IndexTuple itup,
|
2009-07-29 22:56:21 +02:00
|
|
|
IndexUniqueCheck checkUnique, Relation heapRel)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2009-07-29 22:56:21 +02:00
|
|
|
bool is_unique = false;
|
2019-03-20 17:30:57 +01:00
|
|
|
BTInsertStateData insertstate;
|
|
|
|
BTScanInsert itup_key;
|
2018-03-26 14:09:24 +02:00
|
|
|
BTStack stack = NULL;
|
1997-09-08 04:41:22 +02:00
|
|
|
Buffer buf;
|
2018-03-26 14:09:24 +02:00
|
|
|
bool fastpath;
|
2019-03-20 17:30:57 +01:00
|
|
|
bool checkingunique = (checkUnique != UNIQUE_CHECK_NO);
|
2018-04-07 22:00:39 +02:00
|
|
|
|
2006-01-17 01:09:01 +01:00
|
|
|
/* we need an insertion scan key to do our search, so build one */
|
2019-03-20 17:30:57 +01:00
|
|
|
itup_key = _bt_mkscankey(rel, itup);
|
Prevent O(N^2) unique index insertion edge case.
Commit dd299df8 made nbtree treat heap TID as a tiebreaker column,
establishing the principle that there is only one correct location (page
and page offset number) for every index tuple, no matter what.
Insertions of tuples into non-unique indexes proceed as if heap TID
(scan key's scantid) is just another user-attribute value, but
insertions into unique indexes are more delicate. The TID value in
scantid must initially be omitted to ensure that the unique index
insertion visits every leaf page that duplicates could be on. The
scantid is set once again after unique checking finishes successfully,
which can force _bt_findinsertloc() to step right one or more times, to
locate the leaf page that the new tuple must be inserted on.
Stepping right within _bt_findinsertloc() was assumed to occur no more
frequently than stepping right within _bt_check_unique(), but there was
one important case where that assumption was incorrect: inserting a
"duplicate" with NULL values. Since _bt_check_unique() didn't do any
real work in this case, it wasn't appropriate for _bt_findinsertloc() to
behave as if it was finishing off a conventional unique insertion, where
any existing physical duplicate must be dead or recently dead.
_bt_findinsertloc() might have to grovel through a substantial portion
of all of the leaf pages in the index to insert a single tuple, even
when there were no dead tuples.
To fix, treat insertions of tuples with NULLs into a unique index as if
they were insertions into a non-unique index: never unset scantid before
calling _bt_search() to descend the tree, and bypass _bt_check_unique()
entirely. _bt_check_unique() is no longer responsible for incoming
tuples with NULL values.
Discussion: https://postgr.es/m/CAH2-Wzm08nr+JPx4jMOa9CGqxWYDQ-_D4wtPBiKghXAUiUy-nQ@mail.gmail.com
2019-04-23 19:33:57 +02:00
|
|
|
|
|
|
|
if (checkingunique)
|
|
|
|
{
|
|
|
|
if (!itup_key->anynullkeys)
|
|
|
|
{
|
|
|
|
/* No (heapkeyspace) scantid until uniqueness established */
|
|
|
|
itup_key->scantid = NULL;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Scan key for new tuple contains NULL key values. Bypass
|
|
|
|
* checkingunique steps. They are unnecessary because core code
|
|
|
|
* considers NULL unequal to every value, including NULL.
|
|
|
|
*
|
|
|
|
* This optimization avoids O(N^2) behavior within the
|
|
|
|
* _bt_findinsertloc() heapkeyspace path when a unique index has a
|
|
|
|
* large number of "duplicates" with NULL key values.
|
|
|
|
*/
|
|
|
|
checkingunique = false;
|
|
|
|
/* Tuple is unique in the sense that core code cares about */
|
|
|
|
Assert(checkUnique != UNIQUE_CHECK_EXISTING);
|
|
|
|
is_unique = true;
|
|
|
|
}
|
|
|
|
}
|
2019-03-20 17:30:57 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Fill in the BTInsertState working area, to track the current page and
|
|
|
|
* position within the page to insert on
|
|
|
|
*/
|
|
|
|
insertstate.itup = itup;
|
|
|
|
/* PageAddItem will MAXALIGN(), but be consistent */
|
|
|
|
insertstate.itemsz = MAXALIGN(IndexTupleSize(itup));
|
|
|
|
insertstate.itup_key = itup_key;
|
|
|
|
insertstate.bounds_valid = false;
|
|
|
|
insertstate.buf = InvalidBuffer;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2018-03-26 14:09:24 +02:00
|
|
|
/*
|
|
|
|
* It's very common to have an index on an auto-incremented or
|
|
|
|
* monotonically increasing value. In such cases, every insertion happens
|
2018-04-11 00:21:03 +02:00
|
|
|
* towards the end of the index. We try to optimize that case by caching
|
2018-03-26 14:09:24 +02:00
|
|
|
* the right-most leaf of the index. If our cached block is still the
|
|
|
|
* rightmost leaf, has enough free space to accommodate a new entry and
|
|
|
|
* the insertion key is strictly greater than the first key in this page,
|
|
|
|
* then we can safely conclude that the new key will be inserted in the
|
2018-04-26 20:47:16 +02:00
|
|
|
* cached block. So we simply search within the cached block and insert
|
|
|
|
* the key at the appropriate location. We call it a fastpath.
|
2018-03-26 14:09:24 +02:00
|
|
|
*
|
|
|
|
* Testing has revealed, though, that the fastpath can result in increased
|
|
|
|
* contention on the exclusive-lock on the rightmost leaf page. So we
|
2018-04-26 20:47:16 +02:00
|
|
|
* conditionally check if the lock is available. If it's not available
|
|
|
|
* then we simply abandon the fastpath and take the regular path. This
|
|
|
|
* makes sense because unavailability of the lock also signals that some
|
|
|
|
* other backend might be concurrently inserting into the page, thus
|
|
|
|
* reducing our chances to finding an insertion place in this page.
|
2018-03-26 14:09:24 +02:00
|
|
|
*/
|
2000-07-21 08:42:39 +02:00
|
|
|
top:
|
2018-03-26 14:09:24 +02:00
|
|
|
fastpath = false;
|
|
|
|
if (RelationGetTargetBlock(rel) != InvalidBlockNumber)
|
|
|
|
{
|
2018-04-26 20:47:16 +02:00
|
|
|
Page page;
|
|
|
|
BTPageOpaque lpageop;
|
2007-03-03 21:13:06 +01:00
|
|
|
|
2018-03-26 14:09:24 +02:00
|
|
|
/*
|
|
|
|
* Conditionally acquire exclusive lock on the buffer before doing any
|
|
|
|
* checks. If we don't get the lock, we simply follow slowpath. If we
|
2018-04-26 20:47:16 +02:00
|
|
|
* do get the lock, this ensures that the index state cannot change,
|
|
|
|
* as far as the rightmost part of the index is concerned.
|
2018-03-26 14:09:24 +02:00
|
|
|
*/
|
|
|
|
buf = ReadBuffer(rel, RelationGetTargetBlock(rel));
|
1998-12-15 13:47:01 +01:00
|
|
|
|
2018-03-26 14:09:24 +02:00
|
|
|
if (ConditionalLockBuffer(buf))
|
|
|
|
{
|
|
|
|
_bt_checkpage(rel, buf);
|
|
|
|
|
|
|
|
page = BufferGetPage(buf);
|
|
|
|
|
|
|
|
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if the page is still the rightmost leaf page, has enough
|
2018-04-26 20:47:16 +02:00
|
|
|
* free space to accommodate the new tuple, and the insertion scan
|
|
|
|
* key is strictly greater than the first key on the page.
|
2018-03-26 14:09:24 +02:00
|
|
|
*/
|
|
|
|
if (P_ISLEAF(lpageop) && P_RIGHTMOST(lpageop) &&
|
2018-04-07 22:00:39 +02:00
|
|
|
!P_IGNORE(lpageop) &&
|
2019-03-20 17:30:57 +01:00
|
|
|
(PageGetFreeSpace(page) > insertstate.itemsz) &&
|
2018-04-07 22:00:39 +02:00
|
|
|
PageGetMaxOffsetNumber(page) >= P_FIRSTDATAKEY(lpageop) &&
|
2019-03-20 17:30:57 +01:00
|
|
|
_bt_compare(rel, itup_key, page, P_FIRSTDATAKEY(lpageop)) > 0)
|
2018-03-26 14:09:24 +02:00
|
|
|
{
|
2018-04-11 00:21:03 +02:00
|
|
|
/*
|
2018-04-11 00:35:56 +02:00
|
|
|
* The right-most block should never have an incomplete split.
|
|
|
|
* But be paranoid and check for it anyway.
|
2018-04-11 00:21:03 +02:00
|
|
|
*/
|
|
|
|
Assert(!P_INCOMPLETE_SPLIT(lpageop));
|
2018-03-26 14:09:24 +02:00
|
|
|
fastpath = true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
_bt_relbuf(rel, buf);
|
|
|
|
|
|
|
|
/*
|
2018-04-01 21:01:28 +02:00
|
|
|
* Something did not work out. Just forget about the cached
|
2018-03-26 14:09:24 +02:00
|
|
|
* block and follow the normal path. It might be set again if
|
2018-04-01 21:01:28 +02:00
|
|
|
* the conditions are favourable.
|
2018-03-26 14:09:24 +02:00
|
|
|
*/
|
|
|
|
RelationSetTargetBlock(rel, InvalidBlockNumber);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
ReleaseBuffer(buf);
|
|
|
|
|
|
|
|
/*
|
2018-04-26 20:47:16 +02:00
|
|
|
* If someone's holding a lock, it's likely to change anyway, so
|
|
|
|
* don't try again until we get an updated rightmost leaf.
|
2018-03-26 14:09:24 +02:00
|
|
|
*/
|
|
|
|
RelationSetTargetBlock(rel, InvalidBlockNumber);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!fastpath)
|
|
|
|
{
|
|
|
|
/*
|
2018-07-27 23:31:40 +02:00
|
|
|
* Find the first page containing this key. Buffer returned by
|
|
|
|
* _bt_search() is locked in exclusive mode.
|
2018-03-26 14:09:24 +02:00
|
|
|
*/
|
2019-03-20 17:30:57 +01:00
|
|
|
stack = _bt_search(rel, itup_key, &buf, BT_WRITE, NULL);
|
2018-03-26 14:09:24 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
insertstate.buf = buf;
|
|
|
|
buf = InvalidBuffer; /* insertstate.buf now owns the buffer */
|
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* If we're not allowing duplicates, make sure the key isn't already in
|
|
|
|
* the index.
|
2002-01-01 21:32:37 +01:00
|
|
|
*
|
2005-11-22 19:17:34 +01:00
|
|
|
* NOTE: obviously, _bt_check_unique can only detect keys that are already
|
|
|
|
* in the index; so it cannot defend against concurrent insertions of the
|
2005-10-15 04:49:52 +02:00
|
|
|
* same key. We protect against that by means of holding a write lock on
|
Prevent O(N^2) unique index insertion edge case.
Commit dd299df8 made nbtree treat heap TID as a tiebreaker column,
establishing the principle that there is only one correct location (page
and page offset number) for every index tuple, no matter what.
Insertions of tuples into non-unique indexes proceed as if heap TID
(scan key's scantid) is just another user-attribute value, but
insertions into unique indexes are more delicate. The TID value in
scantid must initially be omitted to ensure that the unique index
insertion visits every leaf page that duplicates could be on. The
scantid is set once again after unique checking finishes successfully,
which can force _bt_findinsertloc() to step right one or more times, to
locate the leaf page that the new tuple must be inserted on.
Stepping right within _bt_findinsertloc() was assumed to occur no more
frequently than stepping right within _bt_check_unique(), but there was
one important case where that assumption was incorrect: inserting a
"duplicate" with NULL values. Since _bt_check_unique() didn't do any
real work in this case, it wasn't appropriate for _bt_findinsertloc() to
behave as if it was finishing off a conventional unique insertion, where
any existing physical duplicate must be dead or recently dead.
_bt_findinsertloc() might have to grovel through a substantial portion
of all of the leaf pages in the index to insert a single tuple, even
when there were no dead tuples.
To fix, treat insertions of tuples with NULLs into a unique index as if
they were insertions into a non-unique index: never unset scantid before
calling _bt_search() to descend the tree, and bypass _bt_check_unique()
entirely. _bt_check_unique() is no longer responsible for incoming
tuples with NULL values.
Discussion: https://postgr.es/m/CAH2-Wzm08nr+JPx4jMOa9CGqxWYDQ-_D4wtPBiKghXAUiUy-nQ@mail.gmail.com
2019-04-23 19:33:57 +02:00
|
|
|
* the first page the value could be on, with omitted/-inf value for the
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* implicit heap TID tiebreaker attribute. Any other would-be inserter of
|
|
|
|
* the same key must acquire a write lock on the same page, so only one
|
|
|
|
* would-be inserter can be making the check at one time. Furthermore,
|
|
|
|
* once we are past the check we hold write locks continuously until we
|
|
|
|
* have performed our insertion, so no later inserter can fail to see our
|
|
|
|
* insertion. (This requires some care in _bt_findinsertloc.)
|
2002-01-01 21:32:37 +01:00
|
|
|
*
|
2005-11-22 19:17:34 +01:00
|
|
|
* If we must wait for another xact, we release the lock while waiting,
|
|
|
|
* and then must start over completely.
|
2009-07-29 22:56:21 +02:00
|
|
|
*
|
2010-02-26 03:01:40 +01:00
|
|
|
* For a partial uniqueness check, we don't wait for the other xact. Just
|
|
|
|
* let the tuple in and return false for possibly non-unique, or true for
|
|
|
|
* definitely unique.
|
2000-07-21 08:42:39 +02:00
|
|
|
*/
|
2019-03-20 17:30:57 +01:00
|
|
|
if (checkingunique)
|
1997-01-10 11:06:20 +01:00
|
|
|
{
|
2015-05-24 03:35:49 +02:00
|
|
|
TransactionId xwait;
|
|
|
|
uint32 speculativeToken;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
xwait = _bt_check_unique(rel, &insertstate, heapRel, checkUnique,
|
|
|
|
&is_unique, &speculativeToken);
|
2000-07-21 08:42:39 +02:00
|
|
|
|
|
|
|
if (TransactionIdIsValid(xwait))
|
|
|
|
{
|
|
|
|
/* Have to wait for the other guy ... */
|
2019-03-20 17:30:57 +01:00
|
|
|
_bt_relbuf(rel, insertstate.buf);
|
|
|
|
insertstate.buf = InvalidBuffer;
|
2015-05-24 03:35:49 +02:00
|
|
|
|
Add support for INSERT ... ON CONFLICT DO NOTHING/UPDATE.
The newly added ON CONFLICT clause allows to specify an alternative to
raising a unique or exclusion constraint violation error when inserting.
ON CONFLICT refers to constraints that can either be specified using a
inference clause (by specifying the columns of a unique constraint) or
by naming a unique or exclusion constraint. DO NOTHING avoids the
constraint violation, without touching the pre-existing row. DO UPDATE
SET ... [WHERE ...] updates the pre-existing tuple, and has access to
both the tuple proposed for insertion and the existing tuple; the
optional WHERE clause can be used to prevent an update from being
executed. The UPDATE SET and WHERE clauses have access to the tuple
proposed for insertion using the "magic" EXCLUDED alias, and to the
pre-existing tuple using the table name or its alias.
This feature is often referred to as upsert.
This is implemented using a new infrastructure called "speculative
insertion". It is an optimistic variant of regular insertion that first
does a pre-check for existing tuples and then attempts an insert. If a
violating tuple was inserted concurrently, the speculatively inserted
tuple is deleted and a new attempt is made. If the pre-check finds a
matching tuple the alternative DO NOTHING or DO UPDATE action is taken.
If the insertion succeeds without detecting a conflict, the tuple is
deemed inserted.
To handle the possible ambiguity between the excluded alias and a table
named excluded, and for convenience with long relation names, INSERT
INTO now can alias its target table.
Bumps catversion as stored rules change.
Author: Peter Geoghegan, with significant contributions from Heikki
Linnakangas and Andres Freund. Testing infrastructure by Jeff Janes.
Reviewed-By: Heikki Linnakangas, Andres Freund, Robert Haas, Simon Riggs,
Dean Rasheed, Stephen Frost and many others.
2015-05-08 05:31:36 +02:00
|
|
|
/*
|
2015-05-24 03:35:49 +02:00
|
|
|
* If it's a speculative insertion, wait for it to finish (ie. to
|
|
|
|
* go ahead with the insertion, or kill the tuple). Otherwise
|
Add support for INSERT ... ON CONFLICT DO NOTHING/UPDATE.
The newly added ON CONFLICT clause allows to specify an alternative to
raising a unique or exclusion constraint violation error when inserting.
ON CONFLICT refers to constraints that can either be specified using a
inference clause (by specifying the columns of a unique constraint) or
by naming a unique or exclusion constraint. DO NOTHING avoids the
constraint violation, without touching the pre-existing row. DO UPDATE
SET ... [WHERE ...] updates the pre-existing tuple, and has access to
both the tuple proposed for insertion and the existing tuple; the
optional WHERE clause can be used to prevent an update from being
executed. The UPDATE SET and WHERE clauses have access to the tuple
proposed for insertion using the "magic" EXCLUDED alias, and to the
pre-existing tuple using the table name or its alias.
This feature is often referred to as upsert.
This is implemented using a new infrastructure called "speculative
insertion". It is an optimistic variant of regular insertion that first
does a pre-check for existing tuples and then attempts an insert. If a
violating tuple was inserted concurrently, the speculatively inserted
tuple is deleted and a new attempt is made. If the pre-check finds a
matching tuple the alternative DO NOTHING or DO UPDATE action is taken.
If the insertion succeeds without detecting a conflict, the tuple is
deemed inserted.
To handle the possible ambiguity between the excluded alias and a table
named excluded, and for convenience with long relation names, INSERT
INTO now can alias its target table.
Bumps catversion as stored rules change.
Author: Peter Geoghegan, with significant contributions from Heikki
Linnakangas and Andres Freund. Testing infrastructure by Jeff Janes.
Reviewed-By: Heikki Linnakangas, Andres Freund, Robert Haas, Simon Riggs,
Dean Rasheed, Stephen Frost and many others.
2015-05-08 05:31:36 +02:00
|
|
|
* wait for the transaction to finish as usual.
|
|
|
|
*/
|
|
|
|
if (speculativeToken)
|
|
|
|
SpeculativeInsertionWait(xwait, speculativeToken);
|
|
|
|
else
|
|
|
|
XactLockTableWait(xwait, rel, &itup->t_tid, XLTW_InsertIndex);
|
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/* start over... */
|
2018-03-26 14:09:24 +02:00
|
|
|
if (stack)
|
|
|
|
_bt_freestack(stack);
|
2000-07-21 08:42:39 +02:00
|
|
|
goto top;
|
|
|
|
}
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
|
|
|
|
/* Uniqueness is established -- restore heap tid as scantid */
|
|
|
|
if (itup_key->heapkeyspace)
|
|
|
|
itup_key->scantid = &itup->t_tid;
|
2000-07-21 08:42:39 +02:00
|
|
|
}
|
|
|
|
|
2009-07-29 22:56:21 +02:00
|
|
|
if (checkUnique != UNIQUE_CHECK_EXISTING)
|
|
|
|
{
|
2019-03-20 17:30:57 +01:00
|
|
|
OffsetNumber newitemoff;
|
|
|
|
|
Implement genuine serializable isolation level.
Until now, our Serializable mode has in fact been what's called Snapshot
Isolation, which allows some anomalies that could not occur in any
serialized ordering of the transactions. This patch fixes that using a
method called Serializable Snapshot Isolation, based on research papers by
Michael J. Cahill (see README-SSI for full references). In Serializable
Snapshot Isolation, transactions run like they do in Snapshot Isolation,
but a predicate lock manager observes the reads and writes performed and
aborts transactions if it detects that an anomaly might occur. This method
produces some false positives, ie. it sometimes aborts transactions even
though there is no anomaly.
To track reads we implement predicate locking, see storage/lmgr/predicate.c.
Whenever a tuple is read, a predicate lock is acquired on the tuple. Shared
memory is finite, so when a transaction takes many tuple-level locks on a
page, the locks are promoted to a single page-level lock, and further to a
single relation level lock if necessary. To lock key values with no matching
tuple, a sequential scan always takes a relation-level lock, and an index
scan acquires a page-level lock that covers the search key, whether or not
there are any matching keys at the moment.
A predicate lock doesn't conflict with any regular locks or with other
predicate locks in the normal sense. They're only used by the predicate lock
manager to detect the danger of anomalies. Only serializable transactions
participate in predicate locking, so there should be no extra overhead
for other transactions.
Predicate locks can't be released at commit, but must be remembered until
all the transactions that overlapped with it have completed. That means that
we need to remember an unbounded amount of predicate locks, so we apply a
lossy but conservative method of tracking locks for committed transactions.
If we run short of shared memory, we overflow to a new "pg_serial" SLRU
pool.
We don't currently allow Serializable transactions in Hot Standby mode.
That would be hard, because even read-only transactions can cause anomalies
that wouldn't otherwise occur.
Serializable isolation mode now means the new fully serializable level.
Repeatable Read gives you the old Snapshot Isolation level that we have
always had.
Kevin Grittner and Dan Ports, reviewed by Jeff Davis, Heikki Linnakangas and
Anssi Kääriäinen
2011-02-07 22:46:51 +01:00
|
|
|
/*
|
|
|
|
* The only conflict predicate locking cares about for indexes is when
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* an index tuple insert conflicts with an existing lock. We don't
|
Prevent O(N^2) unique index insertion edge case.
Commit dd299df8 made nbtree treat heap TID as a tiebreaker column,
establishing the principle that there is only one correct location (page
and page offset number) for every index tuple, no matter what.
Insertions of tuples into non-unique indexes proceed as if heap TID
(scan key's scantid) is just another user-attribute value, but
insertions into unique indexes are more delicate. The TID value in
scantid must initially be omitted to ensure that the unique index
insertion visits every leaf page that duplicates could be on. The
scantid is set once again after unique checking finishes successfully,
which can force _bt_findinsertloc() to step right one or more times, to
locate the leaf page that the new tuple must be inserted on.
Stepping right within _bt_findinsertloc() was assumed to occur no more
frequently than stepping right within _bt_check_unique(), but there was
one important case where that assumption was incorrect: inserting a
"duplicate" with NULL values. Since _bt_check_unique() didn't do any
real work in this case, it wasn't appropriate for _bt_findinsertloc() to
behave as if it was finishing off a conventional unique insertion, where
any existing physical duplicate must be dead or recently dead.
_bt_findinsertloc() might have to grovel through a substantial portion
of all of the leaf pages in the index to insert a single tuple, even
when there were no dead tuples.
To fix, treat insertions of tuples with NULLs into a unique index as if
they were insertions into a non-unique index: never unset scantid before
calling _bt_search() to descend the tree, and bypass _bt_check_unique()
entirely. _bt_check_unique() is no longer responsible for incoming
tuples with NULL values.
Discussion: https://postgr.es/m/CAH2-Wzm08nr+JPx4jMOa9CGqxWYDQ-_D4wtPBiKghXAUiUy-nQ@mail.gmail.com
2019-04-23 19:33:57 +02:00
|
|
|
* know the actual page we're going to insert on for sure just yet in
|
|
|
|
* checkingunique and !heapkeyspace cases, but it's okay to use the
|
|
|
|
* first page the value could be on (with scantid omitted) instead.
|
Implement genuine serializable isolation level.
Until now, our Serializable mode has in fact been what's called Snapshot
Isolation, which allows some anomalies that could not occur in any
serialized ordering of the transactions. This patch fixes that using a
method called Serializable Snapshot Isolation, based on research papers by
Michael J. Cahill (see README-SSI for full references). In Serializable
Snapshot Isolation, transactions run like they do in Snapshot Isolation,
but a predicate lock manager observes the reads and writes performed and
aborts transactions if it detects that an anomaly might occur. This method
produces some false positives, ie. it sometimes aborts transactions even
though there is no anomaly.
To track reads we implement predicate locking, see storage/lmgr/predicate.c.
Whenever a tuple is read, a predicate lock is acquired on the tuple. Shared
memory is finite, so when a transaction takes many tuple-level locks on a
page, the locks are promoted to a single page-level lock, and further to a
single relation level lock if necessary. To lock key values with no matching
tuple, a sequential scan always takes a relation-level lock, and an index
scan acquires a page-level lock that covers the search key, whether or not
there are any matching keys at the moment.
A predicate lock doesn't conflict with any regular locks or with other
predicate locks in the normal sense. They're only used by the predicate lock
manager to detect the danger of anomalies. Only serializable transactions
participate in predicate locking, so there should be no extra overhead
for other transactions.
Predicate locks can't be released at commit, but must be remembered until
all the transactions that overlapped with it have completed. That means that
we need to remember an unbounded amount of predicate locks, so we apply a
lossy but conservative method of tracking locks for committed transactions.
If we run short of shared memory, we overflow to a new "pg_serial" SLRU
pool.
We don't currently allow Serializable transactions in Hot Standby mode.
That would be hard, because even read-only transactions can cause anomalies
that wouldn't otherwise occur.
Serializable isolation mode now means the new fully serializable level.
Repeatable Read gives you the old Snapshot Isolation level that we have
always had.
Kevin Grittner and Dan Ports, reviewed by Jeff Davis, Heikki Linnakangas and
Anssi Kääriäinen
2011-02-07 22:46:51 +01:00
|
|
|
*/
|
2019-03-20 17:30:57 +01:00
|
|
|
CheckForSerializableConflictIn(rel, NULL, insertstate.buf);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do the insertion. Note that insertstate contains cached binary
|
|
|
|
* search bounds established within _bt_check_unique when insertion is
|
|
|
|
* checkingunique.
|
|
|
|
*/
|
|
|
|
newitemoff = _bt_findinsertloc(rel, &insertstate, checkingunique,
|
|
|
|
stack, heapRel);
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
_bt_insertonpg(rel, itup_key, insertstate.buf, InvalidBuffer, stack,
|
|
|
|
itup, newitemoff, false);
|
2009-07-29 22:56:21 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* just release the buffer */
|
2019-03-20 17:30:57 +01:00
|
|
|
_bt_relbuf(rel, insertstate.buf);
|
2009-07-29 22:56:21 +02:00
|
|
|
}
|
2000-07-21 08:42:39 +02:00
|
|
|
|
|
|
|
/* be tidy */
|
2018-03-26 14:09:24 +02:00
|
|
|
if (stack)
|
|
|
|
_bt_freestack(stack);
|
2019-03-20 17:30:57 +01:00
|
|
|
pfree(itup_key);
|
2009-07-29 22:56:21 +02:00
|
|
|
|
|
|
|
return is_unique;
|
2000-07-21 08:42:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* _bt_check_unique() -- Check for violation of unique index constraint
|
|
|
|
*
|
2001-08-24 01:06:38 +02:00
|
|
|
* Returns InvalidTransactionId if there is no conflict, else an xact ID
|
2014-05-06 18:12:18 +02:00
|
|
|
* we must wait for to see if it commits a conflicting tuple. If an actual
|
Add support for INSERT ... ON CONFLICT DO NOTHING/UPDATE.
The newly added ON CONFLICT clause allows to specify an alternative to
raising a unique or exclusion constraint violation error when inserting.
ON CONFLICT refers to constraints that can either be specified using an
inference clause (by specifying the columns of a unique constraint) or
by naming a unique or exclusion constraint. DO NOTHING avoids the
constraint violation, without touching the pre-existing row. DO UPDATE
SET ... [WHERE ...] updates the pre-existing tuple, and has access to
both the tuple proposed for insertion and the existing tuple; the
optional WHERE clause can be used to prevent an update from being
executed. The UPDATE SET and WHERE clauses have access to the tuple
proposed for insertion using the "magic" EXCLUDED alias, and to the
pre-existing tuple using the table name or its alias.
This feature is often referred to as upsert.
This is implemented using a new infrastructure called "speculative
insertion". It is an optimistic variant of regular insertion that first
does a pre-check for existing tuples and then attempts an insert. If a
violating tuple was inserted concurrently, the speculatively inserted
tuple is deleted and a new attempt is made. If the pre-check finds a
matching tuple the alternative DO NOTHING or DO UPDATE action is taken.
If the insertion succeeds without detecting a conflict, the tuple is
deemed inserted.
To handle the possible ambiguity between the excluded alias and a table
named excluded, and for convenience with long relation names, INSERT
INTO now can alias its target table.
Bumps catversion as stored rules change.
Author: Peter Geoghegan, with significant contributions from Heikki
Linnakangas and Andres Freund. Testing infrastructure by Jeff Janes.
Reviewed-By: Heikki Linnakangas, Andres Freund, Robert Haas, Simon Riggs,
Dean Rasheed, Stephen Frost and many others.
2015-05-08 05:31:36 +02:00
|
|
|
* conflict is detected, no return --- just ereport(). If an xact ID is
|
|
|
|
* returned, and the conflicting tuple still has a speculative insertion in
|
|
|
|
* progress, *speculativeToken is set to non-zero, and the caller can wait for
|
|
|
|
* the verdict on the insertion using SpeculativeInsertionWait().
|
2009-07-29 22:56:21 +02:00
|
|
|
*
|
|
|
|
* However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
|
|
|
|
* InvalidTransactionId because we don't want to wait. In this case we
|
|
|
|
* set *is_unique to false if there is a potential conflict, and the
|
|
|
|
* core code must redo the uniqueness check later.
|
2019-03-20 17:30:57 +01:00
|
|
|
*
|
|
|
|
* As a side-effect, sets state in insertstate that can later be used by
|
|
|
|
* _bt_findinsertloc() to reuse most of the binary search work we do
|
|
|
|
* here.
|
Prevent O(N^2) unique index insertion edge case.
Commit dd299df8 made nbtree treat heap TID as a tiebreaker column,
establishing the principle that there is only one correct location (page
and page offset number) for every index tuple, no matter what.
Insertions of tuples into non-unique indexes proceed as if heap TID
(scan key's scantid) is just another user-attribute value, but
insertions into unique indexes are more delicate. The TID value in
scantid must initially be omitted to ensure that the unique index
insertion visits every leaf page that duplicates could be on. The
scantid is set once again after unique checking finishes successfully,
which can force _bt_findinsertloc() to step right one or more times, to
locate the leaf page that the new tuple must be inserted on.
Stepping right within _bt_findinsertloc() was assumed to occur no more
frequently than stepping right within _bt_check_unique(), but there was
one important case where that assumption was incorrect: inserting a
"duplicate" with NULL values. Since _bt_check_unique() didn't do any
real work in this case, it wasn't appropriate for _bt_findinsertloc() to
behave as if it was finishing off a conventional unique insertion, where
any existing physical duplicate must be dead or recently dead.
_bt_findinsertloc() might have to grovel through a substantial portion
of all of the leaf pages in the index to insert a single tuple, even
when there were no dead tuples.
To fix, treat insertions of tuples with NULLs into a unique index as if
they were insertions into a non-unique index: never unset scantid before
calling _bt_search() to descend the tree, and bypass _bt_check_unique()
entirely. _bt_check_unique() is no longer responsible for incoming
tuples with NULL values.
Discussion: https://postgr.es/m/CAH2-Wzm08nr+JPx4jMOa9CGqxWYDQ-_D4wtPBiKghXAUiUy-nQ@mail.gmail.com
2019-04-23 19:33:57 +02:00
|
|
|
*
|
|
|
|
* Do not call here when there are NULL values in scan key. NULL should be
|
|
|
|
* considered unequal to NULL when checking for duplicates, but we are not
|
|
|
|
* prepared to handle that correctly.
|
2000-07-21 08:42:39 +02:00
|
|
|
*/
|
|
|
|
static TransactionId
|
2019-03-20 17:30:57 +01:00
|
|
|
_bt_check_unique(Relation rel, BTInsertState insertstate, Relation heapRel,
|
Add support for INSERT ... ON CONFLICT DO NOTHING/UPDATE.
The newly added ON CONFLICT clause allows to specify an alternative to
raising a unique or exclusion constraint violation error when inserting.
ON CONFLICT refers to constraints that can either be specified using an
inference clause (by specifying the columns of a unique constraint) or
by naming a unique or exclusion constraint. DO NOTHING avoids the
constraint violation, without touching the pre-existing row. DO UPDATE
SET ... [WHERE ...] updates the pre-existing tuple, and has access to
both the tuple proposed for insertion and the existing tuple; the
optional WHERE clause can be used to prevent an update from being
executed. The UPDATE SET and WHERE clauses have access to the tuple
proposed for insertion using the "magic" EXCLUDED alias, and to the
pre-existing tuple using the table name or its alias.
This feature is often referred to as upsert.
This is implemented using a new infrastructure called "speculative
insertion". It is an optimistic variant of regular insertion that first
does a pre-check for existing tuples and then attempts an insert. If a
violating tuple was inserted concurrently, the speculatively inserted
tuple is deleted and a new attempt is made. If the pre-check finds a
matching tuple the alternative DO NOTHING or DO UPDATE action is taken.
If the insertion succeeds without detecting a conflict, the tuple is
deemed inserted.
To handle the possible ambiguity between the excluded alias and a table
named excluded, and for convenience with long relation names, INSERT
INTO now can alias its target table.
Bumps catversion as stored rules change.
Author: Peter Geoghegan, with significant contributions from Heikki
Linnakangas and Andres Freund. Testing infrastructure by Jeff Janes.
Reviewed-By: Heikki Linnakangas, Andres Freund, Robert Haas, Simon Riggs,
Dean Rasheed, Stephen Frost and many others.
2015-05-08 05:31:36 +02:00
|
|
|
IndexUniqueCheck checkUnique, bool *is_unique,
|
|
|
|
uint32 *speculativeToken)
|
2000-07-21 08:42:39 +02:00
|
|
|
{
|
2019-03-20 17:30:57 +01:00
|
|
|
IndexTuple itup = insertstate->itup;
|
|
|
|
BTScanInsert itup_key = insertstate->itup_key;
|
2007-03-25 21:45:14 +02:00
|
|
|
SnapshotData SnapshotDirty;
|
2019-03-20 17:30:57 +01:00
|
|
|
OffsetNumber offset;
|
2007-03-03 21:13:06 +01:00
|
|
|
OffsetNumber maxoff;
|
2000-07-21 08:42:39 +02:00
|
|
|
Page page;
|
|
|
|
BTPageOpaque opaque;
|
|
|
|
Buffer nbuf = InvalidBuffer;
|
2009-07-29 22:56:21 +02:00
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
/* Assume unique until we find a duplicate */
|
|
|
|
*is_unique = true;
|
2000-07-21 08:42:39 +02:00
|
|
|
|
2007-03-25 21:45:14 +02:00
|
|
|
InitDirtySnapshot(SnapshotDirty);
|
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
page = BufferGetPage(insertstate->buf);
|
2000-07-21 08:42:39 +02:00
|
|
|
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
|
|
|
|
maxoff = PageGetMaxOffsetNumber(page);
|
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
/*
|
|
|
|
* Find the first tuple with the same key.
|
|
|
|
*
|
|
|
|
* This also saves the binary search bounds in insertstate. We use them
|
|
|
|
* in the fastpath below, but also in the _bt_findinsertloc() call later.
|
|
|
|
*/
|
2019-04-04 18:38:08 +02:00
|
|
|
Assert(!insertstate->bounds_valid);
|
2019-03-20 17:30:57 +01:00
|
|
|
offset = _bt_binsrch_insert(rel, insertstate);
|
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/*
|
|
|
|
* Scan over all equal tuples, looking for live conflicts.
|
|
|
|
*/
|
2019-03-20 17:30:57 +01:00
|
|
|
Assert(!insertstate->bounds_valid || insertstate->low == offset);
|
Prevent O(N^2) unique index insertion edge case.
Commit dd299df8 made nbtree treat heap TID as a tiebreaker column,
establishing the principle that there is only one correct location (page
and page offset number) for every index tuple, no matter what.
Insertions of tuples into non-unique indexes proceed as if heap TID
(scan key's scantid) is just another user-attribute value, but
insertions into unique indexes are more delicate. The TID value in
scantid must initially be omitted to ensure that the unique index
insertion visits every leaf page that duplicates could be on. The
scantid is set once again after unique checking finishes successfully,
which can force _bt_findinsertloc() to step right one or more times, to
locate the leaf page that the new tuple must be inserted on.
Stepping right within _bt_findinsertloc() was assumed to occur no more
frequently than stepping right within _bt_check_unique(), but there was
one important case where that assumption was incorrect: inserting a
"duplicate" with NULL values. Since _bt_check_unique() didn't do any
real work in this case, it wasn't appropriate for _bt_findinsertloc() to
behave as if it was finishing off a conventional unique insertion, where
any existing physical duplicate must be dead or recently dead.
_bt_findinsertloc() might have to grovel through a substantial portion
of all of the leaf pages in the index to insert a single tuple, even
when there were no dead tuples.
To fix, treat insertions of tuples with NULLs into a unique index as if
they were insertions into a non-unique index: never unset scantid before
calling _bt_search() to descend the tree, and bypass _bt_check_unique()
entirely. _bt_check_unique() is no longer responsible for incoming
tuples with NULL values.
Discussion: https://postgr.es/m/CAH2-Wzm08nr+JPx4jMOa9CGqxWYDQ-_D4wtPBiKghXAUiUy-nQ@mail.gmail.com
2019-04-23 19:33:57 +02:00
|
|
|
Assert(!itup_key->anynullkeys);
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
Assert(itup_key->scantid == NULL);
|
2000-07-21 08:42:39 +02:00
|
|
|
for (;;)
|
|
|
|
{
|
2002-05-24 20:57:57 +02:00
|
|
|
ItemId curitemid;
|
2006-01-26 00:04:21 +01:00
|
|
|
IndexTuple curitup;
|
2000-07-21 08:42:39 +02:00
|
|
|
BlockNumber nblkno;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/*
|
2003-09-03 00:10:16 +02:00
|
|
|
* make sure the offset points to an actual item before trying to
|
|
|
|
* examine it...
|
2000-07-21 08:42:39 +02:00
|
|
|
*/
|
|
|
|
if (offset <= maxoff)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
2019-03-20 17:30:57 +01:00
|
|
|
/*
|
|
|
|
* Fastpath: In most cases, we can use cached search bounds to
|
|
|
|
* limit our consideration to items that are definitely
|
|
|
|
* duplicates. This fastpath doesn't apply when the original page
|
|
|
|
* is empty, or when initial offset is past the end of the
|
|
|
|
* original page, which may indicate that we need to examine a
|
|
|
|
* second or subsequent page.
|
|
|
|
*
|
Prevent O(N^2) unique index insertion edge case.
Commit dd299df8 made nbtree treat heap TID as a tiebreaker column,
establishing the principle that there is only one correct location (page
and page offset number) for every index tuple, no matter what.
Insertions of tuples into non-unique indexes proceed as if heap TID
(scan key's scantid) is just another user-attribute value, but
insertions into unique indexes are more delicate. The TID value in
scantid must initially be omitted to ensure that the unique index
insertion visits every leaf page that duplicates could be on. The
scantid is set once again after unique checking finishes successfully,
which can force _bt_findinsertloc() to step right one or more times, to
locate the leaf page that the new tuple must be inserted on.
Stepping right within _bt_findinsertloc() was assumed to occur no more
frequently than stepping right within _bt_check_unique(), but there was
one important case where that assumption was incorrect: inserting a
"duplicate" with NULL values. Since _bt_check_unique() didn't do any
real work in this case, it wasn't appropriate for _bt_findinsertloc() to
behave as if it was finishing off a conventional unique insertion, where
any existing physical duplicate must be dead or recently dead.
_bt_findinsertloc() might have to grovel through a substantial portion
of all of the leaf pages in the index to insert a single tuple, even
when there were no dead tuples.
To fix, treat insertions of tuples with NULLs into a unique index as if
they were insertions into a non-unique index: never unset scantid before
calling _bt_search() to descend the tree, and bypass _bt_check_unique()
entirely. _bt_check_unique() is no longer responsible for incoming
tuples with NULL values.
Discussion: https://postgr.es/m/CAH2-Wzm08nr+JPx4jMOa9CGqxWYDQ-_D4wtPBiKghXAUiUy-nQ@mail.gmail.com
2019-04-23 19:33:57 +02:00
|
|
|
* Note that this optimization allows us to avoid calling
|
|
|
|
* _bt_compare() directly when there are no duplicates, as long as
|
|
|
|
* the offset where the key will go is not at the end of the page.
|
2019-03-20 17:30:57 +01:00
|
|
|
*/
|
|
|
|
if (nbuf == InvalidBuffer && offset == insertstate->stricthigh)
|
|
|
|
{
|
|
|
|
Assert(insertstate->bounds_valid);
|
|
|
|
Assert(insertstate->low >= P_FIRSTDATAKEY(opaque));
|
|
|
|
Assert(insertstate->low <= insertstate->stricthigh);
|
Prevent O(N^2) unique index insertion edge case.
Commit dd299df8 made nbtree treat heap TID as a tiebreaker column,
establishing the principle that there is only one correct location (page
and page offset number) for every index tuple, no matter what.
Insertions of tuples into non-unique indexes proceed as if heap TID
(scan key's scantid) is just another user-attribute value, but
insertions into unique indexes are more delicate. The TID value in
scantid must initially be omitted to ensure that the unique index
insertion visits every leaf page that duplicates could be on. The
scantid is set once again after unique checking finishes successfully,
which can force _bt_findinsertloc() to step right one or more times, to
locate the leaf page that the new tuple must be inserted on.
Stepping right within _bt_findinsertloc() was assumed to occur no more
frequently than stepping right within _bt_check_unique(), but there was
one important case where that assumption was incorrect: inserting a
"duplicate" with NULL values. Since _bt_check_unique() didn't do any
real work in this case, it wasn't appropriate for _bt_findinsertloc() to
behave as if it was finishing off a conventional unique insertion, where
any existing physical duplicate must be dead or recently dead.
_bt_findinsertloc() might have to grovel through a substantial portion
of all of the leaf pages in the index to insert a single tuple, even
when there were no dead tuples.
To fix, treat insertions of tuples with NULLs into a unique index as if
they were insertions into a non-unique index: never unset scantid before
calling _bt_search() to descend the tree, and bypass _bt_check_unique()
entirely. _bt_check_unique() is no longer responsible for incoming
tuples with NULL values.
Discussion: https://postgr.es/m/CAH2-Wzm08nr+JPx4jMOa9CGqxWYDQ-_D4wtPBiKghXAUiUy-nQ@mail.gmail.com
2019-04-23 19:33:57 +02:00
|
|
|
Assert(_bt_compare(rel, itup_key, page, offset) < 0);
|
2019-03-20 17:30:57 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2002-05-24 20:57:57 +02:00
|
|
|
curitemid = PageGetItemId(page, offset);
|
2002-09-04 22:31:48 +02:00
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/*
|
2003-09-03 00:10:16 +02:00
|
|
|
* We can skip items that are marked killed.
|
|
|
|
*
|
2019-03-20 17:30:57 +01:00
|
|
|
* In the presence of heavy update activity an index may contain
|
Prevent O(N^2) unique index insertion edge case.
Commit dd299df8 made nbtree treat heap TID as a tiebreaker column,
establishing the principle that there is only one correct location (page
and page offset number) for every index tuple, no matter what.
Insertions of tuples into non-unique indexes proceed as if heap TID
(scan key's scantid) is just another user-attribute value, but
insertions into unique indexes are more delicate. The TID value in
scantid must initially be omitted to ensure that the unique index
insertion visits every leaf page that duplicates could be on. The
scantid is set once again after unique checking finishes successfully,
which can force _bt_findinsertloc() to step right one or more times, to
locate the leaf page that the new tuple must be inserted on.
Stepping right within _bt_findinsertloc() was assumed to occur no more
frequently than stepping right within _bt_check_unique(), but there was
one important case where that assumption was incorrect: inserting a
"duplicate" with NULL values. Since _bt_check_unique() didn't do any
real work in this case, it wasn't appropriate for _bt_findinsertloc() to
behave as if it was finishing off a conventional unique insertion, where
any existing physical duplicate must be dead or recently dead.
_bt_findinsertloc() might have to grovel through a substantial portion
of all of the leaf pages in the index to insert a single tuple, even
when there were no dead tuples.
To fix, treat insertions of tuples with NULLs into a unique index as if
they were insertions into a non-unique index: never unset scantid before
calling _bt_search() to descend the tree, and bypass _bt_check_unique()
entirely. _bt_check_unique() is no longer responsible for incoming
tuples with NULL values.
Discussion: https://postgr.es/m/CAH2-Wzm08nr+JPx4jMOa9CGqxWYDQ-_D4wtPBiKghXAUiUy-nQ@mail.gmail.com
2019-04-23 19:33:57 +02:00
|
|
|
* many killed items with the same key; running _bt_compare() on
|
2019-03-20 17:30:57 +01:00
|
|
|
* each killed item gets expensive. Just advance over killed
|
Prevent O(N^2) unique index insertion edge case.
Commit dd299df8 made nbtree treat heap TID as a tiebreaker column,
establishing the principle that there is only one correct location (page
and page offset number) for every index tuple, no matter what.
Insertions of tuples into non-unique indexes proceed as if heap TID
(scan key's scantid) is just another user-attribute value, but
insertions into unique indexes are more delicate. The TID value in
scantid must initially be omitted to ensure that the unique index
insertion visits every leaf page that duplicates could be on. The
scantid is set once again after unique checking finishes successfully,
which can force _bt_findinsertloc() to step right one or more times, to
locate the leaf page that the new tuple must be inserted on.
Stepping right within _bt_findinsertloc() was assumed to occur no more
frequently than stepping right within _bt_check_unique(), but there was
one important case where that assumption was incorrect: inserting a
"duplicate" with NULL values. Since _bt_check_unique() didn't do any
real work in this case, it wasn't appropriate for _bt_findinsertloc() to
behave as if it was finishing off a conventional unique insertion, where
any existing physical duplicate must be dead or recently dead.
_bt_findinsertloc() might have to grovel through a substantial portion
of all of the leaf pages in the index to insert a single tuple, even
when there were no dead tuples.
To fix, treat insertions of tuples with NULLs into a unique index as if
they were insertions into a non-unique index: never unset scantid before
calling _bt_search() to descend the tree, and bypass _bt_check_unique()
entirely. _bt_check_unique() is no longer responsible for incoming
tuples with NULL values.
Discussion: https://postgr.es/m/CAH2-Wzm08nr+JPx4jMOa9CGqxWYDQ-_D4wtPBiKghXAUiUy-nQ@mail.gmail.com
2019-04-23 19:33:57 +02:00
|
|
|
* items as quickly as we can. We only apply _bt_compare() when
|
2019-03-20 17:30:57 +01:00
|
|
|
* we get to a non-killed item. Even those comparisons could be
|
|
|
|
* avoided (in the common case where there is only one page to
|
|
|
|
* visit) by reusing bounds, but just skipping dead items is fast
|
|
|
|
* enough.
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2007-09-13 00:10:26 +02:00
|
|
|
if (!ItemIdIsDead(curitemid))
|
2000-07-21 08:42:39 +02:00
|
|
|
{
|
2007-09-20 19:56:33 +02:00
|
|
|
ItemPointerData htid;
|
2007-11-15 22:14:46 +01:00
|
|
|
bool all_dead;
|
2007-09-20 19:56:33 +02:00
|
|
|
|
Prevent O(N^2) unique index insertion edge case.
Commit dd299df8 made nbtree treat heap TID as a tiebreaker column,
establishing the principle that there is only one correct location (page
and page offset number) for every index tuple, no matter what.
Insertions of tuples into non-unique indexes proceed as if heap TID
(scan key's scantid) is just another user-attribute value, but
insertions into unique indexes are more delicate. The TID value in
scantid must initially be omitted to ensure that the unique index
insertion visits every leaf page that duplicates could be on. The
scantid is set once again after unique checking finishes successfully,
which can force _bt_findinsertloc() to step right one or more times, to
locate the leaf page that the new tuple must be inserted on.
Stepping right within _bt_findinsertloc() was assumed to occur no more
frequently than stepping right within _bt_check_unique(), but there was
one important case where that assumption was incorrect: inserting a
"duplicate" with NULL values. Since _bt_check_unique() didn't do any
real work in this case, it wasn't appropriate for _bt_findinsertloc() to
behave as if it was finishing off a conventional unique insertion, where
any existing physical duplicate must be dead or recently dead.
_bt_findinsertloc() might have to grovel through a substantial portion
of all of the leaf pages in the index to insert a single tuple, even
when there were no dead tuples.
To fix, treat insertions of tuples with NULLs into a unique index as if
they were insertions into a non-unique index: never unset scantid before
calling _bt_search() to descend the tree, and bypass _bt_check_unique()
entirely. _bt_check_unique() is no longer responsible for incoming
tuples with NULL values.
Discussion: https://postgr.es/m/CAH2-Wzm08nr+JPx4jMOa9CGqxWYDQ-_D4wtPBiKghXAUiUy-nQ@mail.gmail.com
2019-04-23 19:33:57 +02:00
|
|
|
if (_bt_compare(rel, itup_key, page, offset) != 0)
|
2004-08-29 07:07:03 +02:00
|
|
|
break; /* we're past all the equal tuples */
|
2003-09-03 00:10:16 +02:00
|
|
|
|
|
|
|
/* okay, we gotta fetch the heap tuple ... */
|
2006-01-26 00:04:21 +01:00
|
|
|
curitup = (IndexTuple) PageGetItem(page, curitemid);
|
2007-09-20 19:56:33 +02:00
|
|
|
htid = curitup->t_tid;
|
|
|
|
|
2009-07-29 22:56:21 +02:00
|
|
|
/*
|
|
|
|
* If we are doing a recheck, we expect to find the tuple we
|
2014-05-06 18:12:18 +02:00
|
|
|
* are rechecking. It's not a duplicate, but we have to keep
|
2009-07-29 22:56:21 +02:00
|
|
|
* scanning.
|
|
|
|
*/
|
|
|
|
if (checkUnique == UNIQUE_CHECK_EXISTING &&
|
|
|
|
ItemPointerCompare(&htid, &itup->t_tid) == 0)
|
|
|
|
{
|
|
|
|
found = true;
|
|
|
|
}
|
|
|
|
|
2007-09-20 19:56:33 +02:00
|
|
|
/*
|
2019-03-26 00:52:55 +01:00
|
|
|
* Check if there's any table tuples for this index entry
|
|
|
|
* satisfying SnapshotDirty. This is necessary because for AMs
|
|
|
|
* with optimizations like heap's HOT, we have just a single
|
|
|
|
* index entry for the entire chain.
|
2007-09-20 19:56:33 +02:00
|
|
|
*/
|
2019-03-26 00:52:55 +01:00
|
|
|
else if (table_index_fetch_tuple_check(heapRel, &htid,
|
|
|
|
&SnapshotDirty,
|
|
|
|
&all_dead))
|
2002-05-24 20:57:57 +02:00
|
|
|
{
|
2009-07-29 22:56:21 +02:00
|
|
|
TransactionId xwait;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* It is a duplicate. If we are only doing a partial
|
2010-02-26 03:01:40 +01:00
|
|
|
* check, then don't bother checking if the tuple is being
|
|
|
|
* updated in another transaction. Just return the fact
|
|
|
|
* that it is a potential conflict and leave the full
|
2019-04-04 18:38:08 +02:00
|
|
|
* check till later. Don't invalidate binary search
|
|
|
|
* bounds.
|
2009-07-29 22:56:21 +02:00
|
|
|
*/
|
|
|
|
if (checkUnique == UNIQUE_CHECK_PARTIAL)
|
|
|
|
{
|
|
|
|
if (nbuf != InvalidBuffer)
|
|
|
|
_bt_relbuf(rel, nbuf);
|
|
|
|
*is_unique = false;
|
|
|
|
return InvalidTransactionId;
|
|
|
|
}
|
2000-07-21 08:42:39 +02:00
|
|
|
|
2002-05-24 20:57:57 +02:00
|
|
|
/*
|
|
|
|
* If this tuple is being updated by other transaction
|
|
|
|
* then we have to wait for its commit/abort.
|
|
|
|
*/
|
2009-07-29 22:56:21 +02:00
|
|
|
xwait = (TransactionIdIsValid(SnapshotDirty.xmin)) ?
|
|
|
|
SnapshotDirty.xmin : SnapshotDirty.xmax;
|
|
|
|
|
2002-05-24 20:57:57 +02:00
|
|
|
if (TransactionIdIsValid(xwait))
|
|
|
|
{
|
|
|
|
if (nbuf != InvalidBuffer)
|
|
|
|
_bt_relbuf(rel, nbuf);
|
|
|
|
/* Tell _bt_doinsert to wait... */
|
Add support for INSERT ... ON CONFLICT DO NOTHING/UPDATE.
The newly added ON CONFLICT clause allows to specify an alternative to
raising a unique or exclusion constraint violation error when inserting.
ON CONFLICT refers to constraints that can either be specified using a
inference clause (by specifying the columns of a unique constraint) or
by naming a unique or exclusion constraint. DO NOTHING avoids the
constraint violation, without touching the pre-existing row. DO UPDATE
SET ... [WHERE ...] updates the pre-existing tuple, and has access to
both the tuple proposed for insertion and the existing tuple; the
optional WHERE clause can be used to prevent an update from being
executed. The UPDATE SET and WHERE clauses have access to the tuple
proposed for insertion using the "magic" EXCLUDED alias, and to the
pre-existing tuple using the table name or its alias.
This feature is often referred to as upsert.
This is implemented using a new infrastructure called "speculative
insertion". It is an optimistic variant of regular insertion that first
does a pre-check for existing tuples and then attempts an insert. If a
violating tuple was inserted concurrently, the speculatively inserted
tuple is deleted and a new attempt is made. If the pre-check finds a
matching tuple the alternative DO NOTHING or DO UPDATE action is taken.
If the insertion succeeds without detecting a conflict, the tuple is
deemed inserted.
To handle the possible ambiguity between the excluded alias and a table
named excluded, and for convenience with long relation names, INSERT
INTO now can alias its target table.
Bumps catversion as stored rules change.
Author: Peter Geoghegan, with significant contributions from Heikki
Linnakangas and Andres Freund. Testing infrastructure by Jeff Janes.
Reviewed-By: Heikki Linnakangas, Andres Freund, Robert Haas, Simon Riggs,
Dean Rasheed, Stephen Frost and many others.
2015-05-08 05:31:36 +02:00
|
|
|
*speculativeToken = SnapshotDirty.speculativeToken;
|
2019-04-04 18:38:08 +02:00
|
|
|
/* Caller releases lock on buf immediately */
|
|
|
|
insertstate->bounds_valid = false;
|
2002-05-24 20:57:57 +02:00
|
|
|
return xwait;
|
|
|
|
}
|
1998-12-15 13:47:01 +01:00
|
|
|
|
2002-05-24 20:57:57 +02:00
|
|
|
/*
|
2006-08-25 06:06:58 +02:00
|
|
|
* Otherwise we have a definite conflict. But before
|
|
|
|
* complaining, look to see if the tuple we want to insert
|
|
|
|
* is itself now committed dead --- if so, don't complain.
|
|
|
|
* This is a waste of time in normal scenarios but we must
|
|
|
|
* do it to support CREATE INDEX CONCURRENTLY.
|
2007-11-15 22:14:46 +01:00
|
|
|
*
|
2007-09-20 19:56:33 +02:00
|
|
|
* We must follow HOT-chains here because during
|
|
|
|
* concurrent index build, we insert the root TID though
|
|
|
|
* the actual tuple may be somewhere in the HOT-chain.
|
2007-11-15 22:14:46 +01:00
|
|
|
* While following the chain we might not stop at the
|
|
|
|
* exact tuple which triggered the insert, but that's OK
|
|
|
|
* because if we find a live tuple anywhere in this chain,
|
|
|
|
* we have a unique key conflict. The other live tuple is
|
|
|
|
* not part of this chain because it had a different index
|
|
|
|
* entry.
|
2002-05-24 20:57:57 +02:00
|
|
|
*/
|
2007-09-20 19:56:33 +02:00
|
|
|
htid = itup->t_tid;
|
2019-03-26 00:52:55 +01:00
|
|
|
if (table_index_fetch_tuple_check(heapRel, &htid,
|
|
|
|
SnapshotSelf, NULL))
|
2006-08-25 06:06:58 +02:00
|
|
|
{
|
|
|
|
/* Normal case --- it's still live */
|
|
|
|
}
|
2007-09-20 19:56:33 +02:00
|
|
|
else
|
2006-08-25 06:06:58 +02:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* It's been deleted, so no error, and no need to
|
|
|
|
* continue searching
|
|
|
|
*/
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-04-07 18:12:35 +02:00
|
|
|
/*
|
|
|
|
* Check for a conflict-in as we would if we were going to
|
|
|
|
* write to this page. We aren't actually going to write,
|
|
|
|
* but we want a chance to report SSI conflicts that would
|
2016-06-10 00:02:36 +02:00
|
|
|
* otherwise be masked by this unique constraint
|
|
|
|
* violation.
|
2016-04-07 18:12:35 +02:00
|
|
|
*/
|
2019-03-20 17:30:57 +01:00
|
|
|
CheckForSerializableConflictIn(rel, NULL, insertstate->buf);
|
2016-04-07 18:12:35 +02:00
|
|
|
|
2009-07-29 22:56:21 +02:00
|
|
|
/*
|
2010-02-26 03:01:40 +01:00
|
|
|
* This is a definite conflict. Break the tuple down into
|
|
|
|
* datums and report the error. But first, make sure we
|
|
|
|
* release the buffer locks we're holding ---
|
2009-08-01 22:59:17 +02:00
|
|
|
* BuildIndexValueDescription could make catalog accesses,
|
2010-02-26 03:01:40 +01:00
|
|
|
* which in the worst case might touch this same index and
|
|
|
|
* cause deadlocks.
|
2009-07-29 22:56:21 +02:00
|
|
|
*/
|
2009-08-01 21:59:41 +02:00
|
|
|
if (nbuf != InvalidBuffer)
|
|
|
|
_bt_relbuf(rel, nbuf);
|
2019-03-20 17:30:57 +01:00
|
|
|
_bt_relbuf(rel, insertstate->buf);
|
|
|
|
insertstate->buf = InvalidBuffer;
|
2019-04-04 18:38:08 +02:00
|
|
|
insertstate->bounds_valid = false;
|
2009-08-01 21:59:41 +02:00
|
|
|
|
|
|
|
{
|
2010-02-26 03:01:40 +01:00
|
|
|
Datum values[INDEX_MAX_KEYS];
|
|
|
|
bool isnull[INDEX_MAX_KEYS];
|
Fix column-privilege leak in error-message paths
While building error messages to return to the user,
BuildIndexValueDescription, ExecBuildSlotValueDescription and
ri_ReportViolation would happily include the entire key or entire row in
the result returned to the user, even if the user didn't have access to
view all of the columns being included.
Instead, include only those columns which the user is providing or which
the user has select rights on. If the user does not have any rights
to view the table or any of the columns involved then no detail is
provided and a NULL value is returned from BuildIndexValueDescription
and ExecBuildSlotValueDescription. Note that, for key cases, the user
must have access to all of the columns for the key to be shown; a
partial key will not be returned.
Further, in master only, do not return any data for cases where row
security is enabled on the relation and row security should be applied
for the user. This required a bit of refactoring and moving of things
around related to RLS- note the addition of utils/misc/rls.c.
Back-patch all the way, as column-level privileges are now in all
supported versions.
This has been assigned CVE-2014-8161, but since the issue and the patch
have already been publicized on pgsql-hackers, there's no point in trying
to hide this commit.
2015-01-12 23:04:11 +01:00
|
|
|
char *key_desc;
|
2009-08-01 21:59:41 +02:00
|
|
|
|
|
|
|
index_deform_tuple(itup, RelationGetDescr(rel),
|
|
|
|
values, isnull);
|
Fix column-privilege leak in error-message paths
While building error messages to return to the user,
BuildIndexValueDescription, ExecBuildSlotValueDescription and
ri_ReportViolation would happily include the entire key or entire row in
the result returned to the user, even if the user didn't have access to
view all of the columns being included.
Instead, include only those columns which the user is providing or which
the user has select rights on. If the user does not have any rights
to view the table or any of the columns involved then no detail is
provided and a NULL value is returned from BuildIndexValueDescription
and ExecBuildSlotValueDescription. Note that, for key cases, the user
must have access to all of the columns for the key to be shown; a
partial key will not be returned.
Further, in master only, do not return any data for cases where row
security is enabled on the relation and row security should be applied
for the user. This required a bit of refactoring and moving of things
around related to RLS- note the addition of utils/misc/rls.c.
Back-patch all the way, as column-level privileges are now in all
supported versions.
This has been assigned CVE-2014-8161, but since the issue and the patch
have already been publicized on pgsql-hackers, there's no point in trying
to hide this commit.
2015-01-12 23:04:11 +01:00
|
|
|
|
|
|
|
key_desc = BuildIndexValueDescription(rel, values,
|
|
|
|
isnull);
|
|
|
|
|
2009-08-01 22:59:17 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNIQUE_VIOLATION),
|
|
|
|
errmsg("duplicate key value violates unique constraint \"%s\"",
|
|
|
|
RelationGetRelationName(rel)),
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
key_desc ? errdetail("Key %s already exists.",
|
|
|
|
key_desc) : 0,
|
Provide database object names as separate fields in error messages.
This patch addresses the problem that applications currently have to
extract object names from possibly-localized textual error messages,
if they want to know for example which index caused a UNIQUE_VIOLATION
failure. It adds new error message fields to the wire protocol, which
can carry the name of a table, table column, data type, or constraint
associated with the error. (Since the protocol spec has always instructed
clients to ignore unrecognized field types, this should not create any
compatibility problem.)
Support for providing these new fields has been added to just a limited set
of error reports (mainly, those in the "integrity constraint violation"
SQLSTATE class), but we will doubtless add them to more calls in future.
Pavel Stehule, reviewed and extensively revised by Peter Geoghegan, with
additional hacking by Tom Lane.
2013-01-29 23:06:26 +01:00
|
|
|
errtableconstraint(heapRel,
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
RelationGetRelationName(rel))));
|
2009-08-01 21:59:41 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
2007-09-20 19:56:33 +02:00
|
|
|
else if (all_dead)
|
2002-05-24 20:57:57 +02:00
|
|
|
{
|
|
|
|
/*
|
2007-09-20 19:56:33 +02:00
|
|
|
* The conflicting tuple (or whole HOT chain) is dead to
|
|
|
|
* everyone, so we may as well mark the index entry
|
|
|
|
* killed.
|
2002-05-24 20:57:57 +02:00
|
|
|
*/
|
2007-09-20 19:56:33 +02:00
|
|
|
ItemIdMarkDead(curitemid);
|
|
|
|
opaque->btpo_flags |= BTP_HAS_GARBAGE;
|
2013-03-22 14:54:07 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark buffer with a dirty hint, since state is not
|
|
|
|
* crucial. Be sure to mark the proper buffer dirty.
|
|
|
|
*/
|
2007-09-20 19:56:33 +02:00
|
|
|
if (nbuf != InvalidBuffer)
|
2013-06-17 17:02:12 +02:00
|
|
|
MarkBufferDirtyHint(nbuf, true);
|
2007-09-20 19:56:33 +02:00
|
|
|
else
|
2019-03-20 17:30:57 +01:00
|
|
|
MarkBufferDirtyHint(insertstate->buf, true);
|
2002-05-24 20:57:57 +02:00
|
|
|
}
|
1997-01-10 11:06:20 +01:00
|
|
|
}
|
2000-07-21 08:42:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Advance to next tuple to continue checking.
|
|
|
|
*/
|
|
|
|
if (offset < maxoff)
|
|
|
|
offset = OffsetNumberNext(offset);
|
|
|
|
else
|
|
|
|
{
|
2019-03-20 17:30:57 +01:00
|
|
|
int highkeycmp;
|
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/* If scankey == hikey we gotta check the next page too */
|
|
|
|
if (P_RIGHTMOST(opaque))
|
|
|
|
break;
|
2019-03-20 17:30:57 +01:00
|
|
|
highkeycmp = _bt_compare(rel, itup_key, page, P_HIKEY);
|
|
|
|
Assert(highkeycmp <= 0);
|
|
|
|
if (highkeycmp != 0)
|
2000-07-21 08:42:39 +02:00
|
|
|
break;
|
2003-02-22 01:45:05 +01:00
|
|
|
/* Advance to next non-dead page --- there must be one */
|
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
nblkno = opaque->btpo_next;
|
2004-04-21 20:24:26 +02:00
|
|
|
nbuf = _bt_relandgetbuf(rel, nbuf, nblkno, BT_READ);
|
2016-04-20 15:31:19 +02:00
|
|
|
page = BufferGetPage(nbuf);
|
2003-02-22 01:45:05 +01:00
|
|
|
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
|
|
|
|
if (!P_IGNORE(opaque))
|
|
|
|
break;
|
|
|
|
if (P_RIGHTMOST(opaque))
|
2007-12-31 05:52:05 +01:00
|
|
|
elog(ERROR, "fell off the end of index \"%s\"",
|
2003-02-22 01:45:05 +01:00
|
|
|
RelationGetRelationName(rel));
|
|
|
|
}
|
2000-07-21 08:42:39 +02:00
|
|
|
maxoff = PageGetMaxOffsetNumber(page);
|
|
|
|
offset = P_FIRSTDATAKEY(opaque);
|
2019-04-04 18:38:08 +02:00
|
|
|
/* Don't invalidate binary search bounds */
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
1997-01-10 11:06:20 +01:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2009-07-29 22:56:21 +02:00
|
|
|
/*
|
2010-02-26 03:01:40 +01:00
|
|
|
* If we are doing a recheck then we should have found the tuple we are
|
|
|
|
* checking. Otherwise there's something very wrong --- probably, the
|
|
|
|
* index is on a non-immutable expression.
|
2009-07-29 22:56:21 +02:00
|
|
|
*/
|
|
|
|
if (checkUnique == UNIQUE_CHECK_EXISTING && !found)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INTERNAL_ERROR),
|
|
|
|
errmsg("failed to re-find tuple within index \"%s\"",
|
|
|
|
RelationGetRelationName(rel)),
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
errhint("This may be because of a non-immutable index expression."),
|
Provide database object names as separate fields in error messages.
This patch addresses the problem that applications currently have to
extract object names from possibly-localized textual error messages,
if they want to know for example which index caused a UNIQUE_VIOLATION
failure. It adds new error message fields to the wire protocol, which
can carry the name of a table, table column, data type, or constraint
associated with the error. (Since the protocol spec has always instructed
clients to ignore unrecognized field types, this should not create any
compatibility problem.)
Support for providing these new fields has been added to just a limited set
of error reports (mainly, those in the "integrity constraint violation"
SQLSTATE class), but we will doubtless add them to more calls in future.
Pavel Stehule, reviewed and extensively revised by Peter Geoghegan, with
additional hacking by Tom Lane.
2013-01-29 23:06:26 +01:00
|
|
|
errtableconstraint(heapRel,
|
|
|
|
RelationGetRelationName(rel))));
|
2009-07-29 22:56:21 +02:00
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
if (nbuf != InvalidBuffer)
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-16 00:48:19 +02:00
|
|
|
_bt_relbuf(rel, nbuf);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-08-24 01:06:38 +02:00
|
|
|
return InvalidTransactionId;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2007-03-03 21:13:06 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* _bt_findinsertloc() -- Finds an insert location for a tuple
|
2000-07-21 08:42:39 +02:00
|
|
|
*
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* On entry, insertstate buffer contains the page the new tuple belongs
|
|
|
|
* on. It is exclusive-locked and pinned by the caller.
|
|
|
|
*
|
|
|
|
* If 'checkingunique' is true, the buffer on entry is the first page
|
|
|
|
* that contains duplicates of the new key. If there are duplicates on
|
|
|
|
* multiple pages, the correct insertion position might be some page to
|
|
|
|
* the right, rather than the first page. In that case, this function
|
|
|
|
* moves right to the correct target page.
|
2019-03-20 17:30:57 +01:00
|
|
|
*
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* (In a !heapkeyspace index, there can be multiple pages with the same
|
|
|
|
* high key, where the new tuple could legitimately be placed on. In
|
|
|
|
* that case, the caller passes the first page containing duplicates,
|
2019-05-26 14:58:18 +02:00
|
|
|
* just like when checkingunique=true. If that page doesn't have enough
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* room for the new tuple, this function moves right, trying to find a
|
|
|
|
* legal page that does.)
|
2007-03-03 21:13:06 +01:00
|
|
|
*
|
2019-03-20 17:30:57 +01:00
|
|
|
* On exit, insertstate buffer contains the chosen insertion page, and
|
|
|
|
* the offset within that page is returned. If _bt_findinsertloc needed
|
|
|
|
* to move right, the lock and pin on the original page are released, and
|
|
|
|
* the new buffer is exclusively locked and pinned instead.
|
2007-03-03 21:13:06 +01:00
|
|
|
*
|
2019-03-20 17:30:57 +01:00
|
|
|
* If insertstate contains cached binary search bounds, we will take
|
|
|
|
* advantage of them. This avoids repeating comparisons that we made in
|
|
|
|
* _bt_check_unique() already.
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
*
|
|
|
|
* If there is not enough room on the page for the new tuple, we try to
|
|
|
|
* make room by removing any LP_DEAD tuples.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2019-03-20 17:30:57 +01:00
|
|
|
static OffsetNumber
|
2007-03-03 21:13:06 +01:00
|
|
|
_bt_findinsertloc(Relation rel,
|
2019-03-20 17:30:57 +01:00
|
|
|
BTInsertState insertstate,
|
|
|
|
bool checkingunique,
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
BTStack stack,
|
2010-03-28 11:27:02 +02:00
|
|
|
Relation heapRel)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2019-03-20 17:30:57 +01:00
|
|
|
BTScanInsert itup_key = insertstate->itup_key;
|
|
|
|
Page page = BufferGetPage(insertstate->buf);
|
1997-09-08 04:41:22 +02:00
|
|
|
BTPageOpaque lpageop;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
|
|
|
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
/* Check 1/3 of a page restriction */
|
|
|
|
if (unlikely(insertstate->itemsz > BTMaxItemSize(page)))
|
|
|
|
_bt_check_third_page(rel, heapRel, itup_key->heapkeyspace, page,
|
|
|
|
insertstate->itup);
|
1999-12-26 04:48:22 +01:00
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
Assert(P_ISLEAF(lpageop) && !P_INCOMPLETE_SPLIT(lpageop));
|
|
|
|
Assert(!insertstate->bounds_valid || checkingunique);
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
Assert(!itup_key->heapkeyspace || itup_key->scantid != NULL);
|
|
|
|
Assert(itup_key->heapkeyspace || itup_key->scantid == NULL);
|
2000-08-26 01:13:33 +02:00
|
|
|
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
if (itup_key->heapkeyspace)
|
2019-03-20 17:30:57 +01:00
|
|
|
{
|
2007-03-03 21:13:06 +01:00
|
|
|
/*
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* If we're inserting into a unique index, we may have to walk right
|
|
|
|
* through leaf pages to find the one leaf page that we must insert on
|
|
|
|
* to.
|
|
|
|
*
|
|
|
|
* This is needed for checkingunique callers because a scantid was not
|
|
|
|
* used when we called _bt_search(). scantid can only be set after
|
|
|
|
* _bt_check_unique() has checked for duplicates. The buffer
|
|
|
|
* initially stored in insertstate->buf has the page where the first
|
|
|
|
* duplicate key might be found, which isn't always the page that new
|
|
|
|
* tuple belongs on. The heap TID attribute for new tuple (scantid)
|
|
|
|
* could force us to insert on a sibling page, though that should be
|
|
|
|
* very rare in practice.
|
2007-03-03 21:13:06 +01:00
|
|
|
*/
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
if (checkingunique)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Does the new tuple belong on this page?
|
|
|
|
*
|
|
|
|
* The earlier _bt_check_unique() call may well have
|
|
|
|
* established a strict upper bound on the offset for the new
|
|
|
|
* item. If it's not the last item of the page (i.e. if there
|
|
|
|
* is at least one tuple on the page that goes after the tuple
|
|
|
|
* we're inserting) then we know that the tuple belongs on
|
|
|
|
* this page. We can skip the high key check.
|
|
|
|
*/
|
|
|
|
if (insertstate->bounds_valid &&
|
|
|
|
insertstate->low <= insertstate->stricthigh &&
|
|
|
|
insertstate->stricthigh <= PageGetMaxOffsetNumber(page))
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* Test '<=', not '!=', since scantid is set now */
|
|
|
|
if (P_RIGHTMOST(lpageop) ||
|
|
|
|
_bt_compare(rel, itup_key, page, P_HIKEY) <= 0)
|
|
|
|
break;
|
2006-07-25 21:13:00 +02:00
|
|
|
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
_bt_stepright(rel, insertstate, stack);
|
|
|
|
/* Update local state after stepping right */
|
|
|
|
page = BufferGetPage(insertstate->buf);
|
|
|
|
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
2001-03-22 05:01:46 +01:00
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/*
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* If the target page is full, see if we can obtain enough space by
|
|
|
|
* erasing LP_DEAD items
|
|
|
|
*/
|
|
|
|
if (PageGetFreeSpace(page) < insertstate->itemsz &&
|
|
|
|
P_HAS_GARBAGE(lpageop))
|
|
|
|
{
|
|
|
|
_bt_vacuum_one_page(rel, insertstate->buf, heapRel);
|
|
|
|
insertstate->bounds_valid = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*----------
|
|
|
|
* This is a !heapkeyspace (version 2 or 3) index. The current page
|
|
|
|
* is the first page that we could insert the new tuple to, but there
|
|
|
|
* may be other pages to the right that we could opt to use instead.
|
2019-03-20 17:30:57 +01:00
|
|
|
*
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* If the new key is equal to one or more existing keys, we can
|
|
|
|
* legitimately place it anywhere in the series of equal keys. In
|
|
|
|
* fact, if the new key is equal to the page's "high key" we can place
|
|
|
|
* it on the next page. If it is equal to the high key, and there's
|
|
|
|
* not room to insert the new tuple on the current page without
|
|
|
|
* splitting, then we move right hoping to find more free space and
|
|
|
|
* avoid a split.
|
|
|
|
*
|
|
|
|
* Keep scanning right until we
|
|
|
|
* (a) find a page with enough free space,
|
|
|
|
* (b) reach the last page where the tuple can legally go, or
|
|
|
|
* (c) get tired of searching.
|
|
|
|
* (c) is not flippant; it is important because if there are many
|
|
|
|
* pages' worth of equal keys, it's better to split one of the early
|
|
|
|
* pages than to scan all the way to the end of the run of equal keys
|
|
|
|
* on every insert. We implement "get tired" as a random choice,
|
|
|
|
* since stopping after scanning a fixed number of pages wouldn't work
|
|
|
|
* well (we'd never reach the right-hand side of previously split
|
|
|
|
* pages). The probability of moving right is set at 0.99, which may
|
|
|
|
* seem too high to change the behavior much, but it does an excellent
|
|
|
|
* job of preventing O(N^2) behavior with many equal keys.
|
|
|
|
*----------
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
while (PageGetFreeSpace(page) < insertstate->itemsz)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Before considering moving right, see if we can obtain enough
|
|
|
|
* space by erasing LP_DEAD items
|
|
|
|
*/
|
|
|
|
if (P_HAS_GARBAGE(lpageop))
|
|
|
|
{
|
|
|
|
_bt_vacuum_one_page(rel, insertstate->buf, heapRel);
|
|
|
|
insertstate->bounds_valid = false;
|
2019-03-20 17:30:57 +01:00
|
|
|
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
if (PageGetFreeSpace(page) >= insertstate->itemsz)
|
|
|
|
break; /* OK, now we have enough space */
|
|
|
|
}
|
2007-03-03 21:13:06 +01:00
|
|
|
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
/*
|
|
|
|
* Nope, so check conditions (b) and (c) enumerated above
|
|
|
|
*
|
|
|
|
* The earlier _bt_check_unique() call may well have established a
|
|
|
|
* strict upper bound on the offset for the new item. If it's not
|
|
|
|
* the last item of the page (i.e. if there is at least one tuple
|
|
|
|
* on the page that's greater than the tuple we're inserting to)
|
|
|
|
* then we know that the tuple belongs on this page. We can skip
|
|
|
|
* the high key check.
|
|
|
|
*/
|
|
|
|
if (insertstate->bounds_valid &&
|
|
|
|
insertstate->low <= insertstate->stricthigh &&
|
|
|
|
insertstate->stricthigh <= PageGetMaxOffsetNumber(page))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (P_RIGHTMOST(lpageop) ||
|
|
|
|
_bt_compare(rel, itup_key, page, P_HIKEY) != 0 ||
|
|
|
|
random() <= (MAX_RANDOM_VALUE / 100))
|
|
|
|
break;
|
|
|
|
|
|
|
|
_bt_stepright(rel, insertstate, stack);
|
|
|
|
/* Update local state after stepping right */
|
|
|
|
page = BufferGetPage(insertstate->buf);
|
|
|
|
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
|
|
|
}
|
2019-03-20 17:30:57 +01:00
|
|
|
}
|
2007-03-03 21:13:06 +01:00
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
/*
|
|
|
|
* We should now be on the correct page. Find the offset within the page
|
|
|
|
* for the new tuple. (Possibly reusing earlier search bounds.)
|
|
|
|
*/
|
|
|
|
Assert(P_RIGHTMOST(lpageop) ||
|
|
|
|
_bt_compare(rel, itup_key, page, P_HIKEY) <= 0);
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
return _bt_binsrch_insert(rel, insertstate);
|
|
|
|
}
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
/*
|
|
|
|
* Step right to next non-dead page, during insertion.
|
|
|
|
*
|
|
|
|
* This is a bit more complicated than moving right in a search. We must
|
|
|
|
* write-lock the target page before releasing write lock on current page;
|
|
|
|
* else someone else's _bt_check_unique scan could fail to see our insertion.
|
|
|
|
* Write locks on intermediate dead pages won't do because we don't know when
|
|
|
|
* they will get de-linked from the tree.
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
*
|
|
|
|
* This is more aggressive than it needs to be for non-unique !heapkeyspace
|
|
|
|
* indexes.
|
2019-03-20 17:30:57 +01:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_bt_stepright(Relation rel, BTInsertState insertstate, BTStack stack)
|
|
|
|
{
|
|
|
|
Page page;
|
|
|
|
BTPageOpaque lpageop;
|
|
|
|
Buffer rbuf;
|
|
|
|
BlockNumber rblkno;
|
|
|
|
|
|
|
|
page = BufferGetPage(insertstate->buf);
|
|
|
|
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
|
|
|
|
|
|
|
rbuf = InvalidBuffer;
|
|
|
|
rblkno = lpageop->btpo_next;
|
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
rbuf = _bt_relandgetbuf(rel, rbuf, rblkno, BT_WRITE);
|
|
|
|
page = BufferGetPage(rbuf);
|
|
|
|
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
/*
|
|
|
|
* If this page was incompletely split, finish the split now. We do
|
|
|
|
* this while holding a lock on the left sibling, which is not good
|
|
|
|
* because finishing the split could be a fairly lengthy operation.
|
|
|
|
* But this should happen very seldom.
|
|
|
|
*/
|
|
|
|
if (P_INCOMPLETE_SPLIT(lpageop))
|
|
|
|
{
|
|
|
|
_bt_finish_split(rel, rbuf, stack);
|
|
|
|
rbuf = InvalidBuffer;
|
|
|
|
continue;
|
2007-03-03 21:13:06 +01:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
if (!P_IGNORE(lpageop))
|
|
|
|
break;
|
|
|
|
if (P_RIGHTMOST(lpageop))
|
|
|
|
elog(ERROR, "fell off the end of index \"%s\"",
|
|
|
|
RelationGetRelationName(rel));
|
2007-03-03 21:13:06 +01:00
|
|
|
|
2019-03-20 17:30:57 +01:00
|
|
|
rblkno = lpageop->btpo_next;
|
|
|
|
}
|
|
|
|
/* rbuf locked; unlock buf, update state for caller */
|
|
|
|
_bt_relbuf(rel, insertstate->buf);
|
|
|
|
insertstate->buf = rbuf;
|
|
|
|
insertstate->bounds_valid = false;
|
2007-03-03 21:13:06 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*----------
|
|
|
|
* _bt_insertonpg() -- Insert a tuple on a particular page in the index.
|
|
|
|
*
|
|
|
|
* This recursive procedure does the following things:
|
|
|
|
*
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* + if necessary, splits the target page, using 'itup_key' for
|
|
|
|
* suffix truncation on leaf pages (caller passes NULL for
|
|
|
|
* non-leaf pages).
|
2007-03-03 21:13:06 +01:00
|
|
|
* + inserts the tuple.
|
|
|
|
* + if the page was split, pops the parent stack, and finds the
|
|
|
|
* right place to insert the new child pointer (by walking
|
|
|
|
* right using information stored in the parent stack).
|
|
|
|
* + invokes itself with the appropriate tuple for the right
|
|
|
|
* child page on the parent.
|
|
|
|
* + updates the metapage if a true root or fast root is split.
|
|
|
|
*
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
* On entry, we must have the correct buffer in which to do the
|
2014-05-06 18:12:18 +02:00
|
|
|
* insertion, and the buffer must be pinned and write-locked. On return,
|
2007-03-03 21:13:06 +01:00
|
|
|
* we will have dropped both the pin and the lock on the buffer.
|
|
|
|
*
|
2018-04-19 10:08:45 +02:00
|
|
|
* This routine only performs retail tuple insertions. 'itup' should
|
|
|
|
* always be either a non-highkey leaf item, or a downlink (new high
|
|
|
|
* key items are created indirectly, when a page is split). When
|
|
|
|
* inserting to a non-leaf page, 'cbuf' is the left-sibling of the page
|
|
|
|
* we're inserting the downlink for. This function will clear the
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
* INCOMPLETE_SPLIT flag on it, and release the buffer.
|
2007-03-03 21:13:06 +01:00
|
|
|
*----------
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
_bt_insertonpg(Relation rel,
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
BTScanInsert itup_key,
|
2007-03-03 21:13:06 +01:00
|
|
|
Buffer buf,
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
Buffer cbuf,
|
2007-03-03 21:13:06 +01:00
|
|
|
BTStack stack,
|
|
|
|
IndexTuple itup,
|
|
|
|
OffsetNumber newitemoff,
|
|
|
|
bool split_only_page)
|
|
|
|
{
|
|
|
|
Page page;
|
|
|
|
BTPageOpaque lpageop;
|
|
|
|
Size itemsz;
|
|
|
|
|
2016-04-20 15:31:19 +02:00
|
|
|
page = BufferGetPage(buf);
|
2007-03-03 21:13:06 +01:00
|
|
|
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
|
|
|
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
/* child buffer must be given iff inserting on an internal page */
|
|
|
|
Assert(P_ISLEAF(lpageop) == !BufferIsValid(cbuf));
|
Adjust INCLUDE index truncation comments and code.
Add several assertions that ensure that we're dealing with a pivot tuple
without non-key attributes where that's expected. Also, remove the
assertion within _bt_isequal(), restoring the v10 function signature. A
similar check will be performed for the page highkey within
_bt_moveright() in most cases. Also avoid dropping all objects within
regression tests, to increase pg_dump test coverage for INCLUDE indexes.
Rather than using infrastructure that's generally intended to be used
with reference counted heap tuple descriptors during truncation, use the
same function that was introduced to store flat TupleDescs in shared
memory (we use a temp palloc'd buffer). This isn't strictly necessary,
but seems more future-proof than the old approach. It also lets us
avoid including rel.h within indextuple.c, which was arguably a
modularity violation. Also, we now call index_deform_tuple() with the
truncated TupleDesc, not the source TupleDesc, since that's more robust,
and saves a few cycles.
In passing, fix a memory leak by pfree'ing truncated pivot tuple memory
during CREATE INDEX. Also pfree during a page split, just to be
consistent.
Refactor _bt_check_natts() to be more readable.
Author: Peter Geoghegan with some editorization by me
Reviewed by: Alexander Korotkov, Teodor Sigaev
Discussion: https://www.postgresql.org/message-id/CAH2-Wz%3DkCWuXeMrBCopC-tFs3FbiVxQNjjgNKdG2sHxZ5k2y3w%40mail.gmail.com
2018-04-19 07:45:58 +02:00
|
|
|
/* tuple must have appropriate number of attributes */
|
|
|
|
Assert(!P_ISLEAF(lpageop) ||
|
|
|
|
BTreeTupleGetNAtts(itup, rel) ==
|
|
|
|
IndexRelationGetNumberOfAttributes(rel));
|
|
|
|
Assert(P_ISLEAF(lpageop) ||
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
BTreeTupleGetNAtts(itup, rel) <=
|
Adjust INCLUDE index truncation comments and code.
Add several assertions that ensure that we're dealing with a pivot tuple
without non-key attributes where that's expected. Also, remove the
assertion within _bt_isequal(), restoring the v10 function signature. A
similar check will be performed for the page highkey within
_bt_moveright() in most cases. Also avoid dropping all objects within
regression tests, to increase pg_dump test coverage for INCLUDE indexes.
Rather than using infrastructure that's generally intended to be used
with reference counted heap tuple descriptors during truncation, use the
same function that was introduced to store flat TupleDescs in shared
memory (we use a temp palloc'd buffer). This isn't strictly necessary,
but seems more future-proof than the old approach. It also lets us
avoid including rel.h within indextuple.c, which was arguably a
modularity violation. Also, we now call index_deform_tuple() with the
truncated TupleDesc, not the source TupleDesc, since that's more robust,
and saves a few cycles.
In passing, fix a memory leak by pfree'ing truncated pivot tuple memory
during CREATE INDEX. Also pfree during a page split, just to be
consistent.
Refactor _bt_check_natts() to be more readable.
Author: Peter Geoghegan with some editorization by me
Reviewed by: Alexander Korotkov, Teodor Sigaev
Discussion: https://www.postgresql.org/message-id/CAH2-Wz%3DkCWuXeMrBCopC-tFs3FbiVxQNjjgNKdG2sHxZ5k2y3w%40mail.gmail.com
2018-04-19 07:45:58 +02:00
|
|
|
IndexRelationGetNumberOfKeyAttributes(rel));
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
|
|
|
|
/* The caller should've finished any incomplete splits already. */
|
|
|
|
if (P_INCOMPLETE_SPLIT(lpageop))
|
|
|
|
elog(ERROR, "cannot insert to incompletely split page %u",
|
|
|
|
BufferGetBlockNumber(buf));
|
|
|
|
|
2018-03-01 01:25:54 +01:00
|
|
|
itemsz = IndexTupleSize(itup);
|
2007-03-03 21:13:06 +01:00
|
|
|
itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
|
|
|
|
* need to be consistent */
|
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/*
|
|
|
|
* Do we need to split the page to fit the item on it?
|
2000-07-21 21:21:00 +02:00
|
|
|
*
|
2005-11-22 19:17:34 +01:00
|
|
|
* Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
|
|
|
|
* so this comparison is correct even though we appear to be accounting
|
|
|
|
* only for the item and not for its line pointer.
|
2000-07-21 08:42:39 +02:00
|
|
|
*/
|
|
|
|
if (PageGetFreeSpace(page) < itemsz)
|
|
|
|
{
|
|
|
|
bool is_root = P_ISROOT(lpageop);
|
2003-02-21 01:06:22 +01:00
|
|
|
bool is_only = P_LEFTMOST(lpageop) && P_RIGHTMOST(lpageop);
|
|
|
|
Buffer rbuf;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2018-04-11 00:21:03 +02:00
|
|
|
/*
|
2018-04-26 20:47:16 +02:00
|
|
|
* If we're here then a pagesplit is needed. We should never reach
|
|
|
|
* here if we're using the fastpath since we should have checked for
|
|
|
|
* all the required conditions, including the fact that this page has
|
|
|
|
* enough freespace. Note that this routine can in theory deal with
|
|
|
|
* the situation where a NULL stack pointer is passed (that's what
|
2018-12-19 01:59:50 +01:00
|
|
|
* would happen if the fastpath is taken). But that path is much
|
|
|
|
* slower, defeating the very purpose of the optimization. The
|
|
|
|
* following assertion should protect us from any future code changes
|
|
|
|
* that invalidate those assumptions.
|
2018-04-11 00:21:03 +02:00
|
|
|
*
|
|
|
|
* Note that whenever we fail to take the fastpath, we clear the
|
|
|
|
* cached block. Checking for a valid cached block at this point is
|
|
|
|
* enough to decide whether we're in a fastpath or not.
|
|
|
|
*/
|
|
|
|
Assert(!(P_ISLEAF(lpageop) &&
|
2018-04-26 20:47:16 +02:00
|
|
|
BlockNumberIsValid(RelationGetTargetBlock(rel))));
|
2018-04-11 00:21:03 +02:00
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/* split the buffer into left and right halves */
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
rbuf = _bt_split(rel, itup_key, buf, cbuf, newitemoff, itemsz, itup);
|
Implement genuine serializable isolation level.
Until now, our Serializable mode has in fact been what's called Snapshot
Isolation, which allows some anomalies that could not occur in any
serialized ordering of the transactions. This patch fixes that using a
method called Serializable Snapshot Isolation, based on research papers by
Michael J. Cahill (see README-SSI for full references). In Serializable
Snapshot Isolation, transactions run like they do in Snapshot Isolation,
but a predicate lock manager observes the reads and writes performed and
aborts transactions if it detects that an anomaly might occur. This method
produces some false positives, ie. it sometimes aborts transactions even
though there is no anomaly.
To track reads we implement predicate locking, see storage/lmgr/predicate.c.
Whenever a tuple is read, a predicate lock is acquired on the tuple. Shared
memory is finite, so when a transaction takes many tuple-level locks on a
page, the locks are promoted to a single page-level lock, and further to a
single relation level lock if necessary. To lock key values with no matching
tuple, a sequential scan always takes a relation-level lock, and an index
scan acquires a page-level lock that covers the search key, whether or not
there are any matching keys at the moment.
A predicate lock doesn't conflict with any regular locks or with another
predicate locks in the normal sense. They're only used by the predicate lock
manager to detect the danger of anomalies. Only serializable transactions
participate in predicate locking, so there should be no extra overhead
for other transactions.
Predicate locks can't be released at commit, but must be remembered until
all the transactions that overlapped with it have completed. That means that
we need to remember an unbounded amount of predicate locks, so we apply a
lossy but conservative method of tracking locks for committed transactions.
If we run short of shared memory, we overflow to a new "pg_serial" SLRU
pool.
We don't currently allow Serializable transactions in Hot Standby mode.
That would be hard, because even read-only transactions can cause anomalies
that wouldn't otherwise occur.
Serializable isolation mode now means the new fully serializable level.
Repeatable Read gives you the old Snapshot Isolation level that we have
always had.
Kevin Grittner and Dan Ports, reviewed by Jeff Davis, Heikki Linnakangas and
Anssi Kääriäinen
2011-02-07 22:46:51 +01:00
|
|
|
PredicateLockPageSplit(rel,
|
|
|
|
BufferGetBlockNumber(buf),
|
|
|
|
BufferGetBlockNumber(rbuf));
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/*----------
|
1997-09-07 07:04:48 +02:00
|
|
|
* By here,
|
|
|
|
*
|
2000-07-21 08:42:39 +02:00
|
|
|
* + our target page has been split;
|
|
|
|
* + the original tuple has been inserted;
|
|
|
|
* + we have write locks on both the old (left half)
|
|
|
|
* and new (right half) buffers, after the split; and
|
|
|
|
* + we know the key we want to insert into the parent
|
|
|
|
* (it's the "high key" on the left child page).
|
|
|
|
*
|
|
|
|
* We're ready to do the parent insertion. We need to hold onto the
|
|
|
|
* locks for the child pages until we locate the parent, but we can
|
2019-03-13 00:40:05 +01:00
|
|
|
* at least release the lock on the right child before doing the
|
|
|
|
* actual insertion. The lock on the left child will be released
|
|
|
|
* last of all by parent insertion, where it is the 'cbuf' of parent
|
|
|
|
* page.
|
2000-07-21 08:42:39 +02:00
|
|
|
*----------
|
1997-06-10 09:28:50 +02:00
|
|
|
*/
|
2003-02-21 01:06:22 +01:00
|
|
|
_bt_insert_parent(rel, buf, rbuf, stack, is_root, is_only);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
Buffer metabuf = InvalidBuffer;
|
|
|
|
Page metapg = NULL;
|
|
|
|
BTMetaPageData *metad = NULL;
|
2005-09-25 00:54:44 +02:00
|
|
|
OffsetNumber itup_off;
|
|
|
|
BlockNumber itup_blkno;
|
2018-04-26 20:47:16 +02:00
|
|
|
BlockNumber cachedBlock = InvalidBlockNumber;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
itup_off = newitemoff;
|
|
|
|
itup_blkno = BufferGetBlockNumber(buf);
|
|
|
|
|
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* If we are doing this insert because we split a page that was the
|
|
|
|
* only one on its tree level, but was not the root, it may have been
|
|
|
|
* the "fast root". We need to ensure that the fast root link points
|
|
|
|
* at or above the current page. We can safely acquire a lock on the
|
|
|
|
* metapage here --- see comments for _bt_newroot().
|
2003-02-21 01:06:22 +01:00
|
|
|
*/
|
|
|
|
if (split_only_page)
|
1996-12-06 10:45:30 +01:00
|
|
|
{
|
2006-04-13 05:53:05 +02:00
|
|
|
Assert(!P_ISLEAF(lpageop));
|
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
|
2016-04-20 15:31:19 +02:00
|
|
|
metapg = BufferGetPage(metabuf);
|
2003-02-21 01:06:22 +01:00
|
|
|
metad = BTPageGetMeta(metapg);
|
2001-01-29 08:28:17 +01:00
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
if (metad->btm_fastlevel >= lpageop->btpo.level)
|
|
|
|
{
|
|
|
|
/* no update wanted */
|
|
|
|
_bt_relbuf(rel, metabuf);
|
|
|
|
metabuf = InvalidBuffer;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
2003-02-21 01:06:22 +01:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Adjust INCLUDE index truncation comments and code.
Add several assertions that ensure that we're dealing with a pivot tuple
without non-key attributes where that's expected. Also, remove the
assertion within _bt_isequal(), restoring the v10 function signature. A
similar check will be performed for the page highkey within
_bt_moveright() in most cases. Also avoid dropping all objects within
regression tests, to increase pg_dump test coverage for INCLUDE indexes.
Rather than using infrastructure that's generally intended to be used
with reference counted heap tuple descriptors during truncation, use the
same function that was introduced to store flat TupleDescs in shared
memory (we use a temp palloc'd buffer). This isn't strictly necessary,
but seems more future-proof than the old approach. It also lets us
avoid including rel.h within indextuple.c, which was arguably a
modularity violation. Also, we now call index_deform_tuple() with the
truncated TupleDesc, not the source TupleDesc, since that's more robust,
and saves a few cycles.
In passing, fix a memory leak by pfree'ing truncated pivot tuple memory
during CREATE INDEX. Also pfree during a page split, just to be
consistent.
Refactor _bt_check_natts() to be more readable.
Author: Peter Geoghegan with some editorization by me
Reviewed by: Alexander Korotkov, Teodor Sigaev
Discussion: https://www.postgresql.org/message-id/CAH2-Wz%3DkCWuXeMrBCopC-tFs3FbiVxQNjjgNKdG2sHxZ5k2y3w%40mail.gmail.com
2018-04-19 07:45:58 +02:00
|
|
|
/*
|
|
|
|
* Every internal page should have exactly one negative infinity item
|
|
|
|
* at all times. Only _bt_split() and _bt_newroot() should add items
|
|
|
|
* that become negative infinity items through truncation, since
|
|
|
|
* they're the only routines that allocate new internal pages. Do not
|
|
|
|
* allow a retail insertion of a new item at the negative infinity
|
|
|
|
* offset.
|
|
|
|
*/
|
|
|
|
if (!P_ISLEAF(lpageop) && newitemoff == P_FIRSTDATAKEY(lpageop))
|
|
|
|
elog(ERROR, "cannot insert second negative infinity item in block %u of index \"%s\"",
|
|
|
|
itup_blkno, RelationGetRelationName(rel));
|
|
|
|
|
2003-07-21 22:29:40 +02:00
|
|
|
/* Do the update. No ereport(ERROR) until changes are logged */
|
2003-02-21 01:06:22 +01:00
|
|
|
START_CRIT_SECTION();
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-08-29 21:33:14 +02:00
|
|
|
if (!_bt_pgaddtup(page, itemsz, itup, newitemoff))
|
|
|
|
elog(PANIC, "failed to add new item to block %u in index \"%s\"",
|
|
|
|
itup_blkno, RelationGetRelationName(rel));
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2006-04-01 01:32:07 +02:00
|
|
|
MarkBufferDirty(buf);
|
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
if (BufferIsValid(metabuf))
|
|
|
|
{
|
Skip full index scan during cleanup of B-tree indexes when possible
Vacuum of an index consists of two stages: multiple (zero or more) ambulkdelete
calls and one amvacuumcleanup call. When workload on particular table
is append-only, then autovacuum isn't intended to touch this table. However,
user may run vacuum manually in order to fill visibility map and get benefits
of index-only scans. Then ambulkdelete wouldn't be called for indexes
of such table (because no heap tuples were deleted), only amvacuumcleanup would
be called. In this case, amvacuumcleanup would perform a full index scan for
two objectives: put recyclable pages into free space map and update index
statistics.
This patch allows btvacuumcleanup to skip the full index scan when two conditions
are satisfied: no pages are going to be put into free space map and index
statistics isn't stalled. In order to check first condition, we store
oldest btpo_xact in the meta-page. When it precedes RecentGlobalXmin, then
there are some recyclable pages. In order to check second condition we store
number of heap tuples observed during previous full index scan by cleanup.
If fraction of newly inserted tuples is less than
vacuum_cleanup_index_scale_factor, then statistics isn't considered to be
stalled. vacuum_cleanup_index_scale_factor can be defined as both reloption and GUC (default).
This patch bumps B-tree meta-page version. Upgrade of meta-page is performed
"on the fly": during VACUUM meta-page is rewritten with new version. No special
handling in pg_upgrade is required.
Author: Masahiko Sawada, Alexander Korotkov
Review by: Peter Geoghegan, Kyotaro Horiguchi, Alexander Korotkov, Yura Sokolov
Discussion: https://www.postgresql.org/message-id/flat/CAD21AoAX+d2oD_nrd9O2YkpzHaFr=uQeGr9s1rKC3O4ENc568g@mail.gmail.com
2018-04-04 18:29:00 +02:00
|
|
|
/* upgrade meta-page if needed */
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
if (metad->btm_version < BTREE_NOVAC_VERSION)
|
Skip full index scan during cleanup of B-tree indexes when possible
Vacuum of an index consists of two stages: multiple (zero or more) ambulkdelete
calls and one amvacuumcleanup call. When workload on particular table
is append-only, then autovacuum isn't intended to touch this table. However,
user may run vacuum manually in order to fill visibility map and get benefits
of index-only scans. Then ambulkdelete wouldn't be called for indexes
of such table (because no heap tuples were deleted), only amvacuumcleanup would
be called. In this case, amvacuumcleanup would perform a full index scan for
two objectives: put recyclable pages into free space map and update index
statistics.
This patch allows btvacuumcleanup to skip the full index scan when two conditions
are satisfied: no pages are going to be put into free space map and index
statistics isn't stalled. In order to check first condition, we store
oldest btpo_xact in the meta-page. When it precedes RecentGlobalXmin, then
there are some recyclable pages. In order to check second condition we store
number of heap tuples observed during previous full index scan by cleanup.
If fraction of newly inserted tuples is less than
vacuum_cleanup_index_scale_factor, then statistics isn't considered to be
stalled. vacuum_cleanup_index_scale_factor can be defined as both reloption and GUC (default).
This patch bumps B-tree meta-page version. Upgrade of meta-page is performed
"on the fly": during VACUUM meta-page is rewritten with new version. No special
handling in pg_upgrade is required.
Author: Masahiko Sawada, Alexander Korotkov
Review by: Peter Geoghegan, Kyotaro Horiguchi, Alexander Korotkov, Yura Sokolov
Discussion: https://www.postgresql.org/message-id/flat/CAD21AoAX+d2oD_nrd9O2YkpzHaFr=uQeGr9s1rKC3O4ENc568g@mail.gmail.com
2018-04-04 18:29:00 +02:00
|
|
|
_bt_upgrademetapage(metapg);
|
2003-02-21 01:06:22 +01:00
|
|
|
metad->btm_fastroot = itup_blkno;
|
|
|
|
metad->btm_fastlevel = lpageop->btpo.level;
|
2006-04-01 01:32:07 +02:00
|
|
|
MarkBufferDirty(metabuf);
|
2003-02-21 01:06:22 +01:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
/* clear INCOMPLETE_SPLIT flag on child if inserting a downlink */
|
|
|
|
if (BufferIsValid(cbuf))
|
|
|
|
{
|
2016-04-20 15:31:19 +02:00
|
|
|
Page cpage = BufferGetPage(cbuf);
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
BTPageOpaque cpageop = (BTPageOpaque) PageGetSpecialPointer(cpage);
|
|
|
|
|
|
|
|
Assert(P_INCOMPLETE_SPLIT(cpageop));
|
|
|
|
cpageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
|
|
|
|
MarkBufferDirty(cbuf);
|
|
|
|
}
|
|
|
|
|
2018-04-11 00:21:03 +02:00
|
|
|
/*
|
|
|
|
* Cache the block information if we just inserted into the rightmost
|
|
|
|
* leaf page of the index and it's not the root page. For very small
|
|
|
|
* index where root is also the leaf, there is no point trying for any
|
|
|
|
* optimization.
|
|
|
|
*/
|
|
|
|
if (P_RIGHTMOST(lpageop) && P_ISLEAF(lpageop) && !P_ISROOT(lpageop))
|
|
|
|
cachedBlock = BufferGetBlockNumber(buf);
|
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
/* XLOG stuff */
|
2010-12-13 18:34:26 +01:00
|
|
|
if (RelationNeedsWAL(rel))
|
2003-02-21 01:06:22 +01:00
|
|
|
{
|
|
|
|
xl_btree_insert xlrec;
|
|
|
|
xl_btree_metadata xlmeta;
|
|
|
|
uint8 xlinfo;
|
|
|
|
XLogRecPtr recptr;
|
|
|
|
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
xlrec.offnum = itup_off;
|
2003-02-21 01:06:22 +01:00
|
|
|
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
XLogBeginInsert();
|
|
|
|
XLogRegisterData((char *) &xlrec, SizeOfBtreeInsert);
|
2003-02-21 01:06:22 +01:00
|
|
|
|
2006-04-13 05:53:05 +02:00
|
|
|
if (P_ISLEAF(lpageop))
|
|
|
|
xlinfo = XLOG_BTREE_INSERT_LEAF;
|
|
|
|
else
|
|
|
|
{
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
/*
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
* Register the left child whose INCOMPLETE_SPLIT flag was
|
|
|
|
* cleared.
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
*/
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
XLogRegisterBuffer(1, cbuf, REGBUF_STANDARD);
|
2006-04-13 05:53:05 +02:00
|
|
|
|
|
|
|
xlinfo = XLOG_BTREE_INSERT_UPPER;
|
|
|
|
}
|
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
if (BufferIsValid(metabuf))
|
|
|
|
{
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
|
|
|
|
xlmeta.version = metad->btm_version;
|
2003-02-21 01:06:22 +01:00
|
|
|
xlmeta.root = metad->btm_root;
|
|
|
|
xlmeta.level = metad->btm_level;
|
|
|
|
xlmeta.fastroot = metad->btm_fastroot;
|
|
|
|
xlmeta.fastlevel = metad->btm_fastlevel;
|
Skip full index scan during cleanup of B-tree indexes when possible
Vacuum of index consists of two stages: multiple (zero or more) ambulkdelete
calls and one amvacuumcleanup call. When workload on particular table
is append-only, then autovacuum isn't intended to touch this table. However,
user may run vacuum manually in order to fill visibility map and get benefits
of index-only scans. Then ambulkdelete wouldn't be called for indexes
of such table (because no heap tuples were deleted), only amvacuumcleanup would
be called. In this case, amvacuumcleanup would perform full index scan for
two objectives: put recyclable pages into free space map and update index
statistics.
This patch allows btvacuumclanup to skip full index scan when two conditions
are satisfied: no pages are going to be put into free space map and index
statistics isn't stalled. In order to check first condition, we store
oldest btpo_xact in the meta-page. When it precedes RecentGlobalXmin, then
there are some recyclable pages. In order to check second condition we store
number of heap tuples observed during previous full index scan by cleanup.
If fraction of newly inserted tuples is less than
vacuum_cleanup_index_scale_factor, then statistics isn't considered to be
stalled. vacuum_cleanup_index_scale_factor can be defined as both reloption and GUC (default).
This patch bumps B-tree meta-page version. Upgrade of meta-page is performed
"on the fly": during VACUUM meta-page is rewritten with new version. No special
handling in pg_upgrade is required.
Author: Masahiko Sawada, Alexander Korotkov
Review by: Peter Geoghegan, Kyotaro Horiguchi, Alexander Korotkov, Yura Sokolov
Discussion: https://www.postgresql.org/message-id/flat/CAD21AoAX+d2oD_nrd9O2YkpzHaFr=uQeGr9s1rKC3O4ENc568g@mail.gmail.com
2018-04-04 18:29:00 +02:00
|
|
|
xlmeta.oldest_btpo_xact = metad->btm_oldest_btpo_xact;
|
|
|
|
xlmeta.last_cleanup_num_heap_tuples =
|
|
|
|
metad->btm_last_cleanup_num_heap_tuples;
|
2003-02-21 01:06:22 +01:00
|
|
|
|
2017-11-03 21:31:32 +01:00
|
|
|
XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
XLogRegisterBufData(2, (char *) &xlmeta, sizeof(xl_btree_metadata));
|
2006-04-13 05:53:05 +02:00
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
xlinfo = XLOG_BTREE_INSERT_META;
|
|
|
|
}
|
2001-01-31 02:08:36 +01:00
|
|
|
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
|
Adjust INCLUDE index truncation comments and code.
Add several assertions that ensure that we're dealing with a pivot tuple
without non-key attributes where that's expected. Also, remove the
assertion within _bt_isequal(), restoring the v10 function signature. A
similar check will be performed for the page highkey within
_bt_moveright() in most cases. Also avoid dropping all objects within
regression tests, to increase pg_dump test coverage for INCLUDE indexes.
Rather than using infrastructure that's generally intended to be used
with reference counted heap tuple descriptors during truncation, use the
same function that was introduced to store flat TupleDescs in shared
memory (we use a temp palloc'd buffer). This isn't strictly necessary,
but seems more future-proof than the old approach. It also lets us
avoid including rel.h within indextuple.c, which was arguably a
modularity violation. Also, we now call index_deform_tuple() with the
truncated TupleDesc, not the source TupleDesc, since that's more robust,
and saves a few cycles.
In passing, fix a memory leak by pfree'ing truncated pivot tuple memory
during CREATE INDEX. Also pfree during a page split, just to be
consistent.
Refactor _bt_check_natts() to be more readable.
Author: Peter Geoghegan with some editorization by me
Reviewed by: Alexander Korotkov, Teodor Sigaev
Discussion: https://www.postgresql.org/message-id/CAH2-Wz%3DkCWuXeMrBCopC-tFs3FbiVxQNjjgNKdG2sHxZ5k2y3w%40mail.gmail.com
2018-04-19 07:45:58 +02:00
|
|
|
XLogRegisterBufData(0, (char *) itup, IndexTupleSize(itup));
|
2003-02-21 01:06:22 +01:00
|
|
|
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
recptr = XLogInsert(RM_BTREE_ID, xlinfo);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
if (BufferIsValid(metabuf))
|
2001-02-02 20:49:15 +01:00
|
|
|
{
|
2003-02-21 01:06:22 +01:00
|
|
|
PageSetLSN(metapg, recptr);
|
2001-02-02 20:49:15 +01:00
|
|
|
}
|
2014-04-01 18:19:47 +02:00
|
|
|
if (BufferIsValid(cbuf))
|
|
|
|
{
|
2016-04-20 15:31:19 +02:00
|
|
|
PageSetLSN(BufferGetPage(cbuf), recptr);
|
2014-04-01 18:19:47 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
PageSetLSN(page, recptr);
|
1996-12-06 10:45:30 +01:00
|
|
|
}
|
2000-10-04 02:04:43 +02:00
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
END_CRIT_SECTION();
|
2000-10-04 02:04:43 +02:00
|
|
|
|
Remove unnecessary relcache flushes after changing btree metapages.
These flushes were added in my commit d2896a9ed, which added the btree
logic that keeps a cached copy of the index metapage data in index relcache
entries. The idea was to ensure that other backends would promptly update
their cached copies after a change. However, this is not really necessary,
since _bt_getroot() has adequate defenses against believing a stale root
page link, and _bt_getrootheight() doesn't have to be 100% right.
Moreover, if it were necessary, a relcache flush would be an unreliable way
to do it, since the sinval mechanism believes that relcache flush requests
represent transactional updates, and therefore discards them on transaction
rollback. Therefore, we might as well drop these flush requests and save
the time to rebuild the whole relcache entry after a metapage change.
If we ever try to support in-place truncation of btree indexes, it might
be necessary to revisit this issue so that _bt_getroot() can't get caught
by trying to follow a metapage link to a page that no longer exists.
A possible solution to that is to make use of an smgr, rather than
relcache, inval request to force other backends to discard their cached
metapages. But for the moment this is not worth pursuing.
2014-02-05 19:43:37 +01:00
|
|
|
/* release buffers */
|
2003-02-21 01:06:22 +01:00
|
|
|
if (BufferIsValid(metabuf))
|
2006-04-01 01:32:07 +02:00
|
|
|
_bt_relbuf(rel, metabuf);
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
if (BufferIsValid(cbuf))
|
|
|
|
_bt_relbuf(rel, cbuf);
|
2006-04-01 01:32:07 +02:00
|
|
|
_bt_relbuf(rel, buf);
|
2018-04-11 00:21:03 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we decided to cache the insertion target block, then set it now.
|
|
|
|
* But before that, check for the height of the tree and don't go for
|
|
|
|
* the optimization for small indexes. We defer that check to this
|
|
|
|
* point to ensure that we don't call _bt_getrootheight while holding
|
|
|
|
* lock on any other block.
|
|
|
|
*
|
|
|
|
* We do this after dropping locks on all buffers. So the information
|
|
|
|
* about whether the insertion block is still the rightmost block or
|
|
|
|
* not may have changed in between. But we will deal with that during
|
2018-04-26 20:47:16 +02:00
|
|
|
* next insert operation. No special care is required while setting
|
|
|
|
* it.
|
2018-04-11 00:21:03 +02:00
|
|
|
*/
|
|
|
|
if (BlockNumberIsValid(cachedBlock) &&
|
|
|
|
_bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL)
|
|
|
|
RelationSetTargetBlock(rel, cachedBlock);
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1997-09-07 07:04:48 +02:00
|
|
|
* _bt_split() -- split a page in the btree.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2006-04-13 05:53:05 +02:00
|
|
|
* On entry, buf is the page to split, and is pinned and write-locked.
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
* newitemoff etc. tell us about the new item that must be inserted
|
|
|
|
* along with the data from the original page.
|
2000-07-21 08:42:39 +02:00
|
|
|
*
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* itup_key is used for suffix truncation on leaf pages (internal
|
|
|
|
* page callers pass NULL). When splitting a non-leaf page, 'cbuf'
|
|
|
|
* is the left-sibling of the page we're inserting the downlink for.
|
|
|
|
* This function will clear the INCOMPLETE_SPLIT flag on it, and
|
|
|
|
* release the buffer.
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
*
|
2000-07-21 08:42:39 +02:00
|
|
|
* Returns the new right sibling of buf, pinned and write-locked.
|
2005-09-25 00:54:44 +02:00
|
|
|
* The pin and lock on buf are maintained.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
1997-09-08 04:41:22 +02:00
|
|
|
static Buffer
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
_bt_split(Relation rel, BTScanInsert itup_key, Buffer buf, Buffer cbuf,
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
OffsetNumber newitemoff, Size newitemsz, IndexTuple newitem)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1997-09-08 04:41:22 +02:00
|
|
|
Buffer rbuf;
|
|
|
|
Page origpage;
|
|
|
|
Page leftpage,
|
|
|
|
rightpage;
|
2010-08-29 21:33:14 +02:00
|
|
|
BlockNumber origpagenumber,
|
|
|
|
rightpagenumber;
|
1997-09-08 04:41:22 +02:00
|
|
|
BTPageOpaque ropaque,
|
|
|
|
lopaque,
|
|
|
|
oopaque;
|
2003-02-22 01:45:05 +01:00
|
|
|
Buffer sbuf = InvalidBuffer;
|
|
|
|
Page spage = NULL;
|
|
|
|
BTPageOpaque sopaque = NULL;
|
1997-09-08 04:41:22 +02:00
|
|
|
Size itemsz;
|
|
|
|
ItemId itemid;
|
2006-01-26 00:04:21 +01:00
|
|
|
IndexTuple item;
|
1997-09-08 04:41:22 +02:00
|
|
|
OffsetNumber leftoff,
|
|
|
|
rightoff;
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
OffsetNumber firstright;
|
1997-09-08 04:41:22 +02:00
|
|
|
OffsetNumber maxoff;
|
|
|
|
OffsetNumber i;
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
bool newitemonleft,
|
|
|
|
isleaf;
|
2018-04-07 22:00:39 +02:00
|
|
|
IndexTuple lefthikey;
|
|
|
|
int indnatts = IndexRelationGetNumberOfAttributes(rel);
|
|
|
|
int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
|
2000-10-13 14:05:22 +02:00
|
|
|
|
2010-08-29 21:33:14 +02:00
|
|
|
/*
|
|
|
|
* origpage is the original page to be split. leftpage is a temporary
|
|
|
|
* buffer that receives the left-sibling data, which will be copied back
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
* into origpage on success. rightpage is the new page that will receive
|
|
|
|
* the right-sibling data.
|
|
|
|
*
|
|
|
|
* leftpage is allocated after choosing a split point. rightpage's new
|
|
|
|
* buffer isn't acquired until after leftpage is initialized and has new
|
|
|
|
* high key, the last point where splitting the page may fail (barring
|
|
|
|
* corruption). Failing before acquiring new buffer won't have lasting
|
|
|
|
* consequences, since origpage won't have been modified and leftpage is
|
|
|
|
* only workspace.
|
2010-08-29 21:33:14 +02:00
|
|
|
*/
|
2016-04-20 15:31:19 +02:00
|
|
|
origpage = BufferGetPage(buf);
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
oopaque = (BTPageOpaque) PageGetSpecialPointer(origpage);
|
2010-08-29 21:33:14 +02:00
|
|
|
origpagenumber = BufferGetBlockNumber(buf);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2007-04-11 22:47:38 +02:00
|
|
|
/*
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
* Choose a point to split origpage at.
|
|
|
|
*
|
|
|
|
* A split point can be thought of as a point _between_ two existing
|
|
|
|
* tuples on origpage (lastleft and firstright tuples), provided you
|
|
|
|
* pretend that the new item that didn't fit is already on origpage.
|
|
|
|
*
|
|
|
|
* Since origpage does not actually contain newitem, the representation of
|
|
|
|
* split points needs to work with two boundary cases: splits where
|
|
|
|
* newitem is lastleft, and splits where newitem is firstright.
|
|
|
|
* newitemonleft resolves the ambiguity that would otherwise exist when
|
|
|
|
* newitemoff == firstright. In all other cases it's clear which side of
|
|
|
|
* the split every tuple goes on from context. newitemonleft is usually
|
|
|
|
* (but not always) redundant information.
|
2007-04-11 22:47:38 +02:00
|
|
|
*/
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
firstright = _bt_findsplitloc(rel, origpage, newitemoff, newitemsz,
|
|
|
|
newitem, &newitemonleft);
|
2007-04-11 22:47:38 +02:00
|
|
|
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
/* Allocate temp buffer for leftpage */
|
|
|
|
leftpage = PageGetTempPage(origpage);
|
|
|
|
_bt_pageinit(leftpage, BufferGetPageSize(buf));
|
1997-09-07 07:04:48 +02:00
|
|
|
lopaque = (BTPageOpaque) PageGetSpecialPointer(leftpage);
|
|
|
|
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
/*
|
|
|
|
* leftpage won't be the root when we're done. Also, clear the SPLIT_END
|
|
|
|
* and HAS_GARBAGE flags.
|
|
|
|
*/
|
2000-10-04 02:04:43 +02:00
|
|
|
lopaque->btpo_flags = oopaque->btpo_flags;
|
2006-07-25 21:13:00 +02:00
|
|
|
lopaque->btpo_flags &= ~(BTP_ROOT | BTP_SPLIT_END | BTP_HAS_GARBAGE);
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
/* set flag in leftpage indicating that rightpage has no downlink yet */
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
lopaque->btpo_flags |= BTP_INCOMPLETE_SPLIT;
|
1997-09-07 07:04:48 +02:00
|
|
|
lopaque->btpo_prev = oopaque->btpo_prev;
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
/* handle btpo_next after rightpage buffer acquired */
|
|
|
|
lopaque->btpo.level = oopaque->btpo.level;
|
|
|
|
/* handle btpo_cycleid after rightpage buffer acquired */
|
1999-03-28 22:32:42 +02:00
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/*
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
* Copy the original page's LSN into leftpage, which will become the
|
|
|
|
* updated version of the page. We need this because XLogInsert will
|
|
|
|
* examine the LSN and possibly dump it in a page image.
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
PageSetLSN(leftpage, PageGetLSN(origpage));
|
|
|
|
isleaf = P_ISLEAF(oopaque);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* The "high key" for the new left page will be the first key that's going
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
* to go into the new right page, or a truncated version if this is a leaf
|
|
|
|
* page split.
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
*
|
|
|
|
* The high key for the left page is formed using the first item on the
|
|
|
|
* right page, which may seem to be contrary to Lehman & Yao's approach of
|
|
|
|
* using the left page's last item as its new high key when splitting on
|
|
|
|
* the leaf level. It isn't, though: suffix truncation will leave the
|
|
|
|
* left page's high key fully equal to the last item on the left page when
|
|
|
|
* two tuples with equal key values (excluding heap TID) enclose the split
|
|
|
|
* point. It isn't actually necessary for a new leaf high key to be equal
|
|
|
|
* to the last item on the left for the L&Y "subtree" invariant to hold.
|
|
|
|
* It's sufficient to make sure that the new leaf high key is strictly
|
|
|
|
* less than the first item on the right leaf page, and greater than or
|
|
|
|
* equal to (not necessarily equal to) the last item on the left leaf
|
|
|
|
* page.
|
|
|
|
*
|
|
|
|
* In other words, when suffix truncation isn't possible, L&Y's exact
|
|
|
|
* approach to leaf splits is taken. (Actually, even that is slightly
|
|
|
|
* inaccurate. A tuple with all the keys from firstright but the heap TID
|
|
|
|
* from lastleft will be used as the new high key, since the last left
|
|
|
|
* tuple could be physically larger despite being opclass-equal in respect
|
|
|
|
* of all attributes prior to the heap TID attribute.)
|
2000-07-21 08:42:39 +02:00
|
|
|
*/
|
|
|
|
if (!newitemonleft && newitemoff == firstright)
|
|
|
|
{
|
|
|
|
/* incoming tuple will become first on right page */
|
|
|
|
itemsz = newitemsz;
|
|
|
|
item = newitem;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
2000-07-21 08:42:39 +02:00
|
|
|
else
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
2000-07-21 08:42:39 +02:00
|
|
|
/* existing item at firstright will become first on right page */
|
|
|
|
itemid = PageGetItemId(origpage, firstright);
|
|
|
|
itemsz = ItemIdGetLength(itemid);
|
2006-01-26 00:04:21 +01:00
|
|
|
item = (IndexTuple) PageGetItem(origpage, itemid);
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
2018-04-07 22:00:39 +02:00
|
|
|
|
|
|
|
/*
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* Truncate unneeded key and non-key attributes of the high key item
|
|
|
|
* before inserting it on the left page. This can only happen at the leaf
|
Adjust INCLUDE index truncation comments and code.
Add several assertions that ensure that we're dealing with a pivot tuple
without non-key attributes where that's expected. Also, remove the
assertion within _bt_isequal(), restoring the v10 function signature. A
similar check will be performed for the page highkey within
_bt_moveright() in most cases. Also avoid dropping all objects within
regression tests, to increase pg_dump test coverage for INCLUDE indexes.
Rather than using infrastructure that's generally intended to be used
with reference counted heap tuple descriptors during truncation, use the
same function that was introduced to store flat TupleDescs in shared
memory (we use a temp palloc'd buffer). This isn't strictly necessary,
but seems more future-proof than the old approach. It also lets us
avoid including rel.h within indextuple.c, which was arguably a
modularity violation. Also, we now call index_deform_tuple() with the
truncated TupleDesc, not the source TupleDesc, since that's more robust,
and saves a few cycles.
In passing, fix a memory leak by pfree'ing truncated pivot tuple memory
during CREATE INDEX. Also pfree during a page split, just to be
consistent.
Refactor _bt_check_natts() to be more readable.
Author: Peter Geoghegan with some editorization by me
Reviewed by: Alexander Korotkov, Teodor Sigaev
Discussion: https://www.postgresql.org/message-id/CAH2-Wz%3DkCWuXeMrBCopC-tFs3FbiVxQNjjgNKdG2sHxZ5k2y3w%40mail.gmail.com
2018-04-19 07:45:58 +02:00
|
|
|
* level, since in general all pivot tuple values originate from leaf
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
* level high keys. A pivot tuple in a grandparent page must guide a
|
|
|
|
* search not only to the correct parent page, but also to the correct
|
|
|
|
* leaf page.
|
2018-04-07 22:00:39 +02:00
|
|
|
*/
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
if (isleaf && (itup_key->heapkeyspace || indnatts != indnkeyatts))
|
2018-04-07 22:00:39 +02:00
|
|
|
{
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
IndexTuple lastleft;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Determine which tuple will become the last on the left page. This
|
|
|
|
* is needed to decide how many attributes from the first item on the
|
|
|
|
* right page must remain in new high key for left page.
|
|
|
|
*/
|
|
|
|
if (newitemonleft && newitemoff == firstright)
|
|
|
|
{
|
|
|
|
/* incoming tuple will become last on left page */
|
|
|
|
lastleft = newitem;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
OffsetNumber lastleftoff;
|
|
|
|
|
|
|
|
/* item just before firstright will become last on left page */
|
|
|
|
lastleftoff = OffsetNumberPrev(firstright);
|
|
|
|
Assert(lastleftoff >= P_FIRSTDATAKEY(oopaque));
|
|
|
|
itemid = PageGetItemId(origpage, lastleftoff);
|
|
|
|
lastleft = (IndexTuple) PageGetItem(origpage, itemid);
|
|
|
|
}
|
|
|
|
|
|
|
|
Assert(lastleft != item);
|
|
|
|
lefthikey = _bt_truncate(rel, lastleft, item, itup_key);
|
2018-04-07 22:00:39 +02:00
|
|
|
itemsz = IndexTupleSize(lefthikey);
|
|
|
|
itemsz = MAXALIGN(itemsz);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
lefthikey = item;
|
|
|
|
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
/*
|
|
|
|
* Add new high key to leftpage
|
|
|
|
*/
|
|
|
|
leftoff = P_HIKEY;
|
|
|
|
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
Assert(BTreeTupleGetNAtts(lefthikey, rel) > 0);
|
|
|
|
Assert(BTreeTupleGetNAtts(lefthikey, rel) <= indnkeyatts);
|
2018-04-07 22:00:39 +02:00
|
|
|
if (PageAddItem(leftpage, (Item) lefthikey, itemsz, leftoff,
|
2007-09-20 19:56:33 +02:00
|
|
|
false, false) == InvalidOffsetNumber)
|
2010-08-29 21:33:14 +02:00
|
|
|
elog(ERROR, "failed to add hikey to the left sibling"
|
2007-12-31 05:52:05 +01:00
|
|
|
" while splitting block %u of index \"%s\"",
|
2010-08-29 21:33:14 +02:00
|
|
|
origpagenumber, RelationGetRelationName(rel));
|
2000-07-21 08:42:39 +02:00
|
|
|
leftoff = OffsetNumberNext(leftoff);
|
Adjust INCLUDE index truncation comments and code.
Add several assertions that ensure that we're dealing with a pivot tuple
without non-key attributes where that's expected. Also, remove the
assertion within _bt_isequal(), restoring the v10 function signature. A
similar check will be performed for the page highkey within
_bt_moveright() in most cases. Also avoid dropping all objects within
regression tests, to increase pg_dump test coverage for INCLUDE indexes.
Rather than using infrastructure that's generally intended to be used
with reference counted heap tuple descriptors during truncation, use the
same function that was introduced to store flat TupleDescs in shared
memory (we use a temp palloc'd buffer). This isn't strictly necessary,
but seems more future-proof than the old approach. It also lets us
avoid including rel.h within indextuple.c, which was arguably a
modularity violation. Also, we now call index_deform_tuple() with the
truncated TupleDesc, not the source TupleDesc, since that's more robust,
and saves a few cycles.
In passing, fix a memory leak by pfree'ing truncated pivot tuple memory
during CREATE INDEX. Also pfree during a page split, just to be
consistent.
Refactor _bt_check_natts() to be more readable.
Author: Peter Geoghegan with some editorization by me
Reviewed by: Alexander Korotkov, Teodor Sigaev
Discussion: https://www.postgresql.org/message-id/CAH2-Wz%3DkCWuXeMrBCopC-tFs3FbiVxQNjjgNKdG2sHxZ5k2y3w%40mail.gmail.com
2018-04-19 07:45:58 +02:00
|
|
|
/* be tidy */
|
|
|
|
if (lefthikey != item)
|
|
|
|
pfree(lefthikey);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/*
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
* Acquire a new right page to split into, now that left page has a new
|
|
|
|
* high key. From here on, it's not okay to throw an error without
|
|
|
|
* zeroing rightpage first. This coding rule ensures that we won't
|
|
|
|
* confuse future VACUUM operations, which might otherwise try to re-find
|
|
|
|
* a downlink to a leftover junk page as the page undergoes deletion.
|
|
|
|
*
|
|
|
|
* It would be reasonable to start the critical section just after the new
|
|
|
|
* rightpage buffer is acquired instead; that would allow us to avoid
|
|
|
|
* leftover junk pages without bothering to zero rightpage. We do it this
|
|
|
|
* way because it avoids an unnecessary PANIC when either origpage or its
|
|
|
|
* existing sibling page are corrupt.
|
|
|
|
*/
|
|
|
|
rbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
|
|
|
|
rightpage = BufferGetPage(rbuf);
|
|
|
|
rightpagenumber = BufferGetBlockNumber(rbuf);
|
|
|
|
/* rightpage was initialized by _bt_getbuf */
|
|
|
|
ropaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Finish off remaining leftpage special area fields. They cannot be set
|
|
|
|
* before both origpage (leftpage) and rightpage buffers are acquired and
|
|
|
|
* locked.
|
|
|
|
*/
|
|
|
|
lopaque->btpo_next = rightpagenumber;
|
|
|
|
lopaque->btpo_cycleid = _bt_vacuum_cycleid(rel);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* rightpage won't be the root when we're done. Also, clear the SPLIT_END
|
|
|
|
* and HAS_GARBAGE flags.
|
|
|
|
*/
|
|
|
|
ropaque->btpo_flags = oopaque->btpo_flags;
|
|
|
|
ropaque->btpo_flags &= ~(BTP_ROOT | BTP_SPLIT_END | BTP_HAS_GARBAGE);
|
|
|
|
ropaque->btpo_prev = origpagenumber;
|
|
|
|
ropaque->btpo_next = oopaque->btpo_next;
|
|
|
|
ropaque->btpo.level = oopaque->btpo.level;
|
|
|
|
ropaque->btpo_cycleid = lopaque->btpo_cycleid;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add new high key to rightpage where necessary.
|
|
|
|
*
|
|
|
|
* If the page we're splitting is not the rightmost page at its level in
|
|
|
|
* the tree, then the first entry on the page is the high key from
|
|
|
|
* origpage.
|
|
|
|
*/
|
|
|
|
rightoff = P_HIKEY;
|
|
|
|
|
|
|
|
if (!P_RIGHTMOST(oopaque))
|
|
|
|
{
|
|
|
|
itemid = PageGetItemId(origpage, P_HIKEY);
|
|
|
|
itemsz = ItemIdGetLength(itemid);
|
|
|
|
item = (IndexTuple) PageGetItem(origpage, itemid);
|
|
|
|
Assert(BTreeTupleGetNAtts(item, rel) > 0);
|
|
|
|
Assert(BTreeTupleGetNAtts(item, rel) <= indnkeyatts);
|
|
|
|
if (PageAddItem(rightpage, (Item) item, itemsz, rightoff,
|
|
|
|
false, false) == InvalidOffsetNumber)
|
|
|
|
{
|
|
|
|
memset(rightpage, 0, BufferGetPageSize(rbuf));
|
|
|
|
elog(ERROR, "failed to add hikey to the right sibling"
|
|
|
|
" while splitting block %u of index \"%s\"",
|
|
|
|
origpagenumber, RelationGetRelationName(rel));
|
|
|
|
}
|
|
|
|
rightoff = OffsetNumberNext(rightoff);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now transfer all the data items (non-pivot tuples in isleaf case, or
|
|
|
|
* additional pivot tuples in !isleaf case) to the appropriate page.
|
2007-04-11 22:47:38 +02:00
|
|
|
*
|
|
|
|
* Note: we *must* insert at least the right page's items in item-number
|
|
|
|
* order, for the benefit of _bt_restore_page().
|
2000-07-21 08:42:39 +02:00
|
|
|
*/
|
|
|
|
maxoff = PageGetMaxOffsetNumber(origpage);
|
|
|
|
|
|
|
|
for (i = P_FIRSTDATAKEY(oopaque); i <= maxoff; i = OffsetNumberNext(i))
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
|
|
|
itemid = PageGetItemId(origpage, i);
|
|
|
|
itemsz = ItemIdGetLength(itemid);
|
2006-01-26 00:04:21 +01:00
|
|
|
item = (IndexTuple) PageGetItem(origpage, itemid);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/* does new item belong before this one? */
|
|
|
|
if (i == newitemoff)
|
|
|
|
{
|
|
|
|
if (newitemonleft)
|
|
|
|
{
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
Assert(newitemoff <= firstright);
|
2010-08-29 21:33:14 +02:00
|
|
|
if (!_bt_pgaddtup(leftpage, newitemsz, newitem, leftoff))
|
|
|
|
{
|
|
|
|
memset(rightpage, 0, BufferGetPageSize(rbuf));
|
|
|
|
elog(ERROR, "failed to add new item to the left sibling"
|
|
|
|
" while splitting block %u of index \"%s\"",
|
|
|
|
origpagenumber, RelationGetRelationName(rel));
|
|
|
|
}
|
2000-07-21 08:42:39 +02:00
|
|
|
leftoff = OffsetNumberNext(leftoff);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
Don't leave behind junk nbtree pages during split.
Commit 8fa30f906be reduced the elevel of a number of "can't happen"
_bt_split() errors from PANIC to ERROR. At the same time, the new right
page buffer for the split could continue to be acquired well before the
critical section. This was possible because it was relatively
straightforward to make sure that _bt_split() could not throw an error,
with a few specific exceptions. The exceptional cases were safe because
they involved specific, well understood errors, making it possible to
consistently zero the right page before actually raising an error using
elog(). There was no danger of leaving around a junk page, provided
_bt_split() stuck to this coding rule.
Commit 8224de4f, which introduced INCLUDE indexes, added code to make
_bt_split() truncate away non-key attributes. This happened at a point
that broke the rule around zeroing the right page in _bt_split(). If
truncation failed (perhaps due to palloc() failure), that would result
in an errant right page buffer with junk contents. This could confuse
VACUUM when it attempted to delete the page, and should be avoided on
general principle.
To fix, reorganize _bt_split() so that truncation occurs before the new
right page buffer is even acquired. A junk page/buffer will not be left
behind if _bt_nonkey_truncate()/_bt_truncate() raise an error.
Discussion: https://postgr.es/m/CAH2-WzkcWT_-NH7EeL=Az4efg0KCV+wArygW8zKB=+HoP=VWMw@mail.gmail.com
Backpatch: 11-, where INCLUDE indexes were introduced.
2019-05-13 19:27:59 +02:00
|
|
|
Assert(newitemoff >= firstright);
|
2010-08-29 21:33:14 +02:00
|
|
|
if (!_bt_pgaddtup(rightpage, newitemsz, newitem, rightoff))
|
|
|
|
{
|
|
|
|
memset(rightpage, 0, BufferGetPageSize(rbuf));
|
|
|
|
elog(ERROR, "failed to add new item to the right sibling"
|
|
|
|
" while splitting block %u of index \"%s\"",
|
|
|
|
origpagenumber, RelationGetRelationName(rel));
|
|
|
|
}
|
2000-07-21 08:42:39 +02:00
|
|
|
rightoff = OffsetNumberNext(rightoff);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/* decide which page to put it on */
|
|
|
|
if (i < firstright)
|
|
|
|
{
|
2010-08-29 21:33:14 +02:00
|
|
|
if (!_bt_pgaddtup(leftpage, itemsz, item, leftoff))
|
|
|
|
{
|
|
|
|
memset(rightpage, 0, BufferGetPageSize(rbuf));
|
|
|
|
elog(ERROR, "failed to add old item to the left sibling"
|
|
|
|
" while splitting block %u of index \"%s\"",
|
|
|
|
origpagenumber, RelationGetRelationName(rel));
|
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
leftoff = OffsetNumberNext(leftoff);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2010-08-29 21:33:14 +02:00
|
|
|
if (!_bt_pgaddtup(rightpage, itemsz, item, rightoff))
|
|
|
|
{
|
|
|
|
memset(rightpage, 0, BufferGetPageSize(rbuf));
|
|
|
|
elog(ERROR, "failed to add old item to the right sibling"
|
|
|
|
" while splitting block %u of index \"%s\"",
|
|
|
|
origpagenumber, RelationGetRelationName(rel));
|
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
rightoff = OffsetNumberNext(rightoff);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/* cope with possibility that newitem goes at the end */
|
|
|
|
if (i <= newitemoff)
|
|
|
|
{
|
2007-02-06 15:55:11 +01:00
|
|
|
/*
|
|
|
|
* Can't have newitemonleft here; that would imply we were told to put
|
|
|
|
* *everything* on the left page, which cannot fit (if it could, we'd
|
|
|
|
* not be splitting the page).
|
|
|
|
*/
|
|
|
|
Assert(!newitemonleft);
|
2010-08-29 21:33:14 +02:00
|
|
|
if (!_bt_pgaddtup(rightpage, newitemsz, newitem, rightoff))
|
|
|
|
{
|
|
|
|
memset(rightpage, 0, BufferGetPageSize(rbuf));
|
|
|
|
elog(ERROR, "failed to add new item to the right sibling"
|
|
|
|
" while splitting block %u of index \"%s\"",
|
|
|
|
origpagenumber, RelationGetRelationName(rel));
|
|
|
|
}
|
2007-02-06 15:55:11 +01:00
|
|
|
rightoff = OffsetNumberNext(rightoff);
|
2000-07-21 08:42:39 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-10-04 02:04:43 +02:00
|
|
|
/*
|
2001-03-22 05:01:46 +01:00
|
|
|
* We have to grab the right sibling (if any) and fix the prev pointer
|
|
|
|
* there. We are guaranteed that this is deadlock-free since no other
|
2005-10-15 04:49:52 +02:00
|
|
|
* writer will be holding a lock on that page and trying to move left, and
|
|
|
|
* all readers release locks on a page before trying to fetch its
|
2001-03-22 05:01:46 +01:00
|
|
|
* neighbors.
|
2000-10-04 02:04:43 +02:00
|
|
|
*/
|
2010-08-29 21:33:14 +02:00
|
|
|
if (!P_RIGHTMOST(oopaque))
|
2000-10-04 02:04:43 +02:00
|
|
|
{
|
2010-08-29 21:33:14 +02:00
|
|
|
sbuf = _bt_getbuf(rel, oopaque->btpo_next, BT_WRITE);
|
2016-04-20 15:31:19 +02:00
|
|
|
spage = BufferGetPage(sbuf);
|
2003-02-22 01:45:05 +01:00
|
|
|
sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
|
2010-08-29 21:33:14 +02:00
|
|
|
if (sopaque->btpo_prev != origpagenumber)
|
|
|
|
{
|
|
|
|
memset(rightpage, 0, BufferGetPageSize(rbuf));
|
2019-08-01 11:05:08 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INDEX_CORRUPTED),
|
|
|
|
errmsg_internal("right sibling's left-link doesn't match: "
|
|
|
|
"block %u links to %u instead of expected %u in index \"%s\"",
|
|
|
|
oopaque->btpo_next, sopaque->btpo_prev, origpagenumber,
|
|
|
|
RelationGetRelationName(rel))));
|
2010-08-29 21:33:14 +02:00
|
|
|
}
|
2006-10-04 02:30:14 +02:00
|
|
|
|
2006-05-08 02:00:17 +02:00
|
|
|
/*
|
|
|
|
* Check to see if we can set the SPLIT_END flag in the right-hand
|
|
|
|
* split page; this can save some I/O for vacuum since it need not
|
|
|
|
* proceed to the right sibling. We can set the flag if the right
|
2006-10-04 02:30:14 +02:00
|
|
|
* sibling has a different cycleid: that means it could not be part of
|
|
|
|
* a group of pages that were all split off from the same ancestor
|
2006-05-08 02:00:17 +02:00
|
|
|
* page. If you're confused, imagine that page A splits to A B and
|
|
|
|
* then again, yielding A C B, while vacuum is in progress. Tuples
|
|
|
|
* originally in A could now be in either B or C, hence vacuum must
|
2014-05-06 18:12:18 +02:00
|
|
|
* examine both pages. But if D, our right sibling, has a different
|
2006-05-08 02:00:17 +02:00
|
|
|
* cycleid then it could not contain any tuples that were in A when
|
|
|
|
* the vacuum started.
|
|
|
|
*/
|
|
|
|
if (sopaque->btpo_cycleid != ropaque->btpo_cycleid)
|
|
|
|
ropaque->btpo_flags |= BTP_SPLIT_END;
|
2000-10-04 02:04:43 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Right sibling is locked, new siblings are prepared, but original page
|
2007-04-11 22:47:38 +02:00
|
|
|
* is not updated yet.
|
2000-10-04 02:04:43 +02:00
|
|
|
*
|
2006-04-01 01:32:07 +02:00
|
|
|
* NO EREPORT(ERROR) till right sibling is updated. We can get away with
|
|
|
|
* not starting the critical section till here because we haven't been
|
2010-08-29 21:33:14 +02:00
|
|
|
* scribbling on the original page yet; see comments above.
|
2000-10-04 02:04:43 +02:00
|
|
|
*/
|
2001-01-12 22:54:01 +01:00
|
|
|
START_CRIT_SECTION();
|
2002-08-06 04:36:35 +02:00
|
|
|
|
2007-02-08 06:05:53 +01:00
|
|
|
/*
|
|
|
|
* By here, the original data page has been split into two new halves, and
|
|
|
|
* these are correct. The algorithm requires that the left page never
|
|
|
|
* move during a split, so we copy the new left page back on top of the
|
|
|
|
* original. Note that this is not a waste of time, since we also require
|
|
|
|
* (in the page management code) that the center of a page always be
|
|
|
|
* clean, and the most efficient way to guarantee this is just to compact
|
|
|
|
* the data by reinserting it into a new left page. (XXX the latter
|
2010-08-29 21:33:14 +02:00
|
|
|
* comment is probably obsolete; but in any case it's good to not scribble
|
|
|
|
* on the original page until we enter the critical section.)
|
2007-02-08 06:05:53 +01:00
|
|
|
*
|
2007-11-15 22:14:46 +01:00
|
|
|
* We need to do this before writing the WAL record, so that XLogInsert
|
|
|
|
* can WAL log an image of the page if necessary.
|
2007-02-08 06:05:53 +01:00
|
|
|
*/
|
|
|
|
PageRestoreTempPage(leftpage, origpage);
|
2010-08-29 21:33:14 +02:00
|
|
|
/* leftpage, lopaque must not be used below here */
|
2007-02-08 06:05:53 +01:00
|
|
|
|
2007-04-11 22:47:38 +02:00
|
|
|
MarkBufferDirty(buf);
|
|
|
|
MarkBufferDirty(rbuf);
|
|
|
|
|
|
|
|
if (!P_RIGHTMOST(ropaque))
|
|
|
|
{
|
2010-08-29 21:33:14 +02:00
|
|
|
sopaque->btpo_prev = rightpagenumber;
|
2007-04-11 22:47:38 +02:00
|
|
|
MarkBufferDirty(sbuf);
|
|
|
|
}
|
|
|
|
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
/*
|
|
|
|
* Clear INCOMPLETE_SPLIT flag on child if inserting the new item finishes
|
|
|
|
* a split.
|
|
|
|
*/
|
|
|
|
if (!isleaf)
|
|
|
|
{
|
2016-04-20 15:31:19 +02:00
|
|
|
Page cpage = BufferGetPage(cbuf);
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
BTPageOpaque cpageop = (BTPageOpaque) PageGetSpecialPointer(cpage);
|
|
|
|
|
|
|
|
cpageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
|
|
|
|
MarkBufferDirty(cbuf);
|
|
|
|
}
|
|
|
|
|
2002-08-06 04:36:35 +02:00
|
|
|
/* XLOG stuff */
|
2010-12-13 18:34:26 +01:00
|
|
|
if (RelationNeedsWAL(rel))
|
2000-10-04 02:04:43 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
xl_btree_split xlrec;
|
2003-02-21 01:06:22 +01:00
|
|
|
uint8 xlinfo;
|
2001-03-22 05:01:46 +01:00
|
|
|
XLogRecPtr recptr;
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
|
2007-02-08 14:52:55 +01:00
|
|
|
xlrec.level = ropaque->btpo.level;
|
2007-04-11 22:47:38 +02:00
|
|
|
xlrec.firstright = firstright;
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
xlrec.newitemoff = newitemoff;
|
2000-12-28 14:00:29 +01:00
|
|
|
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
XLogBeginInsert();
|
|
|
|
XLogRegisterData((char *) &xlrec, SizeOfBtreeSplit);
|
2007-02-08 06:05:53 +01:00
|
|
|
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
|
|
|
|
XLogRegisterBuffer(1, rbuf, REGBUF_WILL_INIT);
|
|
|
|
/* Log the right sibling, because we've changed its prev-pointer. */
|
|
|
|
if (!P_RIGHTMOST(ropaque))
|
|
|
|
XLogRegisterBuffer(2, sbuf, REGBUF_STANDARD);
|
|
|
|
if (BufferIsValid(cbuf))
|
|
|
|
XLogRegisterBuffer(3, cbuf, REGBUF_STANDARD);
|
2007-02-08 06:05:53 +01:00
|
|
|
|
2007-04-11 22:47:38 +02:00
|
|
|
/*
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
* Log the new item, if it was inserted on the left page. (If it was
|
|
|
|
* put on the right page, we don't need to explicitly WAL log it
|
|
|
|
* because it's included with all the other items on the right page.)
|
|
|
|
* Show the new item as belonging to the left page buffer, so that it
|
|
|
|
* is not stored if XLogInsert decides it needs a full-page image of
|
|
|
|
* the left page. We store the offset anyway, though, to support
|
|
|
|
* archive compression of these records.
|
2007-02-08 06:05:53 +01:00
|
|
|
*/
|
2000-10-04 02:04:43 +02:00
|
|
|
if (newitemonleft)
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
XLogRegisterBufData(0, (char *) newitem, MAXALIGN(newitemsz));
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
/* Log the left page's new high key */
|
|
|
|
itemid = PageGetItemId(origpage, P_HIKEY);
|
|
|
|
item = (IndexTuple) PageGetItem(origpage, itemid);
|
|
|
|
XLogRegisterBufData(0, (char *) item, MAXALIGN(IndexTupleSize(item)));
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
|
2007-04-11 22:47:38 +02:00
|
|
|
/*
|
|
|
|
* Log the contents of the right page in the format understood by
|
2019-03-04 21:32:40 +01:00
|
|
|
* _bt_restore_page(). The whole right page will be recreated.
|
2007-02-08 06:05:53 +01:00
|
|
|
*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Direct access to page is not good but faster - we should implement
|
|
|
|
* some new func in page API. Note we only store the tuples
|
2007-04-11 22:47:38 +02:00
|
|
|
* themselves, knowing that they were inserted in item-number order
|
2019-05-14 00:53:39 +02:00
|
|
|
* and so the line pointers can be reconstructed. See comments for
|
2006-10-04 02:30:14 +02:00
|
|
|
* _bt_restore_page().
|
2000-10-04 02:04:43 +02:00
|
|
|
*/
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
XLogRegisterBufData(1,
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
(char *) rightpage + ((PageHeader) rightpage)->pd_upper,
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
((PageHeader) rightpage)->pd_special - ((PageHeader) rightpage)->pd_upper);
|
2007-02-08 06:05:53 +01:00
|
|
|
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
xlinfo = newitemonleft ? XLOG_BTREE_SPLIT_L : XLOG_BTREE_SPLIT_R;
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
recptr = XLogInsert(RM_BTREE_ID, xlinfo);
|
2000-10-04 02:04:43 +02:00
|
|
|
|
2007-02-08 14:52:55 +01:00
|
|
|
PageSetLSN(origpage, recptr);
|
2000-10-04 02:04:43 +02:00
|
|
|
PageSetLSN(rightpage, recptr);
|
|
|
|
if (!P_RIGHTMOST(ropaque))
|
|
|
|
{
|
|
|
|
PageSetLSN(spage, recptr);
|
|
|
|
}
|
2014-04-01 18:19:47 +02:00
|
|
|
if (!isleaf)
|
|
|
|
{
|
2016-04-20 15:31:19 +02:00
|
|
|
PageSetLSN(BufferGetPage(cbuf), recptr);
|
2014-04-01 18:19:47 +02:00
|
|
|
}
|
2000-10-04 02:04:43 +02:00
|
|
|
}
|
|
|
|
|
2001-01-24 00:29:22 +01:00
|
|
|
END_CRIT_SECTION();
|
|
|
|
|
2006-04-01 01:32:07 +02:00
|
|
|
/* release the old right sibling */
|
1997-09-07 07:04:48 +02:00
|
|
|
if (!P_RIGHTMOST(ropaque))
|
2006-04-01 01:32:07 +02:00
|
|
|
_bt_relbuf(rel, sbuf);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
/* release the child */
|
|
|
|
if (!isleaf)
|
|
|
|
_bt_relbuf(rel, cbuf);
|
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/* split's done */
|
1998-09-01 05:29:17 +02:00
|
|
|
return rbuf;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
/*
|
2019-05-03 22:34:45 +02:00
|
|
|
* _bt_insert_parent() -- Insert downlink into parent, completing split.
|
2003-02-21 01:06:22 +01:00
|
|
|
*
|
|
|
|
* On entry, buf and rbuf are the left and right split pages, which we
|
2019-05-03 22:34:45 +02:00
|
|
|
* still hold write locks on. Both locks will be released here. We
|
|
|
|
* release the rbuf lock once we have a write lock on the page that we
|
|
|
|
* intend to insert a downlink to rbuf on (i.e. buf's current parent page).
|
|
|
|
* The lock on buf is released at the same point as the lock on the parent
|
|
|
|
* page, since buf's INCOMPLETE_SPLIT flag must be cleared by the same
|
|
|
|
* atomic operation that completes the split by inserting a new downlink.
|
2003-02-21 01:06:22 +01:00
|
|
|
*
|
2018-12-19 01:59:50 +01:00
|
|
|
* stack - stack showing how we got here. Will be NULL when splitting true
|
|
|
|
* root, or during concurrent root split, where we can be inefficient
|
2003-02-21 01:06:22 +01:00
|
|
|
* is_root - we split the true root
|
|
|
|
* is_only - we split a page alone on its level (might have been fast root)
|
|
|
|
*/
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
static void
|
2003-02-21 01:06:22 +01:00
|
|
|
_bt_insert_parent(Relation rel,
|
|
|
|
Buffer buf,
|
|
|
|
Buffer rbuf,
|
|
|
|
BTStack stack,
|
|
|
|
bool is_root,
|
|
|
|
bool is_only)
|
|
|
|
{
|
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Here we have to do something Lehman and Yao don't talk about: deal with
|
|
|
|
* a root split and construction of a new root. If our stack is empty
|
|
|
|
* then we have just split a node on what had been the root level when we
|
2014-05-06 18:12:18 +02:00
|
|
|
* descended the tree. If it was still the root then we perform a
|
2005-10-15 04:49:52 +02:00
|
|
|
* new-root construction. If it *wasn't* the root anymore, search to find
|
|
|
|
* the next higher level that someone constructed meanwhile, and find the
|
|
|
|
* right place to insert as for the normal case.
|
2003-02-21 01:06:22 +01:00
|
|
|
*
|
2005-11-22 19:17:34 +01:00
|
|
|
* If we have to search for the parent level, we do so by re-descending
|
|
|
|
* from the root. This is not super-efficient, but it's rare enough not
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
* to matter.
|
2003-02-21 01:06:22 +01:00
|
|
|
*/
|
|
|
|
if (is_root)
|
|
|
|
{
|
|
|
|
Buffer rootbuf;
|
|
|
|
|
2004-01-07 19:56:30 +01:00
|
|
|
Assert(stack == NULL);
|
2003-02-21 01:06:22 +01:00
|
|
|
Assert(is_only);
|
|
|
|
/* create a new root node and update the metapage */
|
|
|
|
rootbuf = _bt_newroot(rel, buf, rbuf);
|
|
|
|
/* release the split buffers */
|
2006-04-01 01:32:07 +02:00
|
|
|
_bt_relbuf(rel, rootbuf);
|
|
|
|
_bt_relbuf(rel, rbuf);
|
|
|
|
_bt_relbuf(rel, buf);
|
2003-02-21 01:06:22 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
BlockNumber bknum = BufferGetBlockNumber(buf);
|
|
|
|
BlockNumber rbknum = BufferGetBlockNumber(rbuf);
|
2016-04-20 15:31:19 +02:00
|
|
|
Page page = BufferGetPage(buf);
|
2006-01-26 00:04:21 +01:00
|
|
|
IndexTuple new_item;
|
2003-02-21 01:06:22 +01:00
|
|
|
BTStackData fakestack;
|
2006-01-26 00:04:21 +01:00
|
|
|
IndexTuple ritem;
|
2003-02-21 01:06:22 +01:00
|
|
|
Buffer pbuf;
|
|
|
|
|
2004-01-07 19:56:30 +01:00
|
|
|
if (stack == NULL)
|
2003-02-21 01:06:22 +01:00
|
|
|
{
|
|
|
|
BTPageOpaque lpageop;
|
|
|
|
|
2014-09-11 21:43:56 +02:00
|
|
|
elog(DEBUG2, "concurrent ROOT page split");
|
2003-02-21 01:06:22 +01:00
|
|
|
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
|
|
|
/* Find the leftmost page at the next level up */
|
2016-04-08 21:36:30 +02:00
|
|
|
pbuf = _bt_get_endpoint(rel, lpageop->btpo.level + 1, false,
|
|
|
|
NULL);
|
2003-02-21 01:06:22 +01:00
|
|
|
/* Set up a phony stack entry pointing there */
|
|
|
|
stack = &fakestack;
|
|
|
|
stack->bts_blkno = BufferGetBlockNumber(pbuf);
|
|
|
|
stack->bts_offset = InvalidOffsetNumber;
|
|
|
|
stack->bts_parent = NULL;
|
|
|
|
_bt_relbuf(rel, pbuf);
|
|
|
|
}
|
|
|
|
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
/* get high key from left, a strict lower bound for new right page */
|
2006-01-26 00:04:21 +01:00
|
|
|
ritem = (IndexTuple) PageGetItem(page,
|
|
|
|
PageGetItemId(page, P_HIKEY));
|
2003-02-21 01:06:22 +01:00
|
|
|
|
|
|
|
/* form an index tuple that points at the new right page */
|
2006-01-26 00:04:21 +01:00
|
|
|
new_item = CopyIndexTuple(ritem);
|
2018-04-07 22:00:39 +02:00
|
|
|
BTreeInnerTupleSetDownLink(new_item, rbknum);
|
2003-02-21 01:06:22 +01:00
|
|
|
|
|
|
|
/*
|
2019-05-03 22:34:45 +02:00
|
|
|
* Re-find and write lock the parent of buf.
|
2003-02-21 01:06:22 +01:00
|
|
|
*
|
2019-05-22 18:55:34 +02:00
|
|
|
* It's possible that the location of buf's downlink has changed since
|
|
|
|
* our initial _bt_search() descent. _bt_getstackbuf() will detect
|
|
|
|
* and recover from this, updating the stack, which ensures that the
|
|
|
|
* new downlink will be inserted at the correct offset. Even buf's
|
|
|
|
* parent may have changed.
|
2003-02-21 01:06:22 +01:00
|
|
|
*/
|
2019-08-14 20:32:35 +02:00
|
|
|
pbuf = _bt_getstackbuf(rel, stack, bknum);
|
2003-02-21 01:06:22 +01:00
|
|
|
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
/*
|
|
|
|
* Now we can unlock the right child. The left child will be unlocked
|
|
|
|
* by _bt_insertonpg().
|
|
|
|
*/
|
2006-04-01 01:32:07 +02:00
|
|
|
_bt_relbuf(rel, rbuf);
|
2003-02-21 01:06:22 +01:00
|
|
|
|
|
|
|
if (pbuf == InvalidBuffer)
|
2019-08-01 11:05:08 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INDEX_CORRUPTED),
|
|
|
|
errmsg_internal("failed to re-find parent key in index \"%s\" for split pages %u/%u",
|
|
|
|
RelationGetRelationName(rel), bknum, rbknum)));
|
2003-02-21 01:06:22 +01:00
|
|
|
|
2019-08-14 20:32:35 +02:00
|
|
|
/* Recursively insert into the parent */
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
_bt_insertonpg(rel, NULL, pbuf, buf, stack->bts_parent,
|
2007-03-03 21:13:06 +01:00
|
|
|
new_item, stack->bts_offset + 1,
|
2005-03-21 02:24:04 +01:00
|
|
|
is_only);
|
2003-02-21 01:06:22 +01:00
|
|
|
|
|
|
|
/* be tidy */
|
|
|
|
pfree(new_item);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
/*
|
|
|
|
* _bt_finish_split() -- Finish an incomplete split
|
|
|
|
*
|
|
|
|
* A crash or other failure can leave a split incomplete. The insertion
|
|
|
|
* routines won't allow to insert on a page that is incompletely split.
|
|
|
|
* Before inserting on such a page, call _bt_finish_split().
|
|
|
|
*
|
|
|
|
* On entry, 'lbuf' must be locked in write-mode. On exit, it is unlocked
|
|
|
|
* and unpinned.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
_bt_finish_split(Relation rel, Buffer lbuf, BTStack stack)
|
|
|
|
{
|
2016-04-20 15:31:19 +02:00
|
|
|
Page lpage = BufferGetPage(lbuf);
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
BTPageOpaque lpageop = (BTPageOpaque) PageGetSpecialPointer(lpage);
|
|
|
|
Buffer rbuf;
|
|
|
|
Page rpage;
|
|
|
|
BTPageOpaque rpageop;
|
|
|
|
bool was_root;
|
|
|
|
bool was_only;
|
|
|
|
|
|
|
|
Assert(P_INCOMPLETE_SPLIT(lpageop));
|
|
|
|
|
|
|
|
/* Lock right sibling, the one missing the downlink */
|
|
|
|
rbuf = _bt_getbuf(rel, lpageop->btpo_next, BT_WRITE);
|
2016-04-20 15:31:19 +02:00
|
|
|
rpage = BufferGetPage(rbuf);
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
rpageop = (BTPageOpaque) PageGetSpecialPointer(rpage);
|
|
|
|
|
|
|
|
/* Could this be a root split? */
|
|
|
|
if (!stack)
|
|
|
|
{
|
|
|
|
Buffer metabuf;
|
|
|
|
Page metapg;
|
|
|
|
BTMetaPageData *metad;
|
|
|
|
|
|
|
|
/* acquire lock on the metapage */
|
|
|
|
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
|
2016-04-20 15:31:19 +02:00
|
|
|
metapg = BufferGetPage(metabuf);
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
metad = BTPageGetMeta(metapg);
|
|
|
|
|
|
|
|
was_root = (metad->btm_root == BufferGetBlockNumber(lbuf));
|
|
|
|
|
|
|
|
_bt_relbuf(rel, metabuf);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
was_root = false;
|
|
|
|
|
|
|
|
/* Was this the only page on the level before split? */
|
|
|
|
was_only = (P_LEFTMOST(lpageop) && P_RIGHTMOST(rpageop));
|
|
|
|
|
|
|
|
elog(DEBUG1, "finishing incomplete split of %u/%u",
|
|
|
|
BufferGetBlockNumber(lbuf), BufferGetBlockNumber(rbuf));
|
|
|
|
|
|
|
|
_bt_insert_parent(rel, lbuf, rbuf, stack, was_root, was_only);
|
|
|
|
}
|
|
|
|
|
2000-07-21 08:42:39 +02:00
|
|
|
/*
|
2019-08-14 20:32:35 +02:00
|
|
|
* _bt_getstackbuf() -- Walk back up the tree one step, and find the pivot
|
|
|
|
* tuple whose downlink points to child page.
|
2000-07-21 08:42:39 +02:00
|
|
|
*
|
2019-08-14 20:32:35 +02:00
|
|
|
* Caller passes child's block number, which is used to identify
|
|
|
|
* associated pivot tuple in parent page using a linear search that
|
|
|
|
* matches on pivot's downlink/block number. The expected location of
|
|
|
|
* the pivot tuple is taken from the stack one level above the child
|
|
|
|
* page. This is used as a starting point. Insertions into the
|
|
|
|
* parent level could cause the pivot tuple to move right; deletions
|
|
|
|
* could cause it to move left, but not left of the page we previously
|
|
|
|
* found it on.
|
2000-07-21 08:42:39 +02:00
|
|
|
*
|
2019-08-14 20:32:35 +02:00
|
|
|
* Caller can use its stack to relocate the pivot tuple/downlink for
|
|
|
|
* any same-level page to the right of the page found by its initial
|
|
|
|
* descent. This is necessary because of the possibility that caller
|
|
|
|
* moved right to recover from a concurrent page split. It's also
|
|
|
|
* convenient for certain callers to be able to step right when there
|
|
|
|
* wasn't a concurrent page split, while still using their original
|
|
|
|
* stack. For example, the checkingunique _bt_doinsert() case may
|
|
|
|
* have to step right when there are many physical duplicates, and its
|
|
|
|
* scantid forces an insertion to the right of the "first page the
|
|
|
|
* value could be on".
|
2003-02-21 01:06:22 +01:00
|
|
|
*
|
2019-08-14 20:32:35 +02:00
|
|
|
* Returns write-locked parent page buffer, or InvalidBuffer if pivot
|
|
|
|
* tuple not found (should not happen). Adjusts bts_blkno &
|
|
|
|
* bts_offset if changed. Page split caller should insert its new
|
|
|
|
* pivot tuple for its new right sibling page on parent page, at the
|
|
|
|
* offset number bts_offset + 1.
|
2000-07-21 08:42:39 +02:00
|
|
|
*/
|
2003-02-23 07:17:13 +01:00
|
|
|
Buffer
_bt_getstackbuf(Relation rel, BTStack stack, BlockNumber child)
{
	BlockNumber blkno;
	OffsetNumber start;

	/* Start from the parent position remembered in the search stack */
	blkno = stack->bts_blkno;
	start = stack->bts_offset;

	for (;;)
	{
		Buffer		buf;
		Page		page;
		BTPageOpaque opaque;

		/* Write-lock the candidate parent page before inspecting it */
		buf = _bt_getbuf(rel, blkno, BT_WRITE);
		page = BufferGetPage(buf);
		opaque = (BTPageOpaque) PageGetSpecialPointer(page);

		/*
		 * If this parent page was itself split but its downlink was never
		 * inserted (an interrupted split), finish that split first, then
		 * re-examine the same block.  NOTE(review): _bt_finish_split
		 * presumably releases buf before returning, since we loop around
		 * and re-acquire blkno — confirm against its definition.
		 */
		if (P_INCOMPLETE_SPLIT(opaque))
		{
			_bt_finish_split(rel, buf, stack->bts_parent);
			continue;
		}

		/* Half-dead or deleted pages cannot hold the downlink; skip search */
		if (!P_IGNORE(opaque))
		{
			OffsetNumber offnum,
						minoff,
						maxoff;
			ItemId		itemid;
			IndexTuple	item;

			minoff = P_FIRSTDATAKEY(opaque);
			maxoff = PageGetMaxOffsetNumber(page);

			/*
			 * start = InvalidOffsetNumber means "search the whole page". We
			 * need this test anyway due to possibility that page has a high
			 * key now when it didn't before.
			 */
			if (start < minoff)
				start = minoff;

			/*
			 * Need this check too, to guard against possibility that page
			 * split since we visited it originally.
			 */
			if (start > maxoff)
				start = OffsetNumberNext(maxoff);

			/*
			 * These loops will check every item on the page --- but in an
			 * order that's attuned to the probability of where it actually
			 * is.  Scan to the right first, then to the left.
			 */
			for (offnum = start;
				 offnum <= maxoff;
				 offnum = OffsetNumberNext(offnum))
			{
				itemid = PageGetItemId(page, offnum);
				item = (IndexTuple) PageGetItem(page, itemid);

				/* Match pivot tuples by their downlink block number */
				if (BTreeInnerTupleGetDownLink(item) == child)
				{
					/* Return accurate pointer to where link is now */
					stack->bts_blkno = blkno;
					stack->bts_offset = offnum;
					return buf;
				}
			}

			/* Not found to the right of start; now scan leftward */
			for (offnum = OffsetNumberPrev(start);
				 offnum >= minoff;
				 offnum = OffsetNumberPrev(offnum))
			{
				itemid = PageGetItemId(page, offnum);
				item = (IndexTuple) PageGetItem(page, itemid);

				if (BTreeInnerTupleGetDownLink(item) == child)
				{
					/* Return accurate pointer to where link is now */
					stack->bts_blkno = blkno;
					stack->bts_offset = offnum;
					return buf;
				}
			}
		}

		/*
		 * The item we're looking for moved right at least one page.
		 */
		if (P_RIGHTMOST(opaque))
		{
			/* Reached end of this level without finding the downlink */
			_bt_relbuf(rel, buf);
			return InvalidBuffer;
		}
		/* Read the right-sibling link while we still hold the lock */
		blkno = opaque->btpo_next;
		/* On the next page, search every item from the beginning */
		start = InvalidOffsetNumber;
		_bt_relbuf(rel, buf);
	}
}
|
|
|
|
|
|
|
|
/*
|
1997-09-07 07:04:48 +02:00
|
|
|
* _bt_newroot() -- Create a new root page for the index.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
1997-09-07 07:04:48 +02:00
|
|
|
* We've just split the old root page and need to create a new one.
|
|
|
|
* In order to do this, we add a new root page to the file, then lock
|
|
|
|
* the metadata page and update it. This is guaranteed to be deadlock-
|
|
|
|
* free, because all readers release their locks on the metadata page
|
|
|
|
* before trying to lock the root, and all writers lock the root before
|
|
|
|
* trying to lock the metadata page. We have a write lock on the old
|
|
|
|
* root page, so we have not introduced any cycles into the waits-for
|
|
|
|
* graph.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
1997-09-07 07:04:48 +02:00
|
|
|
* On entry, lbuf (the old root) and rbuf (its new peer) are write-
|
2001-01-26 02:24:31 +01:00
|
|
|
* locked. On exit, a new root page exists with entries for the
|
2001-03-22 05:01:46 +01:00
|
|
|
* two new children, metapage is updated and unlocked/unpinned.
|
|
|
|
* The new root buffer is returned to caller which has to unlock/unpin
|
|
|
|
* lbuf, rbuf & rootbuf.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2001-01-26 02:24:31 +01:00
|
|
|
static Buffer
|
1996-07-09 08:22:35 +02:00
|
|
|
_bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
|
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
Buffer rootbuf;
|
|
|
|
Page lpage,
|
|
|
|
rootpage;
|
|
|
|
BlockNumber lbkno,
|
|
|
|
rbkno;
|
|
|
|
BlockNumber rootblknum;
|
|
|
|
BTPageOpaque rootopaque;
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
BTPageOpaque lopaque;
|
2001-03-22 05:01:46 +01:00
|
|
|
ItemId itemid;
|
2006-01-26 00:04:21 +01:00
|
|
|
IndexTuple item;
|
2014-04-04 12:12:38 +02:00
|
|
|
IndexTuple left_item;
|
|
|
|
Size left_item_sz;
|
|
|
|
IndexTuple right_item;
|
|
|
|
Size right_item_sz;
|
2001-03-22 05:01:46 +01:00
|
|
|
Buffer metabuf;
|
|
|
|
Page metapg;
|
2000-12-28 14:00:29 +01:00
|
|
|
BTMetaPageData *metad;
|
2000-10-13 04:03:02 +02:00
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
lbkno = BufferGetBlockNumber(lbuf);
|
|
|
|
rbkno = BufferGetBlockNumber(rbuf);
|
2016-04-20 15:31:19 +02:00
|
|
|
lpage = BufferGetPage(lbuf);
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
|
2003-02-21 01:06:22 +01:00
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/* get a new root page */
|
|
|
|
rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
|
2016-04-20 15:31:19 +02:00
|
|
|
rootpage = BufferGetPage(rootbuf);
|
2000-10-04 02:04:43 +02:00
|
|
|
rootblknum = BufferGetBlockNumber(rootbuf);
|
2003-02-22 01:45:05 +01:00
|
|
|
|
|
|
|
/* acquire lock on the metapage */
|
2000-12-28 14:00:29 +01:00
|
|
|
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
|
2016-04-20 15:31:19 +02:00
|
|
|
metapg = BufferGetPage(metabuf);
|
2000-12-28 14:00:29 +01:00
|
|
|
metad = BTPageGetMeta(metapg);
|
2000-10-04 02:04:43 +02:00
|
|
|
|
2014-04-04 12:12:38 +02:00
|
|
|
/*
|
|
|
|
* Create downlink item for left page (old root). Since this will be the
|
|
|
|
* first item in a non-leaf page, it implicitly has minus-infinity key
|
|
|
|
* value, so we need not store any actual key in it.
|
|
|
|
*/
|
|
|
|
left_item_sz = sizeof(IndexTupleData);
|
|
|
|
left_item = (IndexTuple) palloc(left_item_sz);
|
|
|
|
left_item->t_info = left_item_sz;
|
2018-04-07 22:00:39 +02:00
|
|
|
BTreeInnerTupleSetDownLink(left_item, lbkno);
|
Adjust INCLUDE index truncation comments and code.
Add several assertions that ensure that we're dealing with a pivot tuple
without non-key attributes where that's expected. Also, remove the
assertion within _bt_isequal(), restoring the v10 function signature. A
similar check will be performed for the page highkey within
_bt_moveright() in most cases. Also avoid dropping all objects within
regression tests, to increase pg_dump test coverage for INCLUDE indexes.
Rather than using infrastructure that's generally intended to be used
with reference counted heap tuple descriptors during truncation, use the
same function that was introduced to store flat TupleDescs in shared
memory (we use a temp palloc'd buffer). This isn't strictly necessary,
but seems more future-proof than the old approach. It also lets us
avoid including rel.h within indextuple.c, which was arguably a
modularity violation. Also, we now call index_deform_tuple() with the
truncated TupleDesc, not the source TupleDesc, since that's more robust,
and saves a few cycles.
In passing, fix a memory leak by pfree'ing truncated pivot tuple memory
during CREATE INDEX. Also pfree during a page split, just to be
consistent.
Refactor _bt_check_natts() to be more readable.
Author: Peter Geoghegan with some editorization by me
Reviewed by: Alexander Korotkov, Teodor Sigaev
Discussion: https://www.postgresql.org/message-id/CAH2-Wz%3DkCWuXeMrBCopC-tFs3FbiVxQNjjgNKdG2sHxZ5k2y3w%40mail.gmail.com
2018-04-19 07:45:58 +02:00
|
|
|
BTreeTupleSetNAtts(left_item, 0);
|
2014-04-04 12:12:38 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Create downlink item for right page. The key for it is obtained from
|
|
|
|
* the "high key" position in the left page.
|
|
|
|
*/
|
|
|
|
itemid = PageGetItemId(lpage, P_HIKEY);
|
|
|
|
right_item_sz = ItemIdGetLength(itemid);
|
|
|
|
item = (IndexTuple) PageGetItem(lpage, itemid);
|
|
|
|
right_item = CopyIndexTuple(item);
|
2018-04-07 22:00:39 +02:00
|
|
|
BTreeInnerTupleSetDownLink(right_item, rbkno);
|
2014-04-04 12:12:38 +02:00
|
|
|
|
2003-07-21 22:29:40 +02:00
|
|
|
/* NO EREPORT(ERROR) from here till newroot op is logged */
|
2001-01-12 22:54:01 +01:00
|
|
|
START_CRIT_SECTION();
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2018-05-30 18:45:39 +02:00
|
|
|
/* upgrade metapage if needed */
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
if (metad->btm_version < BTREE_NOVAC_VERSION)
|
2018-05-30 18:45:39 +02:00
|
|
|
_bt_upgrademetapage(metapg);
|
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/* set btree special data */
|
|
|
|
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
|
|
|
|
rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
|
2003-02-21 01:06:22 +01:00
|
|
|
rootopaque->btpo_flags = BTP_ROOT;
|
|
|
|
rootopaque->btpo.level =
|
|
|
|
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo.level + 1;
|
2006-05-08 02:00:17 +02:00
|
|
|
rootopaque->btpo_cycleid = 0;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
/* update metapage data */
|
|
|
|
metad->btm_root = rootblknum;
|
|
|
|
metad->btm_level = rootopaque->btpo.level;
|
|
|
|
metad->btm_fastroot = rootblknum;
|
|
|
|
metad->btm_fastlevel = rootopaque->btpo.level;
|
1999-03-28 22:32:42 +02:00
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Insert the left page pointer into the new root page. The root page is
|
|
|
|
* the rightmost page on its level so there is no "high key" in it; the
|
|
|
|
* two items will go into positions P_HIKEY and P_FIRSTKEY.
|
2006-04-13 05:53:05 +02:00
|
|
|
*
|
|
|
|
* Note: we *must* insert the two items in item-number order, for the
|
|
|
|
* benefit of _bt_restore_page().
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
Adjust INCLUDE index truncation comments and code.
Add several assertions that ensure that we're dealing with a pivot tuple
without non-key attributes where that's expected. Also, remove the
assertion within _bt_isequal(), restoring the v10 function signature. A
similar check will be performed for the page highkey within
_bt_moveright() in most cases. Also avoid dropping all objects within
regression tests, to increase pg_dump test coverage for INCLUDE indexes.
Rather than using infrastructure that's generally intended to be used
with reference counted heap tuple descriptors during truncation, use the
same function that was introduced to store flat TupleDescs in shared
memory (we use a temp palloc'd buffer). This isn't strictly necessary,
but seems more future-proof than the old approach. It also lets us
avoid including rel.h within indextuple.c, which was arguably a
modularity violation. Also, we now call index_deform_tuple() with the
truncated TupleDesc, not the source TupleDesc, since that's more robust,
and saves a few cycles.
In passing, fix a memory leak by pfree'ing truncated pivot tuple memory
during CREATE INDEX. Also pfree during a page split, just to be
consistent.
Refactor _bt_check_natts() to be more readable.
Author: Peter Geoghegan with some editorization by me
Reviewed by: Alexander Korotkov, Teodor Sigaev
Discussion: https://www.postgresql.org/message-id/CAH2-Wz%3DkCWuXeMrBCopC-tFs3FbiVxQNjjgNKdG2sHxZ5k2y3w%40mail.gmail.com
2018-04-19 07:45:58 +02:00
|
|
|
Assert(BTreeTupleGetNAtts(left_item, rel) == 0);
|
2014-04-04 12:12:38 +02:00
|
|
|
if (PageAddItem(rootpage, (Item) left_item, left_item_sz, P_HIKEY,
|
2007-09-20 19:56:33 +02:00
|
|
|
false, false) == InvalidOffsetNumber)
|
2007-12-31 05:52:05 +01:00
|
|
|
elog(PANIC, "failed to add leftkey to new root page"
|
|
|
|
" while splitting block %u of index \"%s\"",
|
|
|
|
BufferGetBlockNumber(lbuf), RelationGetRelationName(rel));
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* insert the right page pointer into the new root page.
|
|
|
|
*/
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
Assert(BTreeTupleGetNAtts(right_item, rel) > 0);
|
|
|
|
Assert(BTreeTupleGetNAtts(right_item, rel) <=
|
Adjust INCLUDE index truncation comments and code.
Add several assertions that ensure that we're dealing with a pivot tuple
without non-key attributes where that's expected. Also, remove the
assertion within _bt_isequal(), restoring the v10 function signature. A
similar check will be performed for the page highkey within
_bt_moveright() in most cases. Also avoid dropping all objects within
regression tests, to increase pg_dump test coverage for INCLUDE indexes.
Rather than using infrastructure that's generally intended to be used
with reference counted heap tuple descriptors during truncation, use the
same function that was introduced to store flat TupleDescs in shared
memory (we use a temp palloc'd buffer). This isn't strictly necessary,
but seems more future-proof than the old approach. It also lets us
avoid including rel.h within indextuple.c, which was arguably a
modularity violation. Also, we now call index_deform_tuple() with the
truncated TupleDesc, not the source TupleDesc, since that's more robust,
and saves a few cycles.
In passing, fix a memory leak by pfree'ing truncated pivot tuple memory
during CREATE INDEX. Also pfree during a page split, just to be
consistent.
Refactor _bt_check_natts() to be more readable.
Author: Peter Geoghegan with some editorization by me
Reviewed by: Alexander Korotkov, Teodor Sigaev
Discussion: https://www.postgresql.org/message-id/CAH2-Wz%3DkCWuXeMrBCopC-tFs3FbiVxQNjjgNKdG2sHxZ5k2y3w%40mail.gmail.com
2018-04-19 07:45:58 +02:00
|
|
|
IndexRelationGetNumberOfKeyAttributes(rel));
|
2014-04-04 12:12:38 +02:00
|
|
|
if (PageAddItem(rootpage, (Item) right_item, right_item_sz, P_FIRSTKEY,
|
2007-09-20 19:56:33 +02:00
|
|
|
false, false) == InvalidOffsetNumber)
|
2007-12-31 05:52:05 +01:00
|
|
|
elog(PANIC, "failed to add rightkey to new root page"
|
|
|
|
" while splitting block %u of index \"%s\"",
|
|
|
|
BufferGetBlockNumber(lbuf), RelationGetRelationName(rel));
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Make the handling of interrupted B-tree page splits more robust.
Splitting a page consists of two separate steps: splitting the child page,
and inserting the downlink for the new right page to the parent. Previously,
we handled the case that you crash in between those steps with a cleanup
routine after the WAL recovery had finished, which finished the incomplete
split. However, that doesn't help if the page split is interrupted but the
database doesn't crash, so that you don't perform WAL recovery. That could
happen for example if you run out of disk space.
Remove the end-of-recovery cleanup step. Instead, when a page is split, the
left page is marked with a new INCOMPLETE_SPLIT flag, and when the downlink
is inserted to the parent, the flag is cleared again. If an insertion sees
a page with the flag set, it knows that the split was interrupted for some
reason, and inserts the missing downlink before proceeding.
I used the same approach to fix GIN and GiST split algorithms earlier. This
was the last WAL cleanup routine, so we could get rid of that whole
machinery now, but I'll leave that for a separate patch.
Reviewed by Peter Geoghegan.
2014-03-18 19:12:58 +01:00
|
|
|
/* Clear the incomplete-split flag in the left child */
|
|
|
|
Assert(P_INCOMPLETE_SPLIT(lopaque));
|
|
|
|
lopaque->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;
|
|
|
|
MarkBufferDirty(lbuf);
|
|
|
|
|
2006-04-01 01:32:07 +02:00
|
|
|
MarkBufferDirty(rootbuf);
|
|
|
|
MarkBufferDirty(metabuf);
|
|
|
|
|
2000-10-04 02:04:43 +02:00
|
|
|
/* XLOG stuff */
|
2010-12-13 18:34:26 +01:00
|
|
|
if (RelationNeedsWAL(rel))
|
2000-10-04 02:04:43 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
xl_btree_newroot xlrec;
|
|
|
|
XLogRecPtr recptr;
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now dissects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-dissected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compensates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
xl_btree_metadata md;
|
2000-10-13 04:03:02 +02:00
|
|
|
|
2003-02-21 01:06:22 +01:00
|
|
|
xlrec.rootblk = rootblknum;
|
2000-12-28 14:00:29 +01:00
|
|
|
xlrec.level = metad->btm_level;
|
2003-02-21 01:06:22 +01:00
|
|
|
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now disects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-disected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compansates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
XLogBeginInsert();
|
|
|
|
XLogRegisterData((char *) &xlrec, SizeOfBtreeNewroot);
|
|
|
|
|
|
|
|
XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT);
|
|
|
|
XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD);
|
2017-11-03 21:31:32 +01:00
|
|
|
XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now disects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-disected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compansates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
|
Make heap TID a tiebreaker nbtree index column.
Make nbtree treat all index tuples as having a heap TID attribute.
Index searches can distinguish duplicates by heap TID, since heap TID is
always guaranteed to be unique. This general approach has numerous
benefits for performance, and is prerequisite to teaching VACUUM to
perform "retail index tuple deletion".
Naively adding a new attribute to every pivot tuple has unacceptable
overhead (it bloats internal pages), so suffix truncation of pivot
tuples is added. This will usually truncate away the "extra" heap TID
attribute from pivot tuples during a leaf page split, and may also
truncate away additional user attributes. This can increase fan-out,
especially in a multi-column index. Truncation can only occur at the
attribute granularity, which isn't particularly effective, but works
well enough for now. A future patch may add support for truncating
"within" text attributes by generating truncated key values using new
opclass infrastructure.
Only new indexes (BTREE_VERSION 4 indexes) will have insertions that
treat heap TID as a tiebreaker attribute, or will have pivot tuples
undergo suffix truncation during a leaf page split (on-disk
compatibility with versions 2 and 3 is preserved). Upgrades to version
4 cannot be performed on-the-fly, unlike upgrades from version 2 to
version 3. contrib/amcheck continues to work with version 2 and 3
indexes, while also enforcing stricter invariants when verifying version
4 indexes. These stricter invariants are the same invariants described
by "3.1.12 Sequencing" from the Lehman and Yao paper.
A later patch will enhance the logic used by nbtree to pick a split
point. This patch is likely to negatively impact performance without
smarter choices around the precise point to split leaf pages at. Making
these two mostly-distinct sets of enhancements into distinct commits
seems like it might clarify their design, even though neither commit is
particularly useful on its own.
The maximum allowed size of new tuples is reduced by an amount equal to
the space required to store an extra MAXALIGN()'d TID in a new high key
during leaf page splits. The user-facing definition of the "1/3 of a
page" restriction is already imprecise, and so does not need to be
revised. However, there should be a compatibility note in the v12
release notes.
Author: Peter Geoghegan
Reviewed-By: Heikki Linnakangas, Alexander Korotkov
Discussion: https://postgr.es/m/CAH2-WzkVb0Kom=R+88fDFb=JSxZMFvbHVC6Mn9LJ2n=X=kS-Uw@mail.gmail.com
2019-03-20 18:04:01 +01:00
|
|
|
Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
|
|
|
|
md.version = metad->btm_version;
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now disects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-disected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compansates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
md.root = rootblknum;
|
|
|
|
md.level = metad->btm_level;
|
|
|
|
md.fastroot = rootblknum;
|
|
|
|
md.fastlevel = metad->btm_level;
|
Skip full index scan during cleanup of B-tree indexes when possible
Vacuum of an index consists of two stages: multiple (zero or more) ambulkdelete
calls and one amvacuumcleanup call. When workload on particular table
is append-only, then autovacuum isn't intended to touch this table. However,
user may run vacuum manually in order to fill visibility map and get benefits
of index-only scans. Then ambulkdelete wouldn't be called for indexes
of such table (because no heap tuples were deleted), only amvacuumcleanup would
be called. In this case, amvacuumcleanup would perform a full index scan for
two objectives: put recyclable pages into free space map and update index
statistics.
This patch allows btvacuumcleanup to skip the full index scan when two conditions
are satisfied: no pages are going to be put into free space map and index
statistics isn't stalled. In order to check first condition, we store
oldest btpo_xact in the meta-page. When it precedes RecentGlobalXmin, then
there are some recyclable pages. In order to check second condition we store
number of heap tuples observed during previous full index scan by cleanup.
If fraction of newly inserted tuples is less than
vacuum_cleanup_index_scale_factor, then statistics isn't considered to be
stalled. vacuum_cleanup_index_scale_factor can be defined as both reloption and GUC (default).
This patch bumps B-tree meta-page version. Upgrade of meta-page is performed
"on the fly": during VACUUM meta-page is rewritten with new version. No special
handling in pg_upgrade is required.
Author: Masahiko Sawada, Alexander Korotkov
Review by: Peter Geoghegan, Kyotaro Horiguchi, Alexander Korotkov, Yura Sokolov
Discussion: https://www.postgresql.org/message-id/flat/CAD21AoAX+d2oD_nrd9O2YkpzHaFr=uQeGr9s1rKC3O4ENc568g@mail.gmail.com
2018-04-04 18:29:00 +02:00
|
|
|
md.oldest_btpo_xact = metad->btm_oldest_btpo_xact;
|
|
|
|
md.last_cleanup_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples;
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now disects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-disected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compansates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
|
|
|
|
XLogRegisterBufData(2, (char *) &md, sizeof(xl_btree_metadata));
|
2000-10-04 02:04:43 +02:00
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Direct access to page is not good but faster - we should implement
|
|
|
|
* some new func in page API.
|
2000-10-04 02:04:43 +02:00
|
|
|
*/
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now disects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-disected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compansates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
XLogRegisterBufData(0,
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
(char *) rootpage + ((PageHeader) rootpage)->pd_upper,
|
Revamp the WAL record format.
Each WAL record now carries information about the modified relation and
block(s) in a standardized format. That makes it easier to write tools that
need that information, like pg_rewind, prefetching the blocks to speed up
recovery, etc.
There's a whole new API for building WAL records, replacing the XLogRecData
chains used previously. The new API consists of XLogRegister* functions,
which are called for each buffer and chunk of data that is added to the
record. The new API also gives more control over when a full-page image is
written, by passing flags to the XLogRegisterBuffer function.
This also simplifies the XLogReadBufferForRedo() calls. The function can dig
the relation and block number from the WAL record, so they no longer need to
be passed as arguments.
For the convenience of redo routines, XLogReader now disects each WAL record
after reading it, copying the main data part and the per-block data into
MAXALIGNed buffers. The data chunks are not aligned within the WAL record,
but the redo routines can assume that the pointers returned by XLogRecGet*
functions are. Redo routines are now passed the XLogReaderState, which
contains the record in the already-disected format, instead of the plain
XLogRecord.
The new record format also makes the fixed size XLogRecord header smaller,
by removing the xl_len field. The length of the "main data" portion is now
stored at the end of the WAL record, and there's a separate header after
XLogRecord for it. The alignment padding at the end of XLogRecord is also
removed. This compansates for the fact that the new format would otherwise
be more bulky than the old format.
Reviewed by Andres Freund, Amit Kapila, Michael Paquier, Alvaro Herrera,
Fujii Masao.
2014-11-20 16:56:26 +01:00
|
|
|
((PageHeader) rootpage)->pd_special -
|
|
|
|
((PageHeader) rootpage)->pd_upper);
|
|
|
|
|
|
|
|
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT);
|
2000-10-13 04:03:02 +02:00
|
|
|
|
2014-04-22 21:40:44 +02:00
|
|
|
PageSetLSN(lpage, recptr);
|
2000-10-04 02:04:43 +02:00
|
|
|
PageSetLSN(rootpage, recptr);
|
2000-10-13 04:03:02 +02:00
|
|
|
PageSetLSN(metapg, recptr);
|
2000-10-04 02:04:43 +02:00
|
|
|
}
|
2002-08-06 04:36:35 +02:00
|
|
|
|
2001-01-12 22:54:01 +01:00
|
|
|
END_CRIT_SECTION();
|
2000-10-04 02:04:43 +02:00
|
|
|
|
2006-04-01 01:32:07 +02:00
|
|
|
/* done with metapage */
|
|
|
|
_bt_relbuf(rel, metabuf);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2014-04-04 12:12:38 +02:00
|
|
|
pfree(left_item);
|
|
|
|
pfree(right_item);
|
|
|
|
|
2006-01-11 09:43:13 +01:00
|
|
|
return rootbuf;
|
2001-01-26 02:24:31 +01:00
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
 *	_bt_pgaddtup() -- add a tuple to a particular page in the index.
 *
 *		This routine adds the tuple to the page as requested.  It does
 *		not affect pin/lock status, but you'd better have a write lock
 *		and pin on the target buffer!  Don't forget to write and release
 *		the buffer afterwards, either.
 *
 *		The main difference between this routine and a bare PageAddItem call
 *		is that this code knows that the leftmost index tuple on a non-leaf
 *		btree page doesn't need to have a key.  Therefore, it strips such
 *		tuples down to just the tuple header.  CAUTION: this works ONLY if
 *		we insert the tuples in order, so that the given itup_off does
 *		represent the final position of the tuple!
 *
 *		Returns true if the item was added, false if PageAddItem failed
 *		(e.g. because there was not enough free space on the page).
 */
static bool
_bt_pgaddtup(Page page,
			 Size itemsize,
			 IndexTuple itup,
			 OffsetNumber itup_off)
{
	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	IndexTupleData trunctuple;

	/*
	 * The first data item on an internal (non-leaf) page is the "minus
	 * infinity" downlink, which needs no key: truncate the tuple to just
	 * its header, with an explicit zero attribute count.
	 */
	if (!P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque))
	{
		trunctuple = *itup;
		trunctuple.t_info = sizeof(IndexTupleData);
		/* Deliberately zero INDEX_ALT_TID_MASK bits */
		BTreeTupleSetNAtts(&trunctuple, 0);
		itup = &trunctuple;
		itemsize = sizeof(IndexTupleData);
	}

	/* Insert at the caller-specified offset; no overwrite, no alignment fuzz */
	if (PageAddItem(page, (Item) itup, itemsize, itup_off,
					false, false) == InvalidOffsetNumber)
		return false;

	return true;
}
|
1997-03-24 09:48:16 +01:00
|
|
|
|
2006-07-25 21:13:00 +02:00
|
|
|
/*
 * _bt_vacuum_one_page - vacuum just one index page.
 *
 * Try to remove LP_DEAD items from the given page.  The passed buffer
 * must be exclusive-locked, but unlike a real VACUUM, we don't need a
 * super-exclusive "cleanup" lock (see nbtree/README).
 */
static void
_bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel)
{
	OffsetNumber deletable[MaxOffsetNumber];	/* offsets of LP_DEAD items */
	int			ndeletable = 0;
	OffsetNumber offnum,
				minoff,
				maxoff;
	Page		page = BufferGetPage(buffer);
	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);

	/* Caller is expected to pass only leaf pages */
	Assert(P_ISLEAF(opaque));

	/*
	 * Scan over all items to see which ones need to be deleted according to
	 * LP_DEAD flags.
	 */
	minoff = P_FIRSTDATAKEY(opaque);
	maxoff = PageGetMaxOffsetNumber(page);
	for (offnum = minoff;
		 offnum <= maxoff;
		 offnum = OffsetNumberNext(offnum))
	{
		ItemId		itemId = PageGetItemId(page, offnum);

		if (ItemIdIsDead(itemId))
			deletable[ndeletable++] = offnum;
	}

	/* Physically remove the collected items, if any were found */
	if (ndeletable > 0)
		_bt_delitems_delete(rel, buffer, deletable, ndeletable, heapRel);

	/*
	 * Note: if we didn't find any LP_DEAD items, then the page's
	 * BTP_HAS_GARBAGE hint bit is falsely set.  We do not bother expending a
	 * separate write to clear it, however.  We will clear it when we split
	 * the page.
	 */
}
|