postgresql/contrib/bloom/blutils.c

/*-------------------------------------------------------------------------
*
* blutils.c
* Bloom index utilities.
*
* Portions Copyright (c) 2016-2024, PostgreSQL Global Development Group
* Portions Copyright (c) 1990-1993, Regents of the University of California
*
* IDENTIFICATION
* contrib/bloom/blutils.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/amapi.h"
#include "access/generic_xlog.h"
#include "access/reloptions.h"
#include "bloom.h"
#include "catalog/index.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
#include "utils/memutils.h"
/* Macros for manipulating signature bits - note i is assumed to be of type int */
#define GETWORD(x,i) ( *( (BloomSignatureWord *)(x) + ( (i) / SIGNWORDBITS ) ) )
#define CLRBIT(x,i) GETWORD(x,i) &= ~( 0x01 << ( (i) % SIGNWORDBITS ) )
#define SETBIT(x,i) GETWORD(x,i) |= ( 0x01 << ( (i) % SIGNWORDBITS ) )
#define GETBIT(x,i) ( (GETWORD(x,i) >> ( (i) % SIGNWORDBITS )) & 0x01 )
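/*
 * Worked example (illustrative): with 16-bit signature words
 * (SIGNWORDBITS == 16), SETBIT(x, 37) resolves to
 *
 *     x[37 / 16] |= 0x01 << (37 % 16)    i.e. word 2, bit 5
 *
 * so a signature of n words provides n * 16 addressable bits.
 */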
PG_FUNCTION_INFO_V1(blhandler);
/* Kind of relation options for bloom index */
static relopt_kind bl_relopt_kind;
/* parse table for fillRelOptions */
static relopt_parse_elt bl_relopt_tab[INDEX_MAX_KEYS + 1];
static int32 myRand(void);
static void mySrand(uint32 seed);
/*
* Module initialization function: set up info about Bloom relation options.
*
* Note: keep this in sync with makeDefaultBloomOptions().
*/
void
_PG_init(void)
{
int i;
char buf[16];
bl_relopt_kind = add_reloption_kind();
/* Option for length of signature */
add_int_reloption(bl_relopt_kind, "length",
"Length of signature in bits",
DEFAULT_BLOOM_LENGTH, 1, MAX_BLOOM_LENGTH,
AccessExclusiveLock);
bl_relopt_tab[0].optname = "length";
bl_relopt_tab[0].opttype = RELOPT_TYPE_INT;
bl_relopt_tab[0].offset = offsetof(BloomOptions, bloomLength);
/* Number of bits for each possible index column: col1, col2, ... */
for (i = 0; i < INDEX_MAX_KEYS; i++)
{
snprintf(buf, sizeof(buf), "col%d", i + 1);
add_int_reloption(bl_relopt_kind, buf,
"Number of bits generated for each index column",
DEFAULT_BLOOM_BITS, 1, MAX_BLOOM_BITS,
AccessExclusiveLock);
bl_relopt_tab[i + 1].optname = MemoryContextStrdup(TopMemoryContext,
buf);
bl_relopt_tab[i + 1].opttype = RELOPT_TYPE_INT;
bl_relopt_tab[i + 1].offset = offsetof(BloomOptions, bitSize[0]) + sizeof(int) * i;
}
}
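/*
 * These reloptions are supplied at index creation time; e.g., per the
 * bloom documentation (values illustrative):
 *
 *     CREATE INDEX bloomidx ON tbl USING bloom (i1, i2, i3)
 *            WITH (length = 80, col1 = 2, col2 = 2, col3 = 4);
 */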
/*
* Construct a default set of Bloom options.
*/
static BloomOptions *
makeDefaultBloomOptions(void)
{
BloomOptions *opts;
int i;
opts = (BloomOptions *) palloc0(sizeof(BloomOptions));
/* Convert DEFAULT_BLOOM_LENGTH from # of bits to # of words */
opts->bloomLength = (DEFAULT_BLOOM_LENGTH + SIGNWORDBITS - 1) / SIGNWORDBITS;
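/*
 * E.g., with an 80-bit default length and 16-bit words this yields
 * (80 + 15) / 16 = 5 words (illustrative; the real constants live in
 * bloom.h).
 */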
for (i = 0; i < INDEX_MAX_KEYS; i++)
opts->bitSize[i] = DEFAULT_BLOOM_BITS;
SET_VARSIZE(opts, sizeof(BloomOptions));
return opts;
}
/*
* Bloom handler function: return IndexAmRoutine with access method parameters
* and callbacks.
*/
Datum
blhandler(PG_FUNCTION_ARGS)
{
IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);
amroutine->amstrategies = BLOOM_NSTRATEGIES;
amroutine->amsupport = BLOOM_NPROC;
amroutine->amoptsprocnum = BLOOM_OPTIONS_PROC;
amroutine->amcanorder = false;
amroutine->amcanorderbyop = false;
amroutine->amcanbackward = false;
amroutine->amcanunique = false;
amroutine->amcanmulticol = true;
amroutine->amoptionalkey = true;
amroutine->amsearcharray = false;
amroutine->amsearchnulls = false;
amroutine->amstorage = false;
amroutine->amclusterable = false;
amroutine->ampredlocks = false;
amroutine->amcanparallel = false;
amroutine->amcanbuildparallel = false;
amroutine->amcaninclude = false;
amroutine->amusemaintenanceworkmem = false;
amroutine->amparallelvacuumoptions =
VACUUM_OPTION_PARALLEL_BULKDEL | VACUUM_OPTION_PARALLEL_CLEANUP;
amroutine->amkeytype = InvalidOid;
amroutine->ambuild = blbuild;
amroutine->ambuildempty = blbuildempty;
amroutine->aminsert = blinsert;
amroutine->aminsertcleanup = NULL;
amroutine->ambulkdelete = blbulkdelete;
amroutine->amvacuumcleanup = blvacuumcleanup;
amroutine->amcanreturn = NULL;
amroutine->amcostestimate = blcostestimate;
amroutine->amoptions = bloptions;
amroutine->amproperty = NULL;
amroutine->ambuildphasename = NULL;
amroutine->amvalidate = blvalidate;
amroutine->amadjustmembers = NULL;
amroutine->ambeginscan = blbeginscan;
amroutine->amrescan = blrescan;
amroutine->amgettuple = NULL;
amroutine->amgetbitmap = blgetbitmap;
amroutine->amendscan = blendscan;
amroutine->ammarkpos = NULL;
amroutine->amrestrpos = NULL;
amroutine->amestimateparallelscan = NULL;
amroutine->aminitparallelscan = NULL;
amroutine->amparallelrescan = NULL;
PG_RETURN_POINTER(amroutine);
}
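/*
 * This handler is what the extension's SQL script binds to the "bloom"
 * access method, roughly as follows (a sketch of bloom--*.sql, not a
 * verbatim copy):
 *
 *     CREATE FUNCTION blhandler(internal)
 *     RETURNS index_am_handler
 *     AS 'MODULE_PATHNAME'
 *     LANGUAGE C;
 *
 *     CREATE ACCESS METHOD bloom TYPE INDEX HANDLER blhandler;
 */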
/*
* Fill BloomState structure for particular index.
*/
void
initBloomState(BloomState *state, Relation index)
{
int i;
state->nColumns = index->rd_att->natts;
/* Initialize hash function for each attribute */
for (i = 0; i < index->rd_att->natts; i++)
{
fmgr_info_copy(&(state->hashFn[i]),
index_getprocinfo(index, i + 1, BLOOM_HASH_PROC),
CurrentMemoryContext);
state->collations[i] = index->rd_indcollation[i];
}
/* Initialize amcache if needed with options from metapage */
if (!index->rd_amcache)
{
Buffer buffer;
Page page;
BloomMetaPageData *meta;
BloomOptions *opts;
opts = MemoryContextAlloc(index->rd_indexcxt, sizeof(BloomOptions));
buffer = ReadBuffer(index, BLOOM_METAPAGE_BLKNO);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = BufferGetPage(buffer);
if (!BloomPageIsMeta(page))
elog(ERROR, "Relation is not a bloom index");
meta = BloomPageGetMeta(BufferGetPage(buffer));
if (meta->magickNumber != BLOOM_MAGICK_NUMBER)
elog(ERROR, "Relation is not a bloom index");
*opts = meta->opts;
UnlockReleaseBuffer(buffer);
index->rd_amcache = (void *) opts;
}
memcpy(&state->opts, index->rd_amcache, sizeof(state->opts));
state->sizeOfBloomTuple = BLOOMTUPLEHDRSZ +
sizeof(BloomSignatureWord) * state->opts.bloomLength;
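/*
 * Illustrative arithmetic: with the default 5-word (80-bit) signature
 * this is BLOOMTUPLEHDRSZ plus 5 * sizeof(BloomSignatureWord) = 10
 * bytes of signature per indexed tuple.
 */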
}
/*
 * Random number generator copied from FreeBSD. We use our own generator
 * here for two reasons:
 *
 * 1) The numbers it produces determine the index's on-disk contents, so
 * they must be insulated from any future changes to PostgreSQL's own
 * generator.
 * 2) Reseeding PostgreSQL's random generator would be an undesirable
 * side effect.
 */
static int32 next;
static int32
myRand(void)
{
/*----------
* Compute x = (7^5 * x) mod (2^31 - 1)
* without overflowing 31 bits:
* (2^31 - 1) = 127773 * (7^5) + 2836
* From "Random number generators: good ones are hard to find",
* Park and Miller, Communications of the ACM, vol. 31, no. 10,
* October 1988, p. 1195.
*----------
*/
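/*
 * (This is Schrage's method: writing m = 2^31 - 1 = a*q + r with
 * a = 16807, q = 127773, r = 2836, we have a*x mod m =
 * a*(x mod q) - r*(x/q), adding m back if that goes negative; since
 * r < q, both products fit in 31 bits.)
 */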
int32 hi,
lo,
x;
/* Must be in [1, 0x7ffffffe] range at this point. */
hi = next / 127773;
lo = next % 127773;
x = 16807 * lo - 2836 * hi;
if (x < 0)
x += 0x7fffffff;
next = x;
/* Transform to [0, 0x7ffffffd] range. */
return (x - 1);
}
static void
mySrand(uint32 seed)
{
next = seed;
/* Transform to [1, 0x7ffffffe] range. */
next = (next % 0x7ffffffe) + 1;
}
/*
* Add bits of given value to the signature.
*/
void
signValue(BloomState *state, BloomSignatureWord *sign, Datum value, int attno)
{
uint32 hashVal;
int nBit,
j;
/*
 * Seed the generator with the column number to get a per-column seed.
 * We don't want the same value in different columns to map to the same
 * bits!
 */
mySrand(attno);
/*
 * Init the hash sequence that maps our value into bits. Because of the
 * per-column seeding above, equal values in different columns will be
 * mapped to different bits.
 */
hashVal = DatumGetInt32(FunctionCall1Coll(&state->hashFn[attno], state->collations[attno], value));
mySrand(hashVal ^ myRand());
for (j = 0; j < state->opts.bitSize[attno]; j++)
{
/* prevent multiple evaluation in SETBIT macro */
nBit = myRand() % (state->opts.bloomLength * SIGNWORDBITS);
SETBIT(sign, nBit);
}
}
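/*
 * The scan side (blgetbitmap in blscan.c) builds a query signature the
 * same way, then treats a tuple as a possible match only if every bit
 * set in the query signature is also set in the tuple's signature,
 * roughly per this sketch:
 *
 *     match = true;
 *     for (i = 0; i < state->opts.bloomLength; i++)
 *         if ((itup->sign[i] & querySign[i]) != querySign[i])
 *             match = false;
 *
 * Hash collisions can only set extra bits, so the filter can produce
 * false positives but never false negatives.
 */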
/*
* Make bloom tuple from values.
*/
BloomTuple *
BloomFormTuple(BloomState *state, ItemPointer iptr, Datum *values, bool *isnull)
{
int i;
BloomTuple *res = (BloomTuple *) palloc0(state->sizeOfBloomTuple);
res->heapPtr = *iptr;
/* Fold each column's value into the signature */
for (i = 0; i < state->nColumns; i++)
{
/* skip nulls */
if (isnull[i])
continue;
signValue(state, res->sign, values[i], i);
}
return res;
}
/*
* Add new bloom tuple to the page. Returns true if new tuple was successfully
* added to the page. Returns false if it doesn't fit on the page.
*/
bool
BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple)
{
BloomTuple *itup;
BloomPageOpaque opaque;
Pointer ptr;
/* We shouldn't be pointed to an invalid page */
Assert(!PageIsNew(page) && !BloomPageIsDeleted(page));
/* Does new tuple fit on the page? */
if (BloomPageGetFreeSpace(state, page) < state->sizeOfBloomTuple)
return false;
/* Copy new tuple to the end of page */
opaque = BloomPageGetOpaque(page);
itup = BloomPageGetTuple(state, page, opaque->maxoff + 1);
memcpy((Pointer) itup, (Pointer) tuple, state->sizeOfBloomTuple);
/* Adjust maxoff and pd_lower */
opaque->maxoff++;
ptr = (Pointer) BloomPageGetTuple(state, page, opaque->maxoff + 1);
((PageHeader) page)->pd_lower = ptr - page;
/* Assert we didn't overrun available space */
Assert(((PageHeader) page)->pd_lower <= ((PageHeader) page)->pd_upper);
return true;
}
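/*
 * Illustrative note: bloom pages hold fixed-size tuples packed densely
 * after the page header, with BloomPageOpaqueData in the special space.
 * pd_lower marks the end of the used area, so the free space checked
 * above is just the gap between pd_lower and pd_upper (see bloom.h for
 * the authoritative definitions).
 */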
/*
* Allocate a new page (either by recycling, or by extending the index file).
* The returned buffer is already pinned and exclusive-locked.
* Caller is responsible for initializing the page by calling BloomInitPage.
*/
Buffer
BloomNewBuffer(Relation index)
{
Buffer buffer;
/* First, try to get a page from FSM */
for (;;)
{
BlockNumber blkno = GetFreeIndexPage(index);
if (blkno == InvalidBlockNumber)
break;
buffer = ReadBuffer(index, blkno);
/*
* We have to guard against the possibility that someone else already
* recycled this page; the buffer may be locked if so.
*/
if (ConditionalLockBuffer(buffer))
{
Page page = BufferGetPage(buffer);
if (PageIsNew(page))
return buffer; /* OK to use, if never initialized */
if (BloomPageIsDeleted(page))
return buffer; /* OK to use */
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
}
/* Can't use it, so release buffer and try again */
ReleaseBuffer(buffer);
}
/* Must extend the file */
buffer = ExtendBufferedRel(BMR_REL(index), MAIN_FORKNUM, NULL,
EB_LOCK_FIRST);
return buffer;
}
/*
* Initialize any page of a bloom index.
*/
void
BloomInitPage(Page page, uint16 flags)
{
BloomPageOpaque opaque;
PageInit(page, BLCKSZ, sizeof(BloomPageOpaqueData));
opaque = BloomPageGetOpaque(page);
opaque->flags = flags;
opaque->bloom_page_id = BLOOM_PAGE_ID;
}
/*
* Fill in metapage for bloom index.
*/
void
BloomFillMetapage(Relation index, Page metaPage)
{
BloomOptions *opts;
BloomMetaPageData *metadata;
/*
* Choose the index's options. If reloptions have been assigned, use
* those, otherwise create default options.
*/
opts = (BloomOptions *) index->rd_options;
if (!opts)
opts = makeDefaultBloomOptions();
/*
* Initialize contents of meta page, including a copy of the options,
* which are now frozen for the life of the index.
*/
BloomInitPage(metaPage, BLOOM_META);
metadata = BloomPageGetMeta(metaPage);
memset(metadata, 0, sizeof(BloomMetaPageData));
metadata->magickNumber = BLOOM_MAGICK_NUMBER;
metadata->opts = *opts;
((PageHeader) metaPage)->pd_lower += sizeof(BloomMetaPageData);
/* If this fails, probably FreeBlockNumberArray size calc is wrong: */
Assert(((PageHeader) metaPage)->pd_lower <= ((PageHeader) metaPage)->pd_upper);
}
/*
* Initialize metapage for bloom index.
*/
void
BloomInitMetapage(Relation index, ForkNumber forknum)
{
Buffer metaBuffer;
Page metaPage;
GenericXLogState *state;
/*
* Make a new page; since it is the first page it should be associated with
* block number 0 (BLOOM_METAPAGE_BLKNO). No need to hold the extension
* lock because there cannot be concurrent inserters yet.
*/
metaBuffer = ReadBufferExtended(index, forknum, P_NEW, RBM_NORMAL, NULL);
LockBuffer(metaBuffer, BUFFER_LOCK_EXCLUSIVE);
Assert(BufferGetBlockNumber(metaBuffer) == BLOOM_METAPAGE_BLKNO);
/* Initialize contents of meta page */
state = GenericXLogStart(index);
metaPage = GenericXLogRegisterBuffer(state, metaBuffer,
GENERIC_XLOG_FULL_IMAGE);
BloomFillMetapage(index, metaPage);
GenericXLogFinish(state);
UnlockReleaseBuffer(metaBuffer);
}
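/*
 * The GenericXLogStart/RegisterBuffer/Finish pattern used above is how
 * this module WAL-logs all page changes: generic WAL records page
 * deltas, so the extension needs no redo routines of its own. A minimal
 * sketch of the same pattern for modifying one page:
 *
 *     state = GenericXLogStart(index);
 *     page = GenericXLogRegisterBuffer(state, buffer, 0);
 *     ... modify page contents ...
 *     GenericXLogFinish(state);
 */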
/*
* Parse reloptions for bloom index, producing a BloomOptions struct.
*/
bytea *
bloptions(Datum reloptions, bool validate)
{
BloomOptions *rdopts;
/* Parse the user-given reloptions */
rdopts = (BloomOptions *) build_reloptions(reloptions, validate,
bl_relopt_kind,
sizeof(BloomOptions),
bl_relopt_tab,
lengthof(bl_relopt_tab));
/* Convert signature length from # of bits to # of words, rounding up */
if (rdopts)
rdopts->bloomLength = (rdopts->bloomLength + SIGNWORDBITS - 1) / SIGNWORDBITS;
return (bytea *) rdopts;
}
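/*
 * Worked example (illustrative): WITH (length = 80) parses to
 * bloomLength = 80 bits, which the conversion above rounds up to
 * (80 + 15) / 16 = 5 sixteen-bit signature words.
 */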