/*-------------------------------------------------------------------------
 *
 * blutils.c
 *		Bloom index utilities.
 *
 * Portions Copyright (c) 2016-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1990-1993, Regents of the University of California
 *
 * IDENTIFICATION
 *	  contrib/bloom/blutils.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/amapi.h"
#include "access/generic_xlog.h"
#include "access/reloptions.h"
#include "bloom.h"
#include "catalog/index.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
#include "utils/memutils.h"

/* Signature dealing macros - note i is assumed to be of type int */
#define GETWORD(x,i) ( *( (BloomSignatureWord *)(x) + ( (i) / SIGNWORDBITS ) ) )
#define CLRBIT(x,i)  GETWORD(x,i) &= ~( 0x01 << ( (i) % SIGNWORDBITS ) )
#define SETBIT(x,i)  GETWORD(x,i) |=  ( 0x01 << ( (i) % SIGNWORDBITS ) )
#define GETBIT(x,i) ( (GETWORD(x,i) >> ( (i) % SIGNWORDBITS )) & 0x01 )
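
/*
 * For illustration (a sketch, assuming 16-bit signature words, i.e.
 * SIGNWORDBITS == 16): bit number 37 lives in word 37 / 16 = 2, at bit
 * position 37 % 16 = 5, so SETBIT(sign, 37) ORs (0x01 << 5) into sign[2].
 */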

PG_FUNCTION_INFO_V1(blhandler);

/* Kind of relation options for bloom index */
static relopt_kind bl_relopt_kind;

/* parse table for fillRelOptions */
static relopt_parse_elt bl_relopt_tab[INDEX_MAX_KEYS + 1];

static int32 myRand(void);
static void mySrand(uint32 seed);

/*
 * Module initialize function: initialize info about Bloom relation options.
 *
 * Note: keep this in sync with makeDefaultBloomOptions().
 */
void
_PG_init(void)
{
	int			i;
	char		buf[16];

	bl_relopt_kind = add_reloption_kind();

	/* Option for length of signature */
	add_int_reloption(bl_relopt_kind, "length",
					  "Length of signature in bits",
					  DEFAULT_BLOOM_LENGTH, 1, MAX_BLOOM_LENGTH,
					  AccessExclusiveLock);
	bl_relopt_tab[0].optname = "length";
	bl_relopt_tab[0].opttype = RELOPT_TYPE_INT;
	bl_relopt_tab[0].offset = offsetof(BloomOptions, bloomLength);

	/* Number of bits for each possible index column: col1, col2, ... */
	for (i = 0; i < INDEX_MAX_KEYS; i++)
	{
		snprintf(buf, sizeof(buf), "col%d", i + 1);
		add_int_reloption(bl_relopt_kind, buf,
						  "Number of bits generated for each index column",
						  DEFAULT_BLOOM_BITS, 1, MAX_BLOOM_BITS,
						  AccessExclusiveLock);
		bl_relopt_tab[i + 1].optname = MemoryContextStrdup(TopMemoryContext,
														   buf);
		bl_relopt_tab[i + 1].opttype = RELOPT_TYPE_INT;
		bl_relopt_tab[i + 1].offset = offsetof(BloomOptions, bitSize[0]) + sizeof(int) * i;
	}
}
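
/*
 * Usage sketch (not part of this file): once the extension is installed,
 * the reloptions registered above are what appear in statements such as
 *
 *		CREATE INDEX bloomidx ON tbl USING bloom (i1, i2)
 *			WITH (length = 80, col1 = 2, col2 = 4);
 *
 * where "length" is the signature size in bits and "colN" is the number of
 * bits set per indexed value of column N.
 */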

/*
 * Construct a default set of Bloom options.
 */
static BloomOptions *
makeDefaultBloomOptions(void)
{
	BloomOptions *opts;
	int			i;

	opts = (BloomOptions *) palloc0(sizeof(BloomOptions));
	/* Convert DEFAULT_BLOOM_LENGTH from # of bits to # of words */
	opts->bloomLength = (DEFAULT_BLOOM_LENGTH + SIGNWORDBITS - 1) / SIGNWORDBITS;
	for (i = 0; i < INDEX_MAX_KEYS; i++)
		opts->bitSize[i] = DEFAULT_BLOOM_BITS;
	SET_VARSIZE(opts, sizeof(BloomOptions));
	return opts;
}
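
/*
 * A worked example (assuming DEFAULT_BLOOM_LENGTH is 80 bits and 16-bit
 * signature words): the rounding-up division above yields
 * (80 + 15) / 16 = 5 words for the default signature.
 */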

/*
 * Bloom handler function: return IndexAmRoutine with access method parameters
 * and callbacks.
 */
Datum
blhandler(PG_FUNCTION_ARGS)
{
	IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

	amroutine->amstrategies = BLOOM_NSTRATEGIES;
	amroutine->amsupport = BLOOM_NPROC;
	amroutine->amoptsprocnum = BLOOM_OPTIONS_PROC;
	amroutine->amcanorder = false;
	amroutine->amcanorderbyop = false;
	amroutine->amcanbackward = false;
	amroutine->amcanunique = false;
	amroutine->amcanmulticol = true;
	amroutine->amoptionalkey = true;
	amroutine->amsearcharray = false;
	amroutine->amsearchnulls = false;
	amroutine->amstorage = false;
	amroutine->amclusterable = false;
	amroutine->ampredlocks = false;
	amroutine->amcanparallel = false;
	amroutine->amcanbuildparallel = false;
	amroutine->amcaninclude = false;
	amroutine->amusemaintenanceworkmem = false;
	amroutine->amparallelvacuumoptions =
		VACUUM_OPTION_PARALLEL_BULKDEL | VACUUM_OPTION_PARALLEL_CLEANUP;
	amroutine->amkeytype = InvalidOid;

	amroutine->ambuild = blbuild;
	amroutine->ambuildempty = blbuildempty;
	amroutine->aminsert = blinsert;
	amroutine->aminsertcleanup = NULL;
	amroutine->ambulkdelete = blbulkdelete;
	amroutine->amvacuumcleanup = blvacuumcleanup;
	amroutine->amcanreturn = NULL;
	amroutine->amcostestimate = blcostestimate;
	amroutine->amoptions = bloptions;
	amroutine->amproperty = NULL;
	amroutine->ambuildphasename = NULL;
	amroutine->amvalidate = blvalidate;
	amroutine->amadjustmembers = NULL;
	amroutine->ambeginscan = blbeginscan;
	amroutine->amrescan = blrescan;
	amroutine->amgettuple = NULL;
	amroutine->amgetbitmap = blgetbitmap;
	amroutine->amendscan = blendscan;
	amroutine->ammarkpos = NULL;
	amroutine->amrestrpos = NULL;
	amroutine->amestimateparallelscan = NULL;
	amroutine->aminitparallelscan = NULL;
	amroutine->amparallelrescan = NULL;

	PG_RETURN_POINTER(amroutine);
}
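
/*
 * For context: this handler is what the extension's SQL script ties to the
 * access method, presumably with something like
 *
 *		CREATE ACCESS METHOD bloom TYPE INDEX HANDLER blhandler;
 */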

/*
 * Fill BloomState structure for particular index.
 */
void
initBloomState(BloomState *state, Relation index)
{
	int			i;

	state->nColumns = index->rd_att->natts;

	/* Initialize hash function for each attribute */
	for (i = 0; i < index->rd_att->natts; i++)
	{
		fmgr_info_copy(&(state->hashFn[i]),
					   index_getprocinfo(index, i + 1, BLOOM_HASH_PROC),
					   CurrentMemoryContext);
		state->collations[i] = index->rd_indcollation[i];
	}

	/* Initialize amcache if needed with options from metapage */
	if (!index->rd_amcache)
	{
		Buffer		buffer;
		Page		page;
		BloomMetaPageData *meta;
		BloomOptions *opts;

		opts = MemoryContextAlloc(index->rd_indexcxt, sizeof(BloomOptions));

		buffer = ReadBuffer(index, BLOOM_METAPAGE_BLKNO);
		LockBuffer(buffer, BUFFER_LOCK_SHARE);

		page = BufferGetPage(buffer);

		if (!BloomPageIsMeta(page))
			elog(ERROR, "Relation is not a bloom index");
		meta = BloomPageGetMeta(BufferGetPage(buffer));

		if (meta->magickNumber != BLOOM_MAGICK_NUMBER)
			elog(ERROR, "Relation is not a bloom index");

		*opts = meta->opts;

		UnlockReleaseBuffer(buffer);

		index->rd_amcache = (void *) opts;
	}

	memcpy(&state->opts, index->rd_amcache, sizeof(state->opts));
	state->sizeOfBloomTuple = BLOOMTUPLEHDRSZ +
		sizeof(BloomSignatureWord) * state->opts.bloomLength;
}

/*
 * Random generator copied from FreeBSD.  We use our own generator here for
 * two reasons:
 *
 * 1) The random numbers are used for on-disk storage, so depending on the
 * PostgreSQL generator would tie the on-disk format to any change in that
 * generator.
 * 2) Changing the seed of the PostgreSQL random generator would be an
 * undesirable side effect.
 */
static int32 next;

static int32
myRand(void)
{
	/*----------
	 * Compute x = (7^5 * x) mod (2^31 - 1)
	 * without overflowing 31 bits:
	 *		(2^31 - 1) = 127773 * (7^5) + 2836
	 * From "Random number generators: good ones are hard to find",
	 * Park and Miller, Communications of the ACM, vol. 31, no. 10,
	 * October 1988, p. 1195.
	 *----------
	 */
	int32		hi,
				lo,
				x;

	/* Must be in [1, 0x7ffffffe] range at this point. */
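	/*
	 * The hi/lo split below is Schrage's method: it evaluates
	 * (16807 * next) mod 0x7fffffff without overflowing a signed
	 * 32-bit intermediate.
	 */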
	hi = next / 127773;
	lo = next % 127773;
	x = 16807 * lo - 2836 * hi;
	if (x < 0)
		x += 0x7fffffff;
	next = x;
	/* Transform to [0, 0x7ffffffd] range. */
	return (x - 1);
}

static void
mySrand(uint32 seed)
{
	next = seed;
	/* Transform to [1, 0x7ffffffe] range. */
	next = (next % 0x7ffffffe) + 1;
}

/*
 * Add bits of given value to the signature.
 */
void
signValue(BloomState *state, BloomSignatureWord *sign, Datum value, int attno)
{
	uint32		hashVal;
	int			nBit,
				j;

	/*
	 * Init the generator with the column number to get a "hashed" seed for
	 * the new value.  We don't want to map the same numbers from different
	 * columns into the same bits!
	 */
	mySrand(attno);

	/*
	 * Init the hash sequence to map our value into bits.  The same values in
	 * different columns will be mapped into different bits because of the
	 * step above.
	 */
	hashVal = DatumGetInt32(FunctionCall1Coll(&state->hashFn[attno], state->collations[attno], value));
	mySrand(hashVal ^ myRand());

	for (j = 0; j < state->opts.bitSize[attno]; j++)
	{
		/* prevent multiple evaluation in SETBIT macro */
		nBit = myRand() % (state->opts.bloomLength * SIGNWORDBITS);
		SETBIT(sign, nBit);
	}
}
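
/*
 * For illustration: with the default of two bits per column (per
 * DEFAULT_BLOOM_BITS), each indexed value sets up to two pseudo-random bits
 * in the signature.  A search recomputes the same bit positions and requires
 * all of them to be set, so the filter can report false positives but never
 * false negatives.
 */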

/*
 * Make bloom tuple from values.
 */
BloomTuple *
BloomFormTuple(BloomState *state, ItemPointer iptr, Datum *values, bool *isnull)
{
	int			i;
	BloomTuple *res = (BloomTuple *) palloc0(state->sizeOfBloomTuple);

	res->heapPtr = *iptr;

	/* Blooming each column */
	for (i = 0; i < state->nColumns; i++)
	{
		/* skip nulls */
		if (isnull[i])
			continue;

		signValue(state, res->sign, values[i], i);
	}

	return res;
}

/*
 * Add new bloom tuple to the page.  Returns true if new tuple was
 * successfully added to the page.  Returns false if it doesn't fit on
 * the page.
 */
bool
BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple)
{
	BloomTuple *itup;
	BloomPageOpaque opaque;
	Pointer		ptr;

	/* We shouldn't be pointed to an invalid page */
	Assert(!PageIsNew(page) && !BloomPageIsDeleted(page));

	/* Does new tuple fit on the page? */
	if (BloomPageGetFreeSpace(state, page) < state->sizeOfBloomTuple)
		return false;

	/* Copy new tuple to the end of page */
	opaque = BloomPageGetOpaque(page);
	itup = BloomPageGetTuple(state, page, opaque->maxoff + 1);
	memcpy((Pointer) itup, (Pointer) tuple, state->sizeOfBloomTuple);

	/* Adjust maxoff and pd_lower */
	opaque->maxoff++;
	ptr = (Pointer) BloomPageGetTuple(state, page, opaque->maxoff + 1);
	((PageHeader) page)->pd_lower = ptr - page;

	/* Assert we didn't overrun available space */
	Assert(((PageHeader) page)->pd_lower <= ((PageHeader) page)->pd_upper);

	return true;
}

/*
 * Allocate a new page (either by recycling, or by extending the index file).
 * The returned buffer is already pinned and exclusive-locked.
 * Caller is responsible for initializing the page by calling BloomInitPage.
 */
Buffer
BloomNewBuffer(Relation index)
{
	Buffer		buffer;

	/* First, try to get a page from FSM */
	for (;;)
	{
		BlockNumber blkno = GetFreeIndexPage(index);

		if (blkno == InvalidBlockNumber)
			break;

		buffer = ReadBuffer(index, blkno);

		/*
		 * We have to guard against the possibility that someone else already
		 * recycled this page; the buffer may be locked if so.
		 */
		if (ConditionalLockBuffer(buffer))
		{
			Page		page = BufferGetPage(buffer);

			if (PageIsNew(page))
				return buffer;	/* OK to use, if never initialized */

			if (BloomPageIsDeleted(page))
				return buffer;	/* OK to use */

			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		}

		/* Can't use it, so release buffer and try again */
		ReleaseBuffer(buffer);
	}

	/* Must extend the file */
	buffer = ExtendBufferedRel(BMR_REL(index), MAIN_FORKNUM, NULL,
							   EB_LOCK_FIRST);

	return buffer;
}

/*
 * Initialize any page of a bloom index.
 */
void
BloomInitPage(Page page, uint16 flags)
{
	BloomPageOpaque opaque;

	PageInit(page, BLCKSZ, sizeof(BloomPageOpaqueData));

	opaque = BloomPageGetOpaque(page);
	opaque->flags = flags;
	opaque->bloom_page_id = BLOOM_PAGE_ID;
}

/*
 * Fill in metapage for bloom index.
 */
void
BloomFillMetapage(Relation index, Page metaPage)
{
	BloomOptions *opts;
	BloomMetaPageData *metadata;

	/*
	 * Choose the index's options.  If reloptions have been assigned, use
	 * those, otherwise create default options.
	 */
	opts = (BloomOptions *) index->rd_options;
	if (!opts)
		opts = makeDefaultBloomOptions();

	/*
	 * Initialize contents of meta page, including a copy of the options,
	 * which are now frozen for the life of the index.
	 */
	BloomInitPage(metaPage, BLOOM_META);
	metadata = BloomPageGetMeta(metaPage);
	memset(metadata, 0, sizeof(BloomMetaPageData));
	metadata->magickNumber = BLOOM_MAGICK_NUMBER;
	metadata->opts = *opts;
	((PageHeader) metaPage)->pd_lower += sizeof(BloomMetaPageData);

	/* If this fails, probably FreeBlockNumberArray size calc is wrong: */
	Assert(((PageHeader) metaPage)->pd_lower <= ((PageHeader) metaPage)->pd_upper);
}

/*
 * Initialize metapage for bloom index.
 */
void
BloomInitMetapage(Relation index, ForkNumber forknum)
{
	Buffer		metaBuffer;
	Page		metaPage;
	GenericXLogState *state;

	/*
	 * Make a new page; since it is the first page it should be associated
	 * with block number 0 (BLOOM_METAPAGE_BLKNO).  No need to hold the
	 * extension lock because there cannot be concurrent inserters yet.
	 */
	metaBuffer = ReadBufferExtended(index, forknum, P_NEW, RBM_NORMAL, NULL);
	LockBuffer(metaBuffer, BUFFER_LOCK_EXCLUSIVE);
	Assert(BufferGetBlockNumber(metaBuffer) == BLOOM_METAPAGE_BLKNO);

	/* Initialize contents of meta page */
	state = GenericXLogStart(index);
	metaPage = GenericXLogRegisterBuffer(state, metaBuffer,
										 GENERIC_XLOG_FULL_IMAGE);
	BloomFillMetapage(index, metaPage);
	GenericXLogFinish(state);

	UnlockReleaseBuffer(metaBuffer);
}

/*
 * Parse reloptions for bloom index, producing a BloomOptions struct.
 */
bytea *
bloptions(Datum reloptions, bool validate)
{
	BloomOptions *rdopts;

	/* Parse the user-given reloptions */
	rdopts = (BloomOptions *) build_reloptions(reloptions, validate,
											   bl_relopt_kind,
											   sizeof(BloomOptions),
											   bl_relopt_tab,
											   lengthof(bl_relopt_tab));

	/* Convert signature length from # of bits to # of words, rounding up */
	if (rdopts)
		rdopts->bloomLength = (rdopts->bloomLength + SIGNWORDBITS - 1) / SIGNWORDBITS;

	return (bytea *) rdopts;
}