postgresql/src/include/access/relscan.h
Andres Freund c3b23ae457 Don't take predicate locks for analyze scans, refactor scan option passing.
Before this commit, when ANALYZE was run on a table under serializable
isolation (either via an explicit BEGIN TRANSACTION ISOLATION LEVEL
SERIALIZABLE, or because default_transaction_isolation was set to
serializable), a null pointer dereference led to a crash.

The analyze scan doesn't need a snapshot (nor predicate locking), but
before this commit a scan descriptor only carried information about
whether it was a bitmap or sample scan, so there was no way to mark a
scan as being for ANALYZE.

Refactor the option passing to the scan_begin callback to use a
bitmask instead (sketched below). Alternatively we could have added a
new boolean parameter, but that seems harder to read. Even before this
issue, various people (Heikki, Tom, Robert) had suggested switching to
a bitmask.
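
For illustration, the bitmask approach looks roughly like the ScanOptions
enum this commit introduces in tableam.h (a sketch; see tableam.h for the
authoritative names and values):

    typedef enum ScanOptions
    {
        /* one of SO_TYPE_* may be specified */
        SO_TYPE_SEQSCAN = 1 << 0,
        SO_TYPE_BITMAPSCAN = 1 << 1,
        SO_TYPE_SAMPLESCAN = 1 << 2,
        SO_TYPE_ANALYZE = 1 << 3,

        /* several SO_ALLOW_* flags may be combined */
        SO_ALLOW_STRAT = 1 << 4,    /* allow use of a buffer access strategy */
        SO_ALLOW_SYNC = 1 << 5,     /* report location to syncscan logic */
        SO_ALLOW_PAGEMODE = 1 << 6, /* verify visibility page-at-a-time */

        SO_TEMP_SNAPSHOT = 1 << 7   /* unregister snapshot at scan end */
    } ScanOptions;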

These changes don't alter the scan APIs outside of tableam. The flags
argument could be exposed more widely, but that isn't necessary to fix
this problem, and the wrapper table_beginscan* functions encapsulate
most of that complexity anyway.
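
As an example of such a wrapper, the plain sequential-scan variant is
approximately the following (simplified; the real definition lives in
tableam.h):

    static inline TableScanDesc
    table_beginscan(Relation rel, Snapshot snapshot,
                    int nkeys, struct ScanKeyData *key)
    {
        uint32 flags = SO_TYPE_SEQSCAN |
            SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;

        return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key,
                                           NULL, flags);
    }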

After these changes, fixing the bug is trivial: just don't acquire a
predicate lock for analyze-style scans, as was already done for bitmap
heap scans.  Add an assert that a snapshot is passed when acquiring
the predicate lock, so this kind of bug can be caught without running
under serializable.
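
In spirit, the resulting guard in the heap code looks like this (a sketch,
not the literal patch):

    /*
     * Only scan types that participate in serializable conflict detection
     * take the relation-level predicate lock; analyze and bitmap scans
     * skip it.
     */
    if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
    {
        /* the predicate-lock path now insists on a snapshot */
        Assert(snapshot != InvalidSnapshot);
        PredicateLockRelation(relation, snapshot);
    }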

Also add a comment noting that sample scans currently require
predicate-locking the entire relation; previously that wasn't remarked
upon.

Reported-By: Joe Wildish
Author: Andres Freund
Discussion:
    https://postgr.es/m/4EA80A20-E9BF-49F1-9F01-5B66CAB21453@elusive.cx
    https://postgr.es/m/20190411164947.nkii4gaeilt4bui7@alap3.anarazel.de
    https://postgr.es/m/20190518203102.g7peu2fianukjuxm@alap3.anarazel.de
2019-05-19 15:10:28 -07:00


/*-------------------------------------------------------------------------
 *
 * relscan.h
 *	  POSTGRES relation scan descriptor definitions.
 *
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/relscan.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef RELSCAN_H
#define RELSCAN_H

#include "access/htup_details.h"
#include "access/itup.h"
#include "port/atomics.h"
#include "storage/buf.h"
#include "storage/spin.h"
#include "utils/relcache.h"


struct ParallelTableScanDescData;

/*
 * Generic descriptor for table scans. This is the base class for table
 * scans, which needs to be embedded in the scans of individual AMs.
 */
typedef struct TableScanDescData
{
	/* scan parameters */
	Relation	rs_rd;			/* heap relation descriptor */
	struct SnapshotData *rs_snapshot;	/* snapshot to see */
	int			rs_nkeys;		/* number of scan keys */
	struct ScanKeyData *rs_key; /* array of scan key descriptors */

	/*
	 * Information about the type and behaviour of the scan, a bitmask of
	 * members of the ScanOptions enum (see tableam.h).
	 */
	uint32		rs_flags;

	struct ParallelTableScanDescData *rs_parallel;	/* parallel scan
													 * information */
} TableScanDescData;
typedef struct TableScanDescData *TableScanDesc;
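
/*
 * Illustrative sketch (not part of this header): a table AM embeds the base
 * class above at the start of its own descriptor, as heapam does with
 * HeapScanDescData, so a TableScanDesc can be cast to the AM-specific type.
 * The struct name below is hypothetical.
 */
#ifdef NOT_USED
typedef struct ExampleAmScanDescData
{
	TableScanDescData rs_base;	/* AM independent part, must be first */
	/* ... AM-specific scan state would follow here ... */
} ExampleAmScanDescData;
#endif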

/*
 * Shared state for parallel table scan.
 *
 * Each backend participating in a parallel table scan has its own
 * TableScanDesc in backend-private memory, and those objects all contain a
 * pointer to this structure.  The information here must be sufficient to
 * properly initialize each new TableScanDesc as workers join the scan, and
 * it must serve as a source of information about what to scan for those
 * workers.
 */
typedef struct ParallelTableScanDescData
{
	Oid			phs_relid;		/* OID of relation to scan */
	bool		phs_syncscan;	/* report location to syncscan logic? */
	bool		phs_snapshot_any;	/* SnapshotAny, not phs_snapshot_data? */
	Size		phs_snapshot_off;	/* offset of snapshot data */
} ParallelTableScanDescData;
typedef struct ParallelTableScanDescData *ParallelTableScanDesc;

/*
 * Shared state for parallel table scans, for block-oriented storage.
 */
typedef struct ParallelBlockTableScanDescData
{
	ParallelTableScanDescData base;

	BlockNumber phs_nblocks;	/* # blocks in relation at start of scan */
	slock_t		phs_mutex;		/* mutual exclusion for setting startblock */
	BlockNumber phs_startblock; /* starting block number */
	pg_atomic_uint64 phs_nallocated;	/* number of blocks allocated to
										 * workers so far */
} ParallelBlockTableScanDescData;
typedef struct ParallelBlockTableScanDescData *ParallelBlockTableScanDesc;
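
/*
 * Illustrative sketch (not part of this header, and simplified from the
 * block-oriented parallel scan logic in tableam.c): a worker claims the next
 * block to scan with a single atomic fetch-add on phs_nallocated, wrapping
 * around so the scan can start at phs_startblock for synchronized scans.
 * The function name is hypothetical.
 */
#ifdef NOT_USED
static BlockNumber
example_parallelscan_nextblock(ParallelBlockTableScanDesc pbscan)
{
	uint64		nallocated;

	nallocated = pg_atomic_fetch_add_u64(&pbscan->phs_nallocated, 1);
	if (nallocated >= pbscan->phs_nblocks)
		return InvalidBlockNumber;	/* every block has been handed out */
	return (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;
}
#endif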

/*
 * Base class for fetches from a table via an index. This needs to be
 * embedded in the respective struct for individual AMs.
 */
typedef struct IndexFetchTableData
{
	Relation	rel;
} IndexFetchTableData;

/*
 * We use the same IndexScanDescData structure for both amgettuple-based
 * and amgetbitmap-based index scans.  Some fields are only relevant in
 * amgettuple-based scans.
 */
typedef struct IndexScanDescData
{
	/* scan parameters */
	Relation	heapRelation;	/* heap relation descriptor, or NULL */
	Relation	indexRelation;	/* index relation descriptor */
	struct SnapshotData *xs_snapshot;	/* snapshot to see */
	int			numberOfKeys;	/* number of index qualifier conditions */
	int			numberOfOrderBys;	/* number of ordering operators */
	struct ScanKeyData *keyData;	/* array of index qualifier descriptors */
	struct ScanKeyData *orderByData;	/* array of ordering op descriptors */
	bool		xs_want_itup;	/* caller requests index tuples */
	bool		xs_temp_snap;	/* unregister snapshot at scan end? */

	/* signaling to index AM about killing index tuples */
	bool		kill_prior_tuple;	/* last-returned tuple is dead */
	bool		ignore_killed_tuples;	/* do not return killed entries */
	bool		xactStartedInRecovery;	/* prevents killing/seeing killed
										 * tuples */

	/* index access method's private state */
	void	   *opaque;			/* access-method-specific info */

	/*
	 * In an index-only scan, a successful amgettuple call must fill either
	 * xs_itup (and xs_itupdesc) or xs_hitup (and xs_hitupdesc) to provide
	 * the data returned by the scan.  It can fill both, in which case the
	 * heap format will be used.
	 */
	IndexTuple	xs_itup;		/* index tuple returned by AM */
	struct TupleDescData *xs_itupdesc;	/* rowtype descriptor of xs_itup */
	HeapTuple	xs_hitup;		/* index data returned by AM, as HeapTuple */
	struct TupleDescData *xs_hitupdesc; /* rowtype descriptor of xs_hitup */

	ItemPointerData xs_heaptid; /* result */
	bool		xs_heap_continue;	/* T if must keep walking, potential
									 * further results */
	IndexFetchTableData *xs_heapfetch;

	bool		xs_recheck;		/* T means scan keys must be rechecked */

	/*
	 * When fetching with an ordering operator, the values of the ORDER BY
	 * expressions of the last returned tuple, according to the index.  If
	 * xs_recheckorderby is true, these need to be rechecked just like the
	 * scan keys, and the values returned here are a lower bound on the
	 * actual values.
	 */
	Datum	   *xs_orderbyvals;
	bool	   *xs_orderbynulls;
	bool		xs_recheckorderby;

	/* parallel index scan information, in shared memory */
	struct ParallelIndexScanDescData *parallel_scan;
} IndexScanDescData;

/* Generic structure for parallel index scans */
typedef struct ParallelIndexScanDescData
{
	Oid			ps_relid;
	Oid			ps_indexid;
	Size		ps_offset;		/* offset in bytes of AM-specific structure */
	char		ps_snapshot_data[FLEXIBLE_ARRAY_MEMBER];
} ParallelIndexScanDescData;
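
/*
 * Illustrative note (not part of this header): an index AM finds its private
 * shared area ps_offset bytes past this struct; nbtree, for example, does
 * roughly:
 *
 *	btscan = (BTParallelScanDesc)
 *		OffsetToPointer((void *) parallel_scan, parallel_scan->ps_offset);
 */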

struct TupleTableSlot;

/* Struct for storage-or-index scans of system tables */
typedef struct SysScanDescData
{
	Relation	heap_rel;		/* catalog being scanned */
	Relation	irel;			/* NULL if doing heap scan */
	struct TableScanDescData *scan; /* only valid in storage-scan case */
	struct IndexScanDescData *iscan;	/* only valid in index-scan case */
	struct SnapshotData *snapshot;	/* snapshot to unregister at end of scan */
	struct TupleTableSlot *slot;
} SysScanDescData;
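
/*
 * Illustrative usage (not part of this header; the functions are declared in
 * access/genam.h, and "IndexOid" stands in for a real catalog index OID):
 *
 *	scan = systable_beginscan(rel, IndexOid, true, NULL, nkeys, key);
 *	while (HeapTupleIsValid(tuple = systable_getnext(scan)))
 *		... examine tuple ...
 *	systable_endscan(scan);
 */
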
#endif /* RELSCAN_H */