Revert "Avoid the creation of the free space map for small heap relations".

This feature used a process-local map to track the first few blocks in
the relation.  The map was reset each time we got a block with enough
free space.  It was discussed that it would be better to track this map
on a per-relation basis in the relcache and to invalidate it whenever
vacuum frees up some space on a page or when the FSM is created.  The
new design would be better in terms of both API design and performance.
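
For reference, here is a simplified, self-contained sketch of the per-backend
map that this revert removes.  It is modeled on the FSMLocalMap code deleted
from freespace.c later in this diff, but it drops the Relation argument, the
cached-target-block handling, and the relkind checks, so treat it as an
illustration of the idea rather than the exact backend code.

/*
 * Illustrative sketch of the reverted per-backend map (modeled on the
 * FSMLocalMap code removed from freespace.c below).  Standalone, so
 * BlockNumber and the constants are redefined here.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t BlockNumber;

#define HEAP_FSM_CREATION_THRESHOLD 4   /* no FSM for heaps this small */
#define FSM_LOCAL_NOT_AVAIL 0x00        /* already tried, or past end of rel */
#define FSM_LOCAL_AVAIL     0x01        /* candidate block to try */

typedef struct
{
    BlockNumber nblocks;
    uint8_t     map[HEAP_FSM_CREATION_THRESHOLD];
} FSMLocalMap;

static FSMLocalMap fsm_local_map = {0, {FSM_LOCAL_NOT_AVAIL}};

/*
 * Mark every other block, starting from the last one, as worth trying.
 * Assumes cur_nblocks <= HEAP_FSM_CREATION_THRESHOLD, as in the original.
 */
static void
fsm_local_set(BlockNumber cur_nblocks)
{
    BlockNumber blkno = cur_nblocks - 1;

    for (;;)
    {
        fsm_local_map.map[blkno] = FSM_LOCAL_AVAIL;
        if (blkno >= 2)
            blkno -= 2;
        else
            break;
    }
    fsm_local_map.nblocks = cur_nblocks;
}

/* Return the highest-numbered untried block, or a sentinel if none is left. */
static BlockNumber
fsm_local_search(void)
{
    BlockNumber target = fsm_local_map.nblocks;

    do
    {
        target--;
        if (fsm_local_map.map[target] == FSM_LOCAL_AVAIL)
            return target;
    } while (target > 0);

    /* Nothing left to try: reset the map, as the real code did. */
    fsm_local_map.nblocks = 0;
    memset(fsm_local_map.map, FSM_LOCAL_NOT_AVAIL, sizeof(fsm_local_map.map));
    return (BlockNumber) -1;    /* stands in for InvalidBlockNumber */
}

int
main(void)
{
    fsm_local_set(3);           /* 3-page heap: blocks 0 and 2 marked available */
    printf("first candidate: %u\n", (unsigned) fsm_local_search());    /* 2 */
    fsm_local_map.map[2] = FSM_LOCAL_NOT_AVAIL; /* pretend block 2 was full */
    printf("next candidate:  %u\n", (unsigned) fsm_local_search());    /* 0 */
    return 0;
}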

List of commits reverted, in reverse chronological order:

06c8a5090e  Improve code comments in b0eaa4c51b.
13e8643bfc  During pg_upgrade, conditionally skip transfer of FSMs.
6f918159a9  Add more tests for FSM.
9c32e4c350  Clear the local map when not used.
29d108cdec  Update the documentation for FSM behavior.
08ecdfe7e5  Make FSM test portable.
b0eaa4c51b  Avoid creation of the free space map for small heap relations.

Discussion: https://postgr.es/m/20190416180452.3pm6uegx54iitbt5@alap3.anarazel.de
Amit Kapila 2019-05-07 09:30:24 +05:30
parent af82f95abb
commit 7db0cde6b5
23 changed files with 107 additions and 686 deletions


@ -1,56 +1,48 @@
CREATE EXTENSION pageinspect;
CREATE TABLE test_rel_forks (a int);
-- Make sure there are enough blocks in the heap for the FSM to be created.
INSERT INTO test_rel_forks SELECT i from generate_series(1,2000) i;
-- set up FSM and VM
VACUUM test_rel_forks;
CREATE TABLE test1 (a int, b int);
INSERT INTO test1 VALUES (16777217, 131584);
VACUUM test1; -- set up FSM
-- The page contents can vary, so just test that it can be read
-- successfully, but don't keep the output.
SELECT octet_length(get_raw_page('test_rel_forks', 'main', 0)) AS main_0;
SELECT octet_length(get_raw_page('test1', 'main', 0)) AS main_0;
main_0
--------
8192
(1 row)
SELECT octet_length(get_raw_page('test_rel_forks', 'main', 100)) AS main_100;
ERROR: block number 100 is out of range for relation "test_rel_forks"
SELECT octet_length(get_raw_page('test_rel_forks', 'fsm', 0)) AS fsm_0;
SELECT octet_length(get_raw_page('test1', 'main', 1)) AS main_1;
ERROR: block number 1 is out of range for relation "test1"
SELECT octet_length(get_raw_page('test1', 'fsm', 0)) AS fsm_0;
fsm_0
-------
8192
(1 row)
SELECT octet_length(get_raw_page('test_rel_forks', 'fsm', 20)) AS fsm_20;
ERROR: block number 20 is out of range for relation "test_rel_forks"
SELECT octet_length(get_raw_page('test_rel_forks', 'vm', 0)) AS vm_0;
SELECT octet_length(get_raw_page('test1', 'fsm', 1)) AS fsm_1;
fsm_1
-------
8192
(1 row)
SELECT octet_length(get_raw_page('test1', 'vm', 0)) AS vm_0;
vm_0
------
8192
(1 row)
SELECT octet_length(get_raw_page('test_rel_forks', 'vm', 1)) AS vm_1;
ERROR: block number 1 is out of range for relation "test_rel_forks"
SELECT octet_length(get_raw_page('test1', 'vm', 1)) AS vm_1;
ERROR: block number 1 is out of range for relation "test1"
SELECT octet_length(get_raw_page('xxx', 'main', 0));
ERROR: relation "xxx" does not exist
SELECT octet_length(get_raw_page('test_rel_forks', 'xxx', 0));
SELECT octet_length(get_raw_page('test1', 'xxx', 0));
ERROR: invalid fork name
HINT: Valid fork names are "main", "fsm", "vm", and "init".
EXPLAIN (costs off, analyze on, timing off, summary off) SELECT * FROM
fsm_page_contents(get_raw_page('test_rel_forks', 'fsm', 0));
QUERY PLAN
------------------------------------------------------------
Function Scan on fsm_page_contents (actual rows=1 loops=1)
(1 row)
SELECT get_raw_page('test_rel_forks', 0) = get_raw_page('test_rel_forks', 'main', 0);
SELECT get_raw_page('test1', 0) = get_raw_page('test1', 'main', 0);
?column?
----------
t
(1 row)
DROP TABLE test_rel_forks;
CREATE TABLE test1 (a int, b int);
INSERT INTO test1 VALUES (16777217, 131584);
SELECT pagesize, version FROM page_header(get_raw_page('test1', 0));
pagesize | version
----------+---------
@ -70,6 +62,26 @@ SELECT tuple_data_split('test1'::regclass, t_data, t_infomask, t_infomask2, t_bi
{"\\x01000001","\\x00020200"}
(1 row)
SELECT * FROM fsm_page_contents(get_raw_page('test1', 'fsm', 0));
fsm_page_contents
-------------------
0: 254 +
1: 254 +
3: 254 +
7: 254 +
15: 254 +
31: 254 +
63: 254 +
127: 254 +
255: 254 +
511: 254 +
1023: 254 +
2047: 254 +
4095: 254 +
fp_next_slot: 0 +
(1 row)
DROP TABLE test1;
-- check that using any of these functions with a partitioned table or index
-- would fail


@ -1,36 +1,26 @@
CREATE EXTENSION pageinspect;
CREATE TABLE test_rel_forks (a int);
-- Make sure there are enough blocks in the heap for the FSM to be created.
INSERT INTO test_rel_forks SELECT i from generate_series(1,2000) i;
CREATE TABLE test1 (a int, b int);
INSERT INTO test1 VALUES (16777217, 131584);
-- set up FSM and VM
VACUUM test_rel_forks;
VACUUM test1; -- set up FSM
-- The page contents can vary, so just test that it can be read
-- successfully, but don't keep the output.
SELECT octet_length(get_raw_page('test_rel_forks', 'main', 0)) AS main_0;
SELECT octet_length(get_raw_page('test_rel_forks', 'main', 100)) AS main_100;
SELECT octet_length(get_raw_page('test1', 'main', 0)) AS main_0;
SELECT octet_length(get_raw_page('test1', 'main', 1)) AS main_1;
SELECT octet_length(get_raw_page('test_rel_forks', 'fsm', 0)) AS fsm_0;
SELECT octet_length(get_raw_page('test_rel_forks', 'fsm', 20)) AS fsm_20;
SELECT octet_length(get_raw_page('test1', 'fsm', 0)) AS fsm_0;
SELECT octet_length(get_raw_page('test1', 'fsm', 1)) AS fsm_1;
SELECT octet_length(get_raw_page('test_rel_forks', 'vm', 0)) AS vm_0;
SELECT octet_length(get_raw_page('test_rel_forks', 'vm', 1)) AS vm_1;
SELECT octet_length(get_raw_page('test1', 'vm', 0)) AS vm_0;
SELECT octet_length(get_raw_page('test1', 'vm', 1)) AS vm_1;
SELECT octet_length(get_raw_page('xxx', 'main', 0));
SELECT octet_length(get_raw_page('test_rel_forks', 'xxx', 0));
SELECT octet_length(get_raw_page('test1', 'xxx', 0));
EXPLAIN (costs off, analyze on, timing off, summary off) SELECT * FROM
fsm_page_contents(get_raw_page('test_rel_forks', 'fsm', 0));
SELECT get_raw_page('test_rel_forks', 0) = get_raw_page('test_rel_forks', 'main', 0);
DROP TABLE test_rel_forks;
CREATE TABLE test1 (a int, b int);
INSERT INTO test1 VALUES (16777217, 131584);
SELECT get_raw_page('test1', 0) = get_raw_page('test1', 'main', 0);
SELECT pagesize, version FROM page_header(get_raw_page('test1', 0));
@ -39,6 +29,8 @@ SELECT page_checksum(get_raw_page('test1', 0), 0) IS NOT NULL AS silly_checksum_
SELECT tuple_data_split('test1'::regclass, t_data, t_infomask, t_infomask2, t_bits)
FROM heap_page_items(get_raw_page('test1', 0));
SELECT * FROM fsm_page_contents(get_raw_page('test1', 'fsm', 0));
DROP TABLE test1;
-- check that using any of these functions with a partitioned table or index


@ -90,9 +90,6 @@ statapprox_heap(Relation rel, output_type *stat)
/*
* If the page has only visible tuples, then we can find out the free
* space from the FSM and move on.
*
* Note: If a relation has no FSM, GetRecordedFreeSpace() will report
* zero free space. This is fine for the purposes of approximation.
*/
if (VM_ALL_VISIBLE(rel, blkno, &vmbuffer))
{


@ -61,8 +61,6 @@
The values stored in the free space map are not exact. They're rounded
to precision of 1/256th of <symbol>BLCKSZ</symbol> (32 bytes with default <symbol>BLCKSZ</symbol>), and
they're not kept fully up-to-date as tuples are inserted and updated.
In addition, small tables don't have a free space map, so these functions
will return zero even if free space is available.
</para>
<para>

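As a side note on the 1/256th-of-BLCKSZ precision mentioned in the
documentation hunk above, the sketch below shows how that rounding behaves.
The FSM_CATEGORIES/FSM_CAT_STEP names follow the convention used in
freespace.c, but the real code also handles edge cases (a reserved top
category for maximum-size requests) that are omitted here.

/*
 * Rough illustration of the 1/256-of-BLCKSZ precision described above.
 * The edge-case handling of the real category mapping is omitted.
 */
#include <stddef.h>
#include <stdio.h>

#define BLCKSZ 8192
#define FSM_CATEGORIES 256
#define FSM_CAT_STEP (BLCKSZ / FSM_CATEGORIES)  /* 32 bytes with 8 kB pages */

/* Map a byte count to an FSM category (0..255). */
static unsigned
space_to_cat(size_t avail)
{
    return (unsigned) (avail / FSM_CAT_STEP);
}

/* Map a category back to the conservative byte count it represents. */
static size_t
cat_to_space(unsigned cat)
{
    return (size_t) cat * FSM_CAT_STEP;
}

int
main(void)
{
    /* 100 bytes free rounds down to category 3, i.e. 96 bytes reported. */
    printf("100 bytes -> category %u -> %zu bytes\n",
           space_to_cat(100), cat_to_space(space_to_cat(100)));
    return 0;
}
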

@ -527,9 +527,7 @@ approx_free_percent | 2.09
bit set, then it is assumed to contain no dead tuples). For such
pages, it derives the free space value from the free space map, and
assumes that the rest of the space on the page is taken up by live
tuples. Small tables don't have a free space map, so in that case
this function will report zero free space, likewise inflating the
approximate tuple length.
tuples.
</para>
<para>


@ -812,13 +812,6 @@ psql --username=postgres --file=script.sql postgres
is down.
</para>
<para>
In <productname>PostgreSQL</productname> 12 and later small tables by
default don't have a free space map, as a space optimization. If you are
upgrading a pre-12 cluster, the free space maps of small tables will
likewise not be transferred to the new cluster.
</para>
</refsect1>
<refsect1>


@ -598,13 +598,12 @@ tuple would otherwise be too big.
<indexterm><primary>FSM</primary><see>Free Space Map</see></indexterm>
<para>
Each heap relation, unless it is very small, and each index relation, except
for hash indexes, has a Free Space Map (FSM) to keep track of available
space in the relation. It's stored alongside the main relation data in a
separate relation fork, named after the filenode number of the relation, plus
a <literal>_fsm</literal> suffix. For example, if the filenode of a relation
is 12345, the FSM is stored in a file called <filename>12345_fsm</filename>,
in the same directory as the main relation file.
Each heap and index relation, except for hash indexes, has a Free Space Map
(FSM) to keep track of available space in the relation. It's stored
alongside the main relation data in a separate relation fork, named after the
filenode number of the relation, plus a <literal>_fsm</literal> suffix. For example,
if the filenode of a relation is 12345, the FSM is stored in a file called
<filename>12345_fsm</filename>, in the same directory as the main relation file.
</para>
<para>


@ -1152,7 +1152,7 @@ terminate_brin_buildstate(BrinBuildState *state)
freespace = PageGetFreeSpace(page);
blk = BufferGetBlockNumber(state->bs_currentInsertBuf);
ReleaseBuffer(state->bs_currentInsertBuf);
RecordPageWithFreeSpace(state->bs_irel, blk, freespace, InvalidBlockNumber);
RecordPageWithFreeSpace(state->bs_irel, blk, freespace);
FreeSpaceMapVacuumRange(state->bs_irel, blk, blk + 1);
}


@ -310,7 +310,7 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange,
if (extended)
{
RecordPageWithFreeSpace(idxrel, newblk, freespace, InvalidBlockNumber);
RecordPageWithFreeSpace(idxrel, newblk, freespace);
FreeSpaceMapVacuumRange(idxrel, newblk, newblk + 1);
}
@ -461,7 +461,7 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange,
if (extended)
{
RecordPageWithFreeSpace(idxrel, blk, freespace, InvalidBlockNumber);
RecordPageWithFreeSpace(idxrel, blk, freespace);
FreeSpaceMapVacuumRange(idxrel, blk, blk + 1);
}
@ -654,7 +654,7 @@ brin_page_cleanup(Relation idxrel, Buffer buf)
/* Measure free space and record it */
RecordPageWithFreeSpace(idxrel, BufferGetBlockNumber(buf),
br_page_get_freespace(page), InvalidBlockNumber);
br_page_get_freespace(page));
}
/*
@ -703,7 +703,7 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz,
/* Choose initial target page, re-using existing target if known */
newblk = RelationGetTargetBlock(irel);
if (newblk == InvalidBlockNumber)
newblk = GetPageWithFreeSpace(irel, itemsz, true);
newblk = GetPageWithFreeSpace(irel, itemsz);
/*
* Loop until we find a page with sufficient free space. By the time we
@ -895,7 +895,7 @@ brin_initialize_empty_new_buffer(Relation idxrel, Buffer buffer)
* pages whose FSM records were forgotten in a crash.
*/
RecordPageWithFreeSpace(idxrel, BufferGetBlockNumber(buffer),
br_page_get_freespace(page), InvalidBlockNumber);
br_page_get_freespace(page));
}


@ -246,14 +246,8 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
* Immediately update the bottom level of the FSM. This has a good
* chance of making this page visible to other concurrently inserting
* backends, and we want that to happen without delay.
*
* Since we know the table will end up with extraBlocks additional
* pages, we pass the final number to avoid possible unnecessary
* system calls and to make sure the FSM is created when we add the
* first new page.
*/
RecordPageWithFreeSpace(relation, blockNum, freespace,
firstBlock + extraBlocks);
RecordPageWithFreeSpace(relation, blockNum, freespace);
}
while (--extraBlocks > 0);
@ -390,9 +384,20 @@ RelationGetBufferForTuple(Relation relation, Size len,
* We have no cached target page, so ask the FSM for an initial
* target.
*/
targetBlock = GetPageWithFreeSpace(relation,
len + saveFreeSpace,
false);
targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
/*
* If the FSM knows nothing of the rel, try the last page before we
* give up and extend. This avoids one-tuple-per-page syndrome during
* bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
{
BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
if (nblocks > 0)
targetBlock = nblocks - 1;
}
}
loop:
@ -499,13 +504,6 @@ loop:
{
/* use this page as future insert target, too */
RelationSetTargetBlock(relation, targetBlock);
/*
* In case we used an in-memory map of available blocks, reset it
* for next use.
*/
FSMClearLocalMap();
return buffer;
}
@ -565,12 +563,9 @@ loop:
/*
* Check if some other backend has extended a block for us while
* we were waiting on the lock. We only check the FSM -- if there
* isn't one we don't recheck the number of blocks.
* we were waiting on the lock.
*/
targetBlock = GetPageWithFreeSpace(relation,
len + saveFreeSpace,
true);
targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
/*
* If some other waiter has already extended the relation, we
@ -675,8 +670,5 @@ loop:
*/
RelationSetTargetBlock(relation, BufferGetBlockNumber(buffer));
/* This should already be cleared by now, but make sure it is. */
FSMClearLocalMap();
return buffer;
}
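
The RelationGetBufferForTuple() hunk above (hio.c) restores the pre-patch
caller pattern: ask the FSM, and if it returns InvalidBlockNumber, try the
last heap page before extending the relation.  The toy program below
illustrates that shape only; the array-based "FSM" and the function names are
stand-ins invented for this sketch, not backend APIs.

/*
 * Toy, self-contained illustration of the caller pattern shown above:
 * consult the FSM, fall back to the last heap page if it knows nothing,
 * otherwise the caller would extend the relation.  The per-block category
 * array is a stand-in for the real FSM, not the actual data structure.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;
#define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

#define NBLOCKS 3
static uint8_t toy_fsm[NBLOCKS];    /* 0 = no free space recorded */

/* Return the first block whose recorded category covers the request. */
static BlockNumber
get_page_with_free_space(uint8_t needed_cat)
{
    for (BlockNumber b = 0; b < NBLOCKS; b++)
        if (toy_fsm[b] != 0 && toy_fsm[b] >= needed_cat)
            return b;
    return InvalidBlockNumber;
}

static BlockNumber
choose_target_block(uint8_t needed_cat)
{
    BlockNumber target = get_page_with_free_space(needed_cat);

    /*
     * If the FSM knows nothing of the rel, try the last page before
     * giving up and extending (the fallback restored by this revert).
     */
    if (target == InvalidBlockNumber && NBLOCKS > 0)
        target = NBLOCKS - 1;

    return target;
}

int
main(void)
{
    printf("empty FSM    -> try block %u\n", (unsigned) choose_target_block(10)); /* 2 */
    toy_fsm[1] = 200;   /* pretend VACUUM recorded space on block 1 */
    printf("after record -> try block %u\n", (unsigned) choose_target_block(10)); /* 1 */
    return 0;
}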


@ -153,7 +153,7 @@ static BufferAccessStrategy vac_strategy;
static void lazy_scan_heap(Relation onerel, VacuumParams *params,
LVRelStats *vacrelstats, Relation *Irel, int nindexes,
bool aggressive);
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks);
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
static void lazy_vacuum_index(Relation indrel,
IndexBulkDeleteResult **stats,
@ -780,7 +780,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
/* Remove tuples from heap */
lazy_vacuum_heap(onerel, vacrelstats, nblocks);
lazy_vacuum_heap(onerel, vacrelstats);
/*
* Forget the now-vacuumed tuples, and press on, but be careful
@ -919,7 +919,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
Size freespace;
freespace = BufferGetPageSize(buf) - SizeOfPageHeaderData;
RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
RecordPageWithFreeSpace(onerel, blkno, freespace);
}
}
continue;
@ -963,7 +963,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
}
UnlockReleaseBuffer(buf);
RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
RecordPageWithFreeSpace(onerel, blkno, freespace);
continue;
}
@ -1381,7 +1381,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* taken if there are no indexes.)
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
RecordPageWithFreeSpace(onerel, blkno, freespace);
}
/* report that everything is scanned and vacuumed */
@ -1443,7 +1443,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
/* Remove tuples from heap */
pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
lazy_vacuum_heap(onerel, vacrelstats, nblocks);
lazy_vacuum_heap(onerel, vacrelstats);
vacrelstats->num_index_scans++;
}
@ -1517,10 +1517,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* Note: the reason for doing this as a second pass is we cannot remove
* the tuples until we've removed their index entries, and we want to
* process index entry removal in batches as large as possible.
* Note: nblocks is passed as an optimization for RecordPageWithFreeSpace().
*/
static void
lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks)
lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
{
int tupindex;
int npages;
@ -1557,7 +1556,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks)
freespace = PageGetHeapFreeSpace(page);
UnlockReleaseBuffer(buf);
RecordPageWithFreeSpace(onerel, tblk, freespace, nblocks);
RecordPageWithFreeSpace(onerel, tblk, freespace);
npages++;
}


@ -48,7 +48,6 @@
#include "replication/walsender.h"
#include "storage/condition_variable.h"
#include "storage/fd.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/md.h"
#include "storage/predicate.h"
@ -2587,12 +2586,6 @@ AbortTransaction(void)
pgstat_report_wait_end();
pgstat_progress_end_command();
/*
* In case we aborted during RelationGetBufferForTuple(), clear the local
* map of heap pages.
*/
FSMClearLocalMap();
/* Clean up buffer I/O and buffer context locks, too */
AbortBufferIO();
UnlockBuffers();
@ -4880,13 +4873,6 @@ AbortSubTransaction(void)
pgstat_report_wait_end();
pgstat_progress_end_command();
/*
* In case we aborted during RelationGetBufferForTuple(), clear the local
* map of heap pages.
*/
FSMClearLocalMap();
AbortBufferIO();
UnlockBuffers();


@ -8,41 +8,7 @@ free space to hold a tuple to be stored; or to determine that no such page
exists and the relation must be extended by one page. As of PostgreSQL 8.4
each relation has its own, extensible free space map stored in a separate
"fork" of its relation. This eliminates the disadvantages of the former
fixed-size FSM. There are two exceptions:
1. Hash indexes never have a FSM.
2. For very small tables, a 3-page relation fork would be relatively large
and wasteful, so to save space we refrain from creating the FSM if the
heap has HEAP_FSM_CREATION_THRESHOLD pages or fewer.
To locate free space in the latter case, we simply try pages directly without
knowing ahead of time how much free space they have. To maintain good
performance, we create a local in-memory map of pages to try, and only mark
every other page as available. For example, in a 3-page heap, the local map
would look like:
ANAN
0123
Pages 0 and 2 are marked "available", and page 1 as "not available".
Page 3 is beyond the end of the relation, so is likewise marked "not
available". First we try page 2, and if that doesn't have sufficient free
space we try page 0 before giving up and extending the relation. There may
be some wasted free space on block 1, but if the relation extends to 4 pages:
NANA
0123
We not only have the new page 3 at our disposal, we can now check page 1
for free space as well.
Once the FSM is created for a heap we don't remove it even if somebody deletes
all the rows from the corresponding relation. We don't think it is a useful
optimization as it is quite likely that relation will again grow to the same
size.
FSM data structure
------------------
fixed-size FSM.
It is important to keep the map small so that it can be searched rapidly.
Therefore, we don't attempt to record the exact free space on a page.
@ -226,3 +192,5 @@ TODO
----
- fastroot to avoid traversing upper nodes with just 1 child
- use a different system for tables that fit into one FSM page, with a
mechanism to switch to the real thing as it grows.


@ -76,14 +76,6 @@
#define FSM_ROOT_LEVEL (FSM_TREE_DEPTH - 1)
#define FSM_BOTTOM_LEVEL 0
/* Status codes for the local map. */
/* Either already tried, or beyond the end of the relation */
#define FSM_LOCAL_NOT_AVAIL 0x00
/* Available to try */
#define FSM_LOCAL_AVAIL 0x01
/*
* The internal FSM routines work on a logical addressing scheme. Each
* level of the tree can be thought of as a separately addressable file.
@ -97,32 +89,6 @@ typedef struct
/* Address of the root page. */
static const FSMAddress FSM_ROOT_ADDRESS = {FSM_ROOT_LEVEL, 0};
/*
* For small relations, we don't create FSM to save space, instead we use
* local in-memory map of pages to try. To locate free space, we simply try
* pages directly without knowing ahead of time how much free space they have.
*
* Note that this map is used to the find the block with required free space
* for any given relation. We clear this map when we have found a block with
* enough free space, when we extend the relation, or on transaction abort.
* See src/backend/storage/freespace/README for further details.
*/
typedef struct
{
BlockNumber nblocks;
uint8 map[HEAP_FSM_CREATION_THRESHOLD];
} FSMLocalMap;
static FSMLocalMap fsm_local_map =
{
0,
{
FSM_LOCAL_NOT_AVAIL
}
};
#define FSM_LOCAL_MAP_EXISTS (fsm_local_map.nblocks > 0)
/* functions to navigate the tree */
static FSMAddress fsm_get_child(FSMAddress parent, uint16 slot);
static FSMAddress fsm_get_parent(FSMAddress child, uint16 *slot);
@ -141,14 +107,10 @@ static Size fsm_space_cat_to_avail(uint8 cat);
/* workhorse functions for various operations */
static int fsm_set_and_search(Relation rel, FSMAddress addr, uint16 slot,
uint8 newValue, uint8 minValue);
static void fsm_local_set(Relation rel, BlockNumber cur_nblocks);
static BlockNumber fsm_search(Relation rel, uint8 min_cat);
static BlockNumber fsm_local_search(void);
static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr,
BlockNumber start, BlockNumber end,
bool *eof);
static bool fsm_allow_writes(Relation rel, BlockNumber heapblk,
BlockNumber nblocks, BlockNumber *get_nblocks);
/******** Public API ********/
@ -165,46 +127,13 @@ static bool fsm_allow_writes(Relation rel, BlockNumber heapblk,
* amount of free space available on that page and then try again (see
* RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
* extend the relation.
*
* For very small heap relations that don't have a FSM, we try every other
* page before extending the relation. To keep track of which pages have
* been tried, initialize a local in-memory map of pages.
*/
BlockNumber
GetPageWithFreeSpace(Relation rel, Size spaceNeeded, bool check_fsm_only)
GetPageWithFreeSpace(Relation rel, Size spaceNeeded)
{
uint8 min_cat = fsm_space_needed_to_cat(spaceNeeded);
BlockNumber target_block,
nblocks;
/* First try the FSM, if it exists. */
target_block = fsm_search(rel, min_cat);
if (target_block == InvalidBlockNumber &&
(rel->rd_rel->relkind == RELKIND_RELATION ||
rel->rd_rel->relkind == RELKIND_TOASTVALUE) &&
!check_fsm_only)
{
nblocks = RelationGetNumberOfBlocks(rel);
if (nblocks > HEAP_FSM_CREATION_THRESHOLD)
{
/*
* If the FSM knows nothing of the rel, try the last page before
* we give up and extend. This avoids one-tuple-per-page syndrome
* during bootstrapping or in a recently-started system.
*/
target_block = nblocks - 1;
}
else if (nblocks > 0)
{
/* Initialize local map and get first candidate block. */
fsm_local_set(rel, nblocks);
target_block = fsm_local_search();
}
}
return target_block;
return fsm_search(rel, min_cat);
}
/*
@ -215,47 +144,16 @@ GetPageWithFreeSpace(Relation rel, Size spaceNeeded, bool check_fsm_only)
* also some effort to return a page close to the old page; if there's a
* page with enough free space on the same FSM page where the old one page
* is located, it is preferred.
*
* For very small heap relations that don't have a FSM, we update the local
* map to indicate we have tried a page, and return the next page to try.
*/
BlockNumber
RecordAndGetPageWithFreeSpace(Relation rel, BlockNumber oldPage,
Size oldSpaceAvail, Size spaceNeeded)
{
int old_cat;
int search_cat;
int old_cat = fsm_space_avail_to_cat(oldSpaceAvail);
int search_cat = fsm_space_needed_to_cat(spaceNeeded);
FSMAddress addr;
uint16 slot;
int search_slot;
BlockNumber nblocks = InvalidBlockNumber;
/* First try the local map, if it exists. */
if (FSM_LOCAL_MAP_EXISTS)
{
Assert((rel->rd_rel->relkind == RELKIND_RELATION ||
rel->rd_rel->relkind == RELKIND_TOASTVALUE) &&
fsm_local_map.map[oldPage] == FSM_LOCAL_AVAIL);
fsm_local_map.map[oldPage] = FSM_LOCAL_NOT_AVAIL;
return fsm_local_search();
}
if (!fsm_allow_writes(rel, oldPage, InvalidBlockNumber, &nblocks))
{
/*
* If we have neither a local map nor a FSM, we probably just tried
* the target block in the smgr relation entry and failed, so we'll
* need to create the local map.
*/
fsm_local_set(rel, nblocks);
return fsm_local_search();
}
/* Normal FSM logic follows */
old_cat = fsm_space_avail_to_cat(oldSpaceAvail);
search_cat = fsm_space_needed_to_cat(spaceNeeded);
/* Get the location of the FSM byte representing the heap block */
addr = fsm_get_location(oldPage, &slot);
@ -278,44 +176,20 @@ RecordAndGetPageWithFreeSpace(Relation rel, BlockNumber oldPage,
* Note that if the new spaceAvail value is higher than the old value stored
* in the FSM, the space might not become visible to searchers until the next
* FreeSpaceMapVacuum call, which updates the upper level pages.
*
* Callers have no need for a local map.
*/
void
RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk,
Size spaceAvail, BlockNumber nblocks)
RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
{
int new_cat;
int new_cat = fsm_space_avail_to_cat(spaceAvail);
FSMAddress addr;
uint16 slot;
BlockNumber dummy;
if (!fsm_allow_writes(rel, heapBlk, nblocks, &dummy))
/* No FSM to update and no local map either */
return;
/* Get the location of the FSM byte representing the heap block */
addr = fsm_get_location(heapBlk, &slot);
new_cat = fsm_space_avail_to_cat(spaceAvail);
fsm_set_and_search(rel, addr, slot, new_cat, 0);
}
/*
* Clear the local map. We must call this when we have found a block with
* enough free space, when we extend the relation, or on transaction abort.
*/
void
FSMClearLocalMap(void)
{
if (FSM_LOCAL_MAP_EXISTS)
{
fsm_local_map.nblocks = 0;
memset(&fsm_local_map.map, FSM_LOCAL_NOT_AVAIL,
sizeof(fsm_local_map.map));
}
}
/*
* XLogRecordPageWithFreeSpace - like RecordPageWithFreeSpace, for use in
* WAL replay
@ -330,31 +204,6 @@ XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
BlockNumber blkno;
Buffer buf;
Page page;
bool write_to_fsm;
/* This is meant to mirror the logic in fsm_allow_writes() */
if (heapBlk >= HEAP_FSM_CREATION_THRESHOLD)
write_to_fsm = true;
else
{
/* Open the relation at smgr level */
SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
if (smgrexists(smgr, FSM_FORKNUM))
write_to_fsm = true;
else
{
BlockNumber heap_nblocks = smgrnblocks(smgr, MAIN_FORKNUM);
if (heap_nblocks > HEAP_FSM_CREATION_THRESHOLD)
write_to_fsm = true;
else
write_to_fsm = false;
}
}
if (!write_to_fsm)
return;
/* Get the location of the FSM byte representing the heap block */
addr = fsm_get_location(heapBlk, &slot);
@ -1055,141 +904,3 @@ fsm_vacuum_page(Relation rel, FSMAddress addr,
return max_avail;
}
/*
* For heaps, we prevent creation of the FSM unless the number of pages
* exceeds HEAP_FSM_CREATION_THRESHOLD. For tables that don't already have
* a FSM, this will save an inode and a few kB of space.
*
* XXX The API is a little awkward -- if the caller passes a valid nblocks
* value, it can avoid invoking a system call. If the caller passes
* InvalidBlockNumber and receives a false return value, it can get an
* up-to-date relation size from get_nblocks. This saves a few cycles in
* the caller, which would otherwise need to get the relation size by itself.
*/
static bool
fsm_allow_writes(Relation rel, BlockNumber heapblk,
BlockNumber nblocks, BlockNumber *get_nblocks)
{
bool skip_get_nblocks;
if (heapblk >= HEAP_FSM_CREATION_THRESHOLD)
return true;
/* Non-heap rels can always create a FSM. */
if (rel->rd_rel->relkind != RELKIND_RELATION &&
rel->rd_rel->relkind != RELKIND_TOASTVALUE)
return true;
/*
* If the caller knows nblocks, we can avoid a system call later. If it
* doesn't, maybe we have relpages from a previous VACUUM. Since the table
* may have extended since then, we still have to count the pages later if
* we can't return now.
*/
if (nblocks != InvalidBlockNumber)
{
if (nblocks > HEAP_FSM_CREATION_THRESHOLD)
return true;
else
skip_get_nblocks = true;
}
else
{
if (rel->rd_rel->relpages != InvalidBlockNumber &&
rel->rd_rel->relpages > HEAP_FSM_CREATION_THRESHOLD)
return true;
else
skip_get_nblocks = false;
}
RelationOpenSmgr(rel);
if (smgrexists(rel->rd_smgr, FSM_FORKNUM))
return true;
if (skip_get_nblocks)
return false;
/* last resort */
*get_nblocks = RelationGetNumberOfBlocks(rel);
if (*get_nblocks > HEAP_FSM_CREATION_THRESHOLD)
return true;
else
return false;
}
/*
* Initialize the local map of blocks to try, for when there is no FSM.
*
* When we initialize the map, the whole heap is potentially available to
* try. Testing revealed that trying every block can cause a small
* performance dip compared to when we use a FSM, so we try every other
* block instead.
*/
static void
fsm_local_set(Relation rel, BlockNumber cur_nblocks)
{
BlockNumber blkno,
cached_target_block;
/* The local map must not be set already. */
Assert(!FSM_LOCAL_MAP_EXISTS);
/*
* Starting at the current last block in the relation and working
* backwards, mark alternating blocks as available.
*/
blkno = cur_nblocks - 1;
while (true)
{
fsm_local_map.map[blkno] = FSM_LOCAL_AVAIL;
if (blkno >= 2)
blkno -= 2;
else
break;
}
/* Cache the number of blocks. */
fsm_local_map.nblocks = cur_nblocks;
/* Set the status of the cached target block to 'unavailable'. */
cached_target_block = RelationGetTargetBlock(rel);
if (cached_target_block != InvalidBlockNumber &&
cached_target_block < cur_nblocks)
fsm_local_map.map[cached_target_block] = FSM_LOCAL_NOT_AVAIL;
}
/*
* Search the local map for an available block to try, in descending order.
* As such, there is no heuristic available to decide which order will be
* better to try, but the probability of having space in the last block in the
* map is higher because that is the most recent block added to the heap.
*
* This function is used when there is no FSM.
*/
static BlockNumber
fsm_local_search(void)
{
BlockNumber target_block;
/* Local map must be set by now. */
Assert(FSM_LOCAL_MAP_EXISTS);
target_block = fsm_local_map.nblocks;
do
{
target_block--;
if (fsm_local_map.map[target_block] == FSM_LOCAL_AVAIL)
return target_block;
} while (target_block > 0);
/*
* If we didn't find any available block to try in the local map, then
* clear it. This prevents us from using the map again without setting it
* first, which would otherwise lead to the same conclusion again and
* again.
*/
FSMClearLocalMap();
return InvalidBlockNumber;
}


@ -37,7 +37,7 @@
BlockNumber
GetFreeIndexPage(Relation rel)
{
BlockNumber blkno = GetPageWithFreeSpace(rel, BLCKSZ / 2, true);
BlockNumber blkno = GetPageWithFreeSpace(rel, BLCKSZ / 2);
if (blkno != InvalidBlockNumber)
RecordUsedIndexPage(rel, blkno);
@ -51,7 +51,7 @@ GetFreeIndexPage(Relation rel)
void
RecordFreeIndexPage(Relation rel, BlockNumber freeBlock)
{
RecordPageWithFreeSpace(rel, freeBlock, BLCKSZ - 1, InvalidBlockNumber);
RecordPageWithFreeSpace(rel, freeBlock, BLCKSZ - 1);
}
@ -61,7 +61,7 @@ RecordFreeIndexPage(Relation rel, BlockNumber freeBlock)
void
RecordUsedIndexPage(Relation rel, BlockNumber usedBlock)
{
RecordPageWithFreeSpace(rel, usedBlock, 0, InvalidBlockNumber);
RecordPageWithFreeSpace(rel, usedBlock, 0);
}
/*


@ -200,8 +200,6 @@ create_rel_filename_map(const char *old_data, const char *new_data,
map->old_db_oid = old_db->db_oid;
map->new_db_oid = new_db->db_oid;
map->relpages = old_rel->relpages;
map->relkind = old_rel->relkind;
/*
* old_relfilenode might differ from pg_class.oid (and hence
@ -420,7 +418,6 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
char *nspname = NULL;
char *relname = NULL;
char *tablespace = NULL;
char *relkind = NULL;
int i_spclocation,
i_nspname,
i_relname,
@ -428,9 +425,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
i_indtable,
i_toastheap,
i_relfilenode,
i_reltablespace,
i_relpages,
i_relkind;
i_reltablespace;
char query[QUERY_ALLOC];
char *last_namespace = NULL,
*last_tablespace = NULL;
@ -499,7 +494,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
*/
snprintf(query + strlen(query), sizeof(query) - strlen(query),
"SELECT all_rels.*, n.nspname, c.relname, "
" c.relfilenode, c.reltablespace, c.relpages, c.relkind, %s "
" c.relfilenode, c.reltablespace, %s "
"FROM (SELECT * FROM regular_heap "
" UNION ALL "
" SELECT * FROM toast_heap "
@ -530,8 +525,6 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
i_relname = PQfnumber(res, "relname");
i_relfilenode = PQfnumber(res, "relfilenode");
i_reltablespace = PQfnumber(res, "reltablespace");
i_relpages = PQfnumber(res, "relpages");
i_relkind = PQfnumber(res, "relkind");
i_spclocation = PQfnumber(res, "spclocation");
for (relnum = 0; relnum < ntups; relnum++)
@ -563,11 +556,6 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
curr->relname = pg_strdup(relname);
curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));
curr->relpages = atoi(PQgetvalue(res, relnum, i_relpages));
relkind = PQgetvalue(res, relnum, i_relkind);
curr->relkind = relkind[0];
curr->tblsp_alloc = false;
/* Is the tablespace oid non-default? */


@ -147,8 +147,6 @@ typedef struct
char *tablespace; /* tablespace path; "" for cluster default */
bool nsp_alloc; /* should nspname be freed? */
bool tblsp_alloc; /* should tablespace be freed? */
int32 relpages; /* # of pages -- see pg_class.h */
char relkind; /* relation kind -- see pg_class.h */
} RelInfo;
typedef struct
@ -175,10 +173,6 @@ typedef struct
*/
Oid old_relfilenode;
Oid new_relfilenode;
int32 relpages; /* # of pages -- see pg_class.h */
char relkind; /* relation kind -- see pg_class.h */
/* the rest are used only for logging and error reporting */
char *nspname; /* namespaces */
char *relname;


@ -14,12 +14,10 @@
#include <sys/stat.h>
#include "catalog/pg_class_d.h"
#include "access/transam.h"
#include "storage/freespace.h"
static void transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace);
static void transfer_relfile(FileNameMap *map, const char *suffix, bool vm_must_add_frozenbit);
static bool new_cluster_needs_fsm(FileNameMap *map);
/*
@ -176,8 +174,7 @@ transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace)
/*
* Copy/link any fsm and vm files, if they exist
*/
if (new_cluster_needs_fsm(&maps[mapnum]))
transfer_relfile(&maps[mapnum], "_fsm", vm_must_add_frozenbit);
transfer_relfile(&maps[mapnum], "_fsm", vm_must_add_frozenbit);
if (vm_crashsafe_match)
transfer_relfile(&maps[mapnum], "_vm", vm_must_add_frozenbit);
}
@ -281,61 +278,3 @@ transfer_relfile(FileNameMap *map, const char *type_suffix, bool vm_must_add_fro
}
}
}
/*
* new_cluster_needs_fsm()
*
* Return false for small heaps if we're upgrading across PG 12, the first
* version where small heap relations don't have FSMs by default.
*/
static bool
new_cluster_needs_fsm(FileNameMap *map)
{
char old_primary_file[MAXPGPATH];
struct stat statbuf;
/* fsm/vm files added in PG 8.4 */
Assert(GET_MAJOR_VERSION(old_cluster.major_version) >= 804);
if (!(GET_MAJOR_VERSION(old_cluster.major_version) <= 1100 &&
GET_MAJOR_VERSION(new_cluster.major_version) >= 1200))
return true;
/* Always transfer FSMs of non-heap relations. */
if (map->relkind != RELKIND_RELATION &&
map->relkind != RELKIND_TOASTVALUE)
return true;
/*
* If pg_class.relpages falsely reports that the heap is above the
* threshold, we will transfer a FSM when we don't need to, but this is
* harmless.
*/
if (map->relpages > HEAP_FSM_CREATION_THRESHOLD)
return true;
/* Determine path of the primary file. */
snprintf(old_primary_file, sizeof(old_primary_file), "%s%s/%u/%u",
map->old_tablespace,
map->old_tablespace_suffix,
map->old_db_oid,
map->old_relfilenode);
/*
* If pg_class.relpages falsely reports that the heap is below the
* threshold, a FSM would be skipped when we actually need it. To guard
* against this, we verify the size of the primary file.
*/
if (stat(old_primary_file, &statbuf) != 0)
{
pg_fatal("error while checking for file existence \"%s.%s\" (\"%s\"): %s\n",
map->nspname, map->relname, old_primary_file, strerror(errno));
/* Keep compiler quiet. */
return false;
}
else if (statbuf.st_size > HEAP_FSM_CREATION_THRESHOLD * BLCKSZ)
return true;
else
return false;
}


@ -18,20 +18,15 @@
#include "storage/relfilenode.h"
#include "utils/relcache.h"
/* Only create the FSM if the heap has greater than this many blocks */
#define HEAP_FSM_CREATION_THRESHOLD 4
/* prototypes for public functions in freespace.c */
extern Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk);
extern BlockNumber GetPageWithFreeSpace(Relation rel, Size spaceNeeded,
bool check_fsm_only);
extern BlockNumber GetPageWithFreeSpace(Relation rel, Size spaceNeeded);
extern BlockNumber RecordAndGetPageWithFreeSpace(Relation rel,
BlockNumber oldPage,
Size oldSpaceAvail,
Size spaceNeeded);
extern void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk,
Size spaceAvail, BlockNumber nblocks);
extern void FSMClearLocalMap(void);
Size spaceAvail);
extern void XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
Size spaceAvail);


@ -1,73 +0,0 @@
--
-- Free Space Map test
--
SELECT current_setting('block_size')::integer AS blocksize,
current_setting('block_size')::integer / 8 AS strsize
\gset
CREATE TABLE fsm_check_size (num int, str text);
-- Fill 3 blocks with one record each
ALTER TABLE fsm_check_size SET (fillfactor=15);
INSERT INTO fsm_check_size SELECT i, rpad('', :strsize, 'a')
FROM generate_series(1,3) i;
-- There should be no FSM
VACUUM fsm_check_size;
SELECT pg_relation_size('fsm_check_size', 'main') / :blocksize AS heap_nblocks,
pg_relation_size('fsm_check_size', 'fsm') / :blocksize AS fsm_nblocks;
heap_nblocks | fsm_nblocks
--------------+-------------
3 | 0
(1 row)
-- The following operations are for testing the functionality of the local
-- in-memory map. In particular, we want to be able to insert into some
-- other block than the one at the end of the heap, without using a FSM.
-- Fill most of the last block
ALTER TABLE fsm_check_size SET (fillfactor=100);
INSERT INTO fsm_check_size SELECT i, rpad('', :strsize, 'a')
FROM generate_series(101,105) i;
-- Make sure records can go into any block but the last one
ALTER TABLE fsm_check_size SET (fillfactor=30);
-- Insert large record and make sure it does not cause the relation to extend
INSERT INTO fsm_check_size VALUES (111, rpad('', :strsize, 'a'));
VACUUM fsm_check_size;
SELECT pg_relation_size('fsm_check_size', 'main') / :blocksize AS heap_nblocks,
pg_relation_size('fsm_check_size', 'fsm') / :blocksize AS fsm_nblocks;
heap_nblocks | fsm_nblocks
--------------+-------------
3 | 0
(1 row)
-- Extend table with enough blocks to exceed the FSM threshold
DO $$
DECLARE curtid tid;
num int;
BEGIN
num = 11;
LOOP
INSERT INTO fsm_check_size VALUES (num, 'b') RETURNING ctid INTO curtid;
EXIT WHEN curtid >= tid '(4, 0)';
num = num + 1;
END LOOP;
END;
$$;
VACUUM fsm_check_size;
SELECT pg_relation_size('fsm_check_size', 'fsm') / :blocksize AS fsm_nblocks;
fsm_nblocks
-------------
3
(1 row)
-- Add long random string to extend TOAST table to 1 block
INSERT INTO fsm_check_size
VALUES(0, (SELECT string_agg(md5(chr(i)), '')
FROM generate_series(1, :blocksize / 100) i));
VACUUM fsm_check_size;
SELECT pg_relation_size(reltoastrelid, 'main') / :blocksize AS toast_nblocks,
pg_relation_size(reltoastrelid, 'fsm') / :blocksize AS toast_fsm_nblocks
FROM pg_class WHERE relname = 'fsm_check_size';
toast_nblocks | toast_fsm_nblocks
---------------+-------------------
1 | 0
(1 row)
DROP TABLE fsm_check_size;


@ -20,7 +20,7 @@ test: boolean char name varchar text int2 int4 int8 oid float4 float8 bit numeri
# strings depends on char, varchar and text
# numerology depends on int2, int4, int8, float4, float8
# ----------
test: strings numerology point lseg line box path polygon circle date time timetz timestamp timestamptz interval inet macaddr macaddr8 tstypes fsm
test: strings numerology point lseg line box path polygon circle date time timetz timestamp timestamptz interval inet macaddr macaddr8 tstypes
# ----------
# Another group of parallel tests


@ -40,7 +40,6 @@ test: inet
test: macaddr
test: macaddr8
test: tstypes
test: fsm
test: geometry
test: horology
test: regex


@ -1,66 +0,0 @@
--
-- Free Space Map test
--
SELECT current_setting('block_size')::integer AS blocksize,
current_setting('block_size')::integer / 8 AS strsize
\gset
CREATE TABLE fsm_check_size (num int, str text);
-- Fill 3 blocks with one record each
ALTER TABLE fsm_check_size SET (fillfactor=15);
INSERT INTO fsm_check_size SELECT i, rpad('', :strsize, 'a')
FROM generate_series(1,3) i;
-- There should be no FSM
VACUUM fsm_check_size;
SELECT pg_relation_size('fsm_check_size', 'main') / :blocksize AS heap_nblocks,
pg_relation_size('fsm_check_size', 'fsm') / :blocksize AS fsm_nblocks;
-- The following operations are for testing the functionality of the local
-- in-memory map. In particular, we want to be able to insert into some
-- other block than the one at the end of the heap, without using a FSM.
-- Fill most of the last block
ALTER TABLE fsm_check_size SET (fillfactor=100);
INSERT INTO fsm_check_size SELECT i, rpad('', :strsize, 'a')
FROM generate_series(101,105) i;
-- Make sure records can go into any block but the last one
ALTER TABLE fsm_check_size SET (fillfactor=30);
-- Insert large record and make sure it does not cause the relation to extend
INSERT INTO fsm_check_size VALUES (111, rpad('', :strsize, 'a'));
VACUUM fsm_check_size;
SELECT pg_relation_size('fsm_check_size', 'main') / :blocksize AS heap_nblocks,
pg_relation_size('fsm_check_size', 'fsm') / :blocksize AS fsm_nblocks;
-- Extend table with enough blocks to exceed the FSM threshold
DO $$
DECLARE curtid tid;
num int;
BEGIN
num = 11;
LOOP
INSERT INTO fsm_check_size VALUES (num, 'b') RETURNING ctid INTO curtid;
EXIT WHEN curtid >= tid '(4, 0)';
num = num + 1;
END LOOP;
END;
$$;
VACUUM fsm_check_size;
SELECT pg_relation_size('fsm_check_size', 'fsm') / :blocksize AS fsm_nblocks;
-- Add long random string to extend TOAST table to 1 block
INSERT INTO fsm_check_size
VALUES(0, (SELECT string_agg(md5(chr(i)), '')
FROM generate_series(1, :blocksize / 100) i));
VACUUM fsm_check_size;
SELECT pg_relation_size(reltoastrelid, 'main') / :blocksize AS toast_nblocks,
pg_relation_size(reltoastrelid, 'fsm') / :blocksize AS toast_fsm_nblocks
FROM pg_class WHERE relname = 'fsm_check_size';
DROP TABLE fsm_check_size;