pgstat: store statistics in shared memory.

Previously the statistics collector received statistics updates via UDP and
shared statistics data by writing them out to temporary files regularly. These
files can reach tens of megabytes and are written out up to twice a
second. This has repeatedly prevented us from adding additional useful
statistics.

Now statistics are stored in shared memory. Statistics for variable-numbered
objects are stored in a dshash hashtable (backed by dynamic shared
memory). Fixed-numbered stats are stored in plain shared memory.

The header for pgstat.c contains an overview of the architecture.

The stats collector is not needed anymore, remove it.

By utilizing the transactional statistics drop infrastructure introduced in a
prior commit, statistics entries cannot "leak" anymore. Previously leaked
statistics were dropped by pgstat_vacuum_stat(), called from [auto-]vacuum. On
systems with many small relations pgstat_vacuum_stat() could be quite
expensive.

Now that replicas drop statistics entries for dropped objects, it is not
necessary anymore to reset stats when starting from a cleanly shut down
replica.

Subsequent commits will perform some further code cleanup, adapt docs and add
tests.

Bumps PGSTAT_FILE_FORMAT_ID.

Author: Kyotaro Horiguchi <horikyota.ntt@gmail.com>
Author: Andres Freund <andres@anarazel.de>
Author: Melanie Plageman <melanieplageman@gmail.com>
Reviewed-By: Andres Freund <andres@anarazel.de>
Reviewed-By: Thomas Munro <thomas.munro@gmail.com>
Reviewed-By: Justin Pryzby <pryzby@telsasoft.com>
Reviewed-By: "David G. Johnston" <david.g.johnston@gmail.com>
Reviewed-By: Tomas Vondra <tomas.vondra@2ndquadrant.com> (in a much earlier version)
Reviewed-By: Arthur Zakirov <a.zakirov@postgrespro.ru> (in a much earlier version)
Reviewed-By: Antonin Houska <ah@cybertec.at> (in a much earlier version)
Discussion: https://postgr.es/m/20220303021600.hs34ghqcw6zcokdh@alap3.anarazel.de
Discussion: https://postgr.es/m/20220308205351.2xcn6k4x5yivcxyd@alap3.anarazel.de
Discussion: https://postgr.es/m/20210319235115.y3wz7hpnnrshdyv6@alap3.anarazel.de
This commit is contained in:
Andres Freund 2022-04-06 21:29:46 -07:00
parent be902e2651
commit 5891c7a8ed
50 changed files with 4395 additions and 5485 deletions

View File

@ -1110,10 +1110,6 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry><literal>LogicalLauncherMain</literal></entry>
<entry>Waiting in main loop of logical replication launcher process.</entry>
</row>
<row>
<entry><literal>PgStatMain</literal></entry>
<entry>Waiting in main loop of statistics collector process.</entry>
</row>
<row>
<entry><literal>RecoveryWalStream</literal></entry>
<entry>Waiting in main loop of startup process for WAL to arrive, during
@ -2115,6 +2111,18 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry>Waiting to access the list of predicate locks held by
serializable transactions.</entry>
</row>
<row>
<entry><literal>PgStatsDSA</literal></entry>
<entry>Waiting for stats dynamic shared memory allocator access.</entry>
</row>
<row>
<entry><literal>PgStatsHash</literal></entry>
<entry>Waiting for stats shared memory hash table access.</entry>
</row>
<row>
<entry><literal>PgStatsData</literal></entry>
<entry>Waiting for shared memory stats data access.</entry>
</row>
<row>
<entry><literal>SerializableXactHash</literal></entry>
<entry>Waiting to read or update information about serializable
@ -5142,7 +5150,8 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
<returnvalue>timestamp with time zone</returnvalue>
</para>
<para>
Returns the timestamp of the current statistics snapshot.
Returns the timestamp of the current statistics snapshot, or NULL if
no statistics snapshot has been taken.
</para></entry>
</row>

View File

@ -1842,7 +1842,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
WriteRqst.Flush = 0;
XLogWrite(WriteRqst, tli, false);
LWLockRelease(WALWriteLock);
WalStats.m_wal_buffers_full++;
PendingWalStats.wal_buffers_full++;
TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_DONE();
}
/* Re-acquire WALBufMappingLock and retry */
@ -2200,10 +2200,10 @@ XLogWrite(XLogwrtRqst WriteRqst, TimeLineID tli, bool flexible)
INSTR_TIME_SET_CURRENT(duration);
INSTR_TIME_SUBTRACT(duration, start);
WalStats.m_wal_write_time += INSTR_TIME_GET_MICROSEC(duration);
PendingWalStats.wal_write_time += INSTR_TIME_GET_MICROSEC(duration);
}
WalStats.m_wal_write++;
PendingWalStats.wal_write++;
if (written <= 0)
{
@ -4877,6 +4877,7 @@ StartupXLOG(void)
XLogCtlInsert *Insert;
CheckPoint checkPoint;
bool wasShutdown;
bool didCrash;
bool haveTblspcMap;
bool haveBackupLabel;
XLogRecPtr EndOfLog;
@ -4994,7 +4995,10 @@ StartupXLOG(void)
{
RemoveTempXlogFiles();
SyncDataDirectory();
didCrash = true;
}
else
didCrash = false;
/*
* Prepare for WAL recovery if needed.
@ -5106,6 +5110,22 @@ StartupXLOG(void)
*/
restoreTwoPhaseData();
/*
* When starting with crash recovery, reset pgstat data - it might not be
* valid. Otherwise restore pgstat data. It's safe to do this here,
* because postmaster will not yet have started any other processes.
*
* NB: Restoring replication slot stats relies on slot state to have
* already been restored from disk.
*
* TODO: With a bit of extra work we could just start with a pgstat file
* associated with the checkpoint redo location we're starting from.
*/
if (didCrash)
pgstat_discard_stats();
else
pgstat_restore_stats();
lastFullPageWrites = checkPoint.fullPageWrites;
RedoRecPtr = XLogCtl->RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
@ -5180,11 +5200,6 @@ StartupXLOG(void)
LocalMinRecoveryPointTLI = 0;
}
/*
* Reset pgstat data, because it may be invalid after recovery.
*/
pgstat_reset_all();
/* Check that the GUCs used to generate the WAL allow recovery */
CheckRequiredParameterValues();
@ -6081,8 +6096,8 @@ LogCheckpointEnd(bool restartpoint)
CheckpointStats.ckpt_sync_end_t);
/* Accumulate checkpoint timing summary data, in milliseconds. */
PendingCheckpointerStats.m_checkpoint_write_time += write_msecs;
PendingCheckpointerStats.m_checkpoint_sync_time += sync_msecs;
PendingCheckpointerStats.checkpoint_write_time += write_msecs;
PendingCheckpointerStats.checkpoint_sync_time += sync_msecs;
/*
* All of the published timing statistics are accounted for. Only
@ -8009,10 +8024,10 @@ issue_xlog_fsync(int fd, XLogSegNo segno, TimeLineID tli)
INSTR_TIME_SET_CURRENT(duration);
INSTR_TIME_SUBTRACT(duration, start);
WalStats.m_wal_sync_time += INSTR_TIME_GET_MICROSEC(duration);
PendingWalStats.wal_sync_time += INSTR_TIME_GET_MICROSEC(duration);
}
WalStats.m_wal_sync++;
PendingWalStats.wal_sync++;
}
/*

View File

@ -351,13 +351,6 @@ vacuum(List *relations, VacuumParams *params,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("PROCESS_TOAST required with VACUUM FULL")));
/*
* Send info about dead objects to the cumulative stats system, unless
* we are in autovacuum --- autovacuum.c does this for itself.
*/
if ((params->options & VACOPT_VACUUM) && !IsAutoVacuumWorkerProcess())
pgstat_vacuum_stat();
/*
* Create special memory context for cross-transaction storage.
*

View File

@ -28,6 +28,7 @@
#include "access/amapi.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/index.h"
#include "commands/vacuum.h"
#include "optimizer/paths.h"
@ -35,6 +36,7 @@
#include "storage/bufmgr.h"
#include "tcop/tcopprot.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
/*
* DSM keys for parallel vacuum. Unlike other parallel execution code, since

View File

@ -44,11 +44,12 @@
* Note that there can be more than one worker in a database concurrently.
* They will store the table they are currently vacuuming in shared memory, so
* that other workers avoid being blocked waiting for the vacuum lock for that
* table. They will also reload the pgstats data just before vacuuming each
* table, to avoid vacuuming a table that was just finished being vacuumed by
* another worker and thus is no longer noted in shared memory. However,
* there is a window (caused by pgstat delay) on which a worker may choose a
* table that was already vacuumed; this is a bug in the current design.
* table. They will also fetch the last time the table was vacuumed from
* pgstats just before vacuuming each table, to avoid vacuuming a table that
* was just finished being vacuumed by another worker and thus is no longer
* noted in shared memory. However, there is a small window (due to not yet
* holding the relation lock) during which a worker may choose a table that was
* already vacuumed; this is a bug in the current design.
*
* Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@ -129,9 +130,6 @@ int autovacuum_vac_cost_limit;
int Log_autovacuum_min_duration = 600000;
/* how long to keep pgstat data in the launcher, in milliseconds */
#define STATS_READ_DELAY 1000
/* the minimum allowed time between two awakenings of the launcher */
#define MIN_AUTOVAC_SLEEPTIME 100.0 /* milliseconds */
#define MAX_AUTOVAC_SLEEPTIME 300 /* seconds */
@ -342,15 +340,11 @@ static void autovacuum_do_vac_analyze(autovac_table *tab,
BufferAccessStrategy bstrategy);
static AutoVacOpts *extract_autovac_opts(HeapTuple tup,
TupleDesc pg_class_desc);
static PgStat_StatTabEntry *get_pgstat_tabentry_relid(Oid relid, bool isshared,
PgStat_StatDBEntry *shared,
PgStat_StatDBEntry *dbentry);
static void perform_work_item(AutoVacuumWorkItem *workitem);
static void autovac_report_activity(autovac_table *tab);
static void autovac_report_workitem(AutoVacuumWorkItem *workitem,
const char *nspname, const char *relname);
static void avl_sigusr2_handler(SIGNAL_ARGS);
static void autovac_refresh_stats(void);
@ -555,12 +549,6 @@ AutoVacLauncherMain(int argc, char *argv[])
DatabaseListCxt = NULL;
dlist_init(&DatabaseList);
/*
* Make sure pgstat also considers our stat data as gone. Note: we
* mustn't use autovac_refresh_stats here.
*/
pgstat_clear_snapshot();
/* Now we can allow interrupts again */
RESUME_INTERRUPTS();
@ -611,6 +599,12 @@ AutoVacLauncherMain(int argc, char *argv[])
SetConfigOption("default_transaction_isolation", "read committed",
PGC_SUSET, PGC_S_OVERRIDE);
/*
* Even when the system is configured to use a different fetch consistency,
* for autovac we always want fresh stats.
*/
SetConfigOption("stats_fetch_consistency", "none", PGC_SUSET, PGC_S_OVERRIDE);
/*
* In emergency mode, just start a worker (unless shutdown was requested)
* and go away.
@ -963,9 +957,6 @@ rebuild_database_list(Oid newdb)
HTAB *dbhash;
dlist_iter iter;
/* use fresh stats */
autovac_refresh_stats();
newcxt = AllocSetContextCreate(AutovacMemCxt,
"Autovacuum database list",
ALLOCSET_DEFAULT_SIZES);
@ -1184,9 +1175,6 @@ do_start_worker(void)
ALLOCSET_DEFAULT_SIZES);
oldcxt = MemoryContextSwitchTo(tmpcxt);
/* use fresh stats */
autovac_refresh_stats();
/* Get a list of databases */
dblist = get_database_list();
@ -1642,6 +1630,12 @@ AutoVacWorkerMain(int argc, char *argv[])
SetConfigOption("synchronous_commit", "local",
PGC_SUSET, PGC_S_OVERRIDE);
/*
* Even when the system is configured to use a different fetch consistency,
* for autovac we always want fresh stats.
*/
SetConfigOption("stats_fetch_consistency", "none", PGC_SUSET, PGC_S_OVERRIDE);
/*
* Get the info about the database we're going to work on.
*/
@ -1966,8 +1960,6 @@ do_autovacuum(void)
HASHCTL ctl;
HTAB *table_toast_map;
ListCell *volatile cell;
PgStat_StatDBEntry *shared;
PgStat_StatDBEntry *dbentry;
BufferAccessStrategy bstrategy;
ScanKeyData key;
TupleDesc pg_class_desc;
@ -1986,22 +1978,9 @@ do_autovacuum(void)
ALLOCSET_DEFAULT_SIZES);
MemoryContextSwitchTo(AutovacMemCxt);
/*
* may be NULL if we couldn't find an entry (only happens if we are
* forcing a vacuum for anti-wrap purposes).
*/
dbentry = pgstat_fetch_stat_dbentry(MyDatabaseId);
/* Start a transaction so our commands have one to play into. */
StartTransactionCommand();
/*
* Clean up any dead statistics entries for this DB. We always want to do
* this exactly once per DB-processing cycle, even if we find nothing
* worth vacuuming in the database.
*/
pgstat_vacuum_stat();
/*
* Compute the multixact age for which freezing is urgent. This is
* normally autovacuum_multixact_freeze_max_age, but may be less if we are
@ -2039,9 +2018,6 @@ do_autovacuum(void)
/* StartTransactionCommand changed elsewhere */
MemoryContextSwitchTo(AutovacMemCxt);
/* The database hash where pgstat keeps shared relations */
shared = pgstat_fetch_stat_dbentry(InvalidOid);
classRel = table_open(RelationRelationId, AccessShareLock);
/* create a copy so we can use it after closing pg_class */
@ -2119,8 +2095,8 @@ do_autovacuum(void)
/* Fetch reloptions and the pgstat entry for this table */
relopts = extract_autovac_opts(tuple, pg_class_desc);
tabentry = get_pgstat_tabentry_relid(relid, classForm->relisshared,
shared, dbentry);
tabentry = pgstat_fetch_stat_tabentry_ext(classForm->relisshared,
relid);
/* Check if it needs vacuum or analyze */
relation_needs_vacanalyze(relid, relopts, classForm, tabentry,
@ -2203,8 +2179,8 @@ do_autovacuum(void)
}
/* Fetch the pgstat entry for this table */
tabentry = get_pgstat_tabentry_relid(relid, classForm->relisshared,
shared, dbentry);
tabentry = pgstat_fetch_stat_tabentry_ext(classForm->relisshared,
relid);
relation_needs_vacanalyze(relid, relopts, classForm, tabentry,
effective_multixact_freeze_max_age,
@ -2418,12 +2394,8 @@ do_autovacuum(void)
/*
* Check whether pgstat data still says we need to vacuum this table.
* It could have changed if something else processed the table while
* we weren't looking.
*
* Note: we have a special case in pgstat code to ensure that the
* stats we read are as up-to-date as possible, to avoid the problem
* that somebody just finished vacuuming this table. The window to
* the race condition is not closed but it is very small.
* we weren't looking. This doesn't entirely close the race condition,
* but it is very small.
*/
MemoryContextSwitchTo(AutovacMemCxt);
tab = table_recheck_autovac(relid, table_toast_map, pg_class_desc,
@ -2768,29 +2740,6 @@ extract_autovac_opts(HeapTuple tup, TupleDesc pg_class_desc)
return av;
}
/*
* get_pgstat_tabentry_relid
*
* Fetch the pgstat entry of a table, either local to a database or shared.
*/
static PgStat_StatTabEntry *
get_pgstat_tabentry_relid(Oid relid, bool isshared, PgStat_StatDBEntry *shared,
PgStat_StatDBEntry *dbentry)
{
PgStat_StatTabEntry *tabentry = NULL;
if (isshared)
{
if (PointerIsValid(shared))
tabentry = hash_search(shared->tables, &relid,
HASH_FIND, NULL);
}
else if (PointerIsValid(dbentry))
tabentry = hash_search(dbentry->tables, &relid,
HASH_FIND, NULL);
return tabentry;
}
/*
* table_recheck_autovac
@ -2812,7 +2761,6 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map,
autovac_table *tab = NULL;
bool wraparound;
AutoVacOpts *avopts;
static bool reuse_stats = false;
/* fetch the relation's relcache entry */
classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
@ -2836,35 +2784,6 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map,
avopts = &hentry->ar_reloptions;
}
/*
* Reuse the stats to recheck whether a relation needs to be vacuumed or
* analyzed if it was reloaded before and has not been cleared yet. This
* is necessary to avoid frequent refresh of stats, especially when there
* are very large number of relations and the refresh can cause lots of
* overhead.
*
* If we determined that a relation needs to be vacuumed or analyzed,
* based on the old stats, we refresh stats and recheck the necessity
* again. Because a relation may have already been vacuumed or analyzed by
* someone since the last reload of stats.
*/
if (reuse_stats)
{
recheck_relation_needs_vacanalyze(relid, avopts, classForm,
effective_multixact_freeze_max_age,
&dovacuum, &doanalyze, &wraparound);
/* Quick exit if a relation doesn't need to be vacuumed or analyzed */
if (!doanalyze && !dovacuum)
{
heap_freetuple(classTup);
return NULL;
}
}
/* Use fresh stats and recheck again */
autovac_refresh_stats();
recheck_relation_needs_vacanalyze(relid, avopts, classForm,
effective_multixact_freeze_max_age,
&dovacuum, &doanalyze, &wraparound);
@ -2962,21 +2881,6 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map,
tab->at_dobalance =
!(avopts && (avopts->vacuum_cost_limit > 0 ||
avopts->vacuum_cost_delay > 0));
/*
* When we decide to do vacuum or analyze, the existing stats cannot
* be reused in the next cycle because it's cleared at the end of
* vacuum or analyze (by AtEOXact_PgStat()).
*/
reuse_stats = false;
}
else
{
/*
* If neither vacuum nor analyze is necessary, the existing stats is
* not cleared and can be reused in the next cycle.
*/
reuse_stats = true;
}
heap_freetuple(classTup);
@ -3001,17 +2905,10 @@ recheck_relation_needs_vacanalyze(Oid relid,
bool *wraparound)
{
PgStat_StatTabEntry *tabentry;
PgStat_StatDBEntry *shared = NULL;
PgStat_StatDBEntry *dbentry = NULL;
if (classForm->relisshared)
shared = pgstat_fetch_stat_dbentry(InvalidOid);
else
dbentry = pgstat_fetch_stat_dbentry(MyDatabaseId);
/* fetch the pgstat table entry */
tabentry = get_pgstat_tabentry_relid(relid, classForm->relisshared,
shared, dbentry);
tabentry = pgstat_fetch_stat_tabentry_ext(classForm->relisshared,
relid);
relation_needs_vacanalyze(relid, avopts, classForm, tabentry,
effective_multixact_freeze_max_age,
@ -3169,11 +3066,11 @@ relation_needs_vacanalyze(Oid relid,
}
/*
* If we found the table in the stats hash, and autovacuum is currently
* enabled, make a threshold-based decision whether to vacuum and/or
* analyze. If autovacuum is currently disabled, we must be here for
* anti-wraparound vacuuming only, so don't vacuum (or analyze) anything
* that's not being forced.
* If we found stats for the table, and autovacuum is currently enabled,
* make a threshold-based decision whether to vacuum and/or analyze. If
* autovacuum is currently disabled, we must be here for anti-wraparound
* vacuuming only, so don't vacuum (or analyze) anything that's not being
* forced.
*/
if (PointerIsValid(tabentry) && AutoVacuumingActive())
{
@ -3472,35 +3369,3 @@ AutoVacuumShmemInit(void)
else
Assert(found);
}
/*
* autovac_refresh_stats
* Refresh pgstats data for an autovacuum process
*
* Cause the next pgstats read operation to obtain fresh data, but throttle
* such refreshing in the autovacuum launcher. This is mostly to avoid
* rereading the pgstats files too many times in quick succession when there
* are many databases.
*
* Note: we avoid throttling in the autovac worker, as it would be
* counterproductive in the recheck logic.
*/
static void
autovac_refresh_stats(void)
{
if (IsAutoVacuumLauncherProcess())
{
static TimestampTz last_read = 0;
TimestampTz current_time;
current_time = GetCurrentTimestamp();
if (!TimestampDifferenceExceeds(last_read, current_time,
STATS_READ_DELAY))
return;
last_read = current_time;
}
pgstat_clear_snapshot();
}

View File

@ -212,6 +212,16 @@ CheckpointerMain(void)
*/
last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);
/*
* Write out stats after shutdown. This needs to be called by exactly one
* process during a normal shutdown, and since checkpointer is shut down
* very late...
*
* Walsenders are shut down after the checkpointer, but currently don't
* report stats. If that changes, we need a more complicated solution.
*/
before_shmem_exit(pgstat_before_server_shutdown, 0);
/*
* Create a memory context that we will do all our work in. We do this so
* that we can reset the context during error recovery and thereby avoid
@ -358,7 +368,7 @@ CheckpointerMain(void)
if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
{
do_checkpoint = true;
PendingCheckpointerStats.m_requested_checkpoints++;
PendingCheckpointerStats.requested_checkpoints++;
}
/*
@ -372,7 +382,7 @@ CheckpointerMain(void)
if (elapsed_secs >= CheckPointTimeout)
{
if (!do_checkpoint)
PendingCheckpointerStats.m_timed_checkpoints++;
PendingCheckpointerStats.timed_checkpoints++;
do_checkpoint = true;
flags |= CHECKPOINT_CAUSE_TIME;
}
@ -569,7 +579,7 @@ HandleCheckpointerInterrupts(void)
* updates the statistics, increment the checkpoint request and flush
* out pending statistic.
*/
PendingCheckpointerStats.m_requested_checkpoints++;
PendingCheckpointerStats.requested_checkpoints++;
ShutdownXLOG(0, 0);
pgstat_report_checkpointer();
pgstat_report_wal(true);
@ -1262,9 +1272,9 @@ AbsorbSyncRequests(void)
LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
/* Transfer stats counts into pending pgstats */
PendingCheckpointerStats.m_buf_written_backend
PendingCheckpointerStats.buf_written_backend
+= CheckpointerShmem->num_backend_writes;
PendingCheckpointerStats.m_buf_fsync_backend
PendingCheckpointerStats.buf_fsync_backend
+= CheckpointerShmem->num_backend_fsync;
CheckpointerShmem->num_backend_writes = 0;

View File

@ -98,9 +98,8 @@ SignalHandlerForCrashExit(SIGNAL_ARGS)
* shut down and exit.
*
* Typically, this handler would be used for SIGTERM, but some processes use
* other signals. In particular, the checkpointer exits on SIGUSR2, the
* stats collector on SIGQUIT, and the WAL writer exits on either SIGINT
* or SIGTERM.
* other signals. In particular, the checkpointer exits on SIGUSR2, and the
* WAL writer exits on either SIGINT or SIGTERM.
*
* ShutdownRequestPending should be checked at a convenient place within the
* main loop, or else the main loop should call HandleMainLoopInterrupts.

File diff suppressed because it is too large Load Diff

View File

@ -255,7 +255,6 @@ static pid_t StartupPID = 0,
WalReceiverPID = 0,
AutoVacPID = 0,
PgArchPID = 0,
PgStatPID = 0,
SysLoggerPID = 0;
/* Startup process's status */
@ -510,7 +509,6 @@ typedef struct
PGPROC *AuxiliaryProcs;
PGPROC *PreparedXactProcs;
PMSignalData *PMSignalState;
InheritableSocket pgStatSock;
pid_t PostmasterPid;
TimestampTz PgStartTime;
TimestampTz PgReloadTime;
@ -645,9 +643,8 @@ PostmasterMain(int argc, char *argv[])
* CAUTION: when changing this list, check for side-effects on the signal
* handling setup of child processes. See tcop/postgres.c,
* bootstrap/bootstrap.c, postmaster/bgwriter.c, postmaster/walwriter.c,
* postmaster/autovacuum.c, postmaster/pgarch.c, postmaster/pgstat.c,
* postmaster/syslogger.c, postmaster/bgworker.c and
* postmaster/checkpointer.c.
* postmaster/autovacuum.c, postmaster/pgarch.c, postmaster/syslogger.c,
* postmaster/bgworker.c and postmaster/checkpointer.c.
*/
pqinitmask();
PG_SETMASK(&BlockSig);
@ -1384,12 +1381,6 @@ PostmasterMain(int argc, char *argv[])
*/
RemovePgTempFiles();
/*
* Initialize stats collection subsystem (this does NOT start the
* collector process!)
*/
pgstat_init();
/*
* Initialize the autovacuum subsystem (again, no process start yet)
*/
@ -1845,11 +1836,6 @@ ServerLoop(void)
start_autovac_launcher = false; /* signal processed */
}
/* If we have lost the stats collector, try to start a new one */
if (PgStatPID == 0 &&
(pmState == PM_RUN || pmState == PM_HOT_STANDBY))
PgStatPID = pgstat_start();
/* If we have lost the archiver, try to start a new one. */
if (PgArchPID == 0 && PgArchStartupAllowed())
PgArchPID = StartArchiver();
@ -2772,8 +2758,6 @@ SIGHUP_handler(SIGNAL_ARGS)
signal_child(PgArchPID, SIGHUP);
if (SysLoggerPID != 0)
signal_child(SysLoggerPID, SIGHUP);
if (PgStatPID != 0)
signal_child(PgStatPID, SIGHUP);
/* Reload authentication config files too */
if (!load_hba())
@ -3097,8 +3081,6 @@ reaper(SIGNAL_ARGS)
AutoVacPID = StartAutoVacLauncher();
if (PgArchStartupAllowed() && PgArchPID == 0)
PgArchPID = StartArchiver();
if (PgStatPID == 0)
PgStatPID = pgstat_start();
/* workers may be scheduled to start now */
maybe_start_bgworkers();
@ -3165,13 +3147,6 @@ reaper(SIGNAL_ARGS)
SignalChildren(SIGUSR2);
pmState = PM_SHUTDOWN_2;
/*
* We can also shut down the stats collector now; there's
* nothing left for it to do.
*/
if (PgStatPID != 0)
signal_child(PgStatPID, SIGQUIT);
}
else
{
@ -3250,22 +3225,6 @@ reaper(SIGNAL_ARGS)
continue;
}
/*
* Was it the statistics collector? If so, just try to start a new
* one; no need to force reset of the rest of the system. (If fail,
* we'll try again in future cycles of the main loop.)
*/
if (pid == PgStatPID)
{
PgStatPID = 0;
if (!EXIT_STATUS_0(exitstatus))
LogChildExit(LOG, _("statistics collector process"),
pid, exitstatus);
if (pmState == PM_RUN || pmState == PM_HOT_STANDBY)
PgStatPID = pgstat_start();
continue;
}
/* Was it the system logger? If so, try to start a new one */
if (pid == SysLoggerPID)
{
@ -3707,22 +3666,6 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
signal_child(PgArchPID, (SendStop ? SIGSTOP : SIGQUIT));
}
/*
* Force a power-cycle of the pgstat process too. (This isn't absolutely
* necessary, but it seems like a good idea for robustness, and it
* simplifies the state-machine logic in the case where a shutdown request
* arrives during crash processing.)
*/
if (PgStatPID != 0 && take_action)
{
ereport(DEBUG2,
(errmsg_internal("sending %s to process %d",
"SIGQUIT",
(int) PgStatPID)));
signal_child(PgStatPID, SIGQUIT);
allow_immediate_pgstat_restart();
}
/* We do NOT restart the syslogger */
if (Shutdown != ImmediateShutdown)
@ -3934,12 +3877,10 @@ PostmasterStateMachine(void)
FatalError = true;
pmState = PM_WAIT_DEAD_END;
/* Kill the walsenders, archiver and stats collector too */
/* Kill the walsenders and archiver too */
SignalChildren(SIGQUIT);
if (PgArchPID != 0)
signal_child(PgArchPID, SIGQUIT);
if (PgStatPID != 0)
signal_child(PgStatPID, SIGQUIT);
}
}
}
@ -3963,8 +3904,7 @@ PostmasterStateMachine(void)
{
/*
* PM_WAIT_DEAD_END state ends when the BackendList is entirely empty
* (ie, no dead_end children remain), and the archiver and stats
* collector are gone too.
* (ie, no dead_end children remain), and the archiver is gone too.
*
* The reason we wait for the archiver is to protect it against a new
* postmaster starting conflicting subprocesses; this isn't an
@ -3974,8 +3914,7 @@ PostmasterStateMachine(void)
* normal state transition leading up to PM_WAIT_DEAD_END, or during
* FatalError processing.
*/
if (dlist_is_empty(&BackendList) &&
PgArchPID == 0 && PgStatPID == 0)
if (dlist_is_empty(&BackendList) && PgArchPID == 0)
{
/* These other guys should be dead already */
Assert(StartupPID == 0);
@ -4183,8 +4122,6 @@ TerminateChildren(int signal)
signal_child(AutoVacPID, signal);
if (PgArchPID != 0)
signal_child(PgArchPID, signal);
if (PgStatPID != 0)
signal_child(PgStatPID, signal);
}
/*
@ -5115,12 +5052,6 @@ SubPostmasterMain(int argc, char *argv[])
StartBackgroundWorker();
}
if (strcmp(argv[1], "--forkcol") == 0)
{
/* Do not want to attach to shared memory */
PgstatCollectorMain(argc, argv); /* does not return */
}
if (strcmp(argv[1], "--forklog") == 0)
{
/* Do not want to attach to shared memory */
@ -5224,12 +5155,6 @@ sigusr1_handler(SIGNAL_ARGS)
if (CheckPostmasterSignal(PMSIGNAL_BEGIN_HOT_STANDBY) &&
pmState == PM_RECOVERY && Shutdown == NoShutdown)
{
/*
* Likewise, start other special children as needed.
*/
Assert(PgStatPID == 0);
PgStatPID = pgstat_start();
ereport(LOG,
(errmsg("database system is ready to accept read-only connections")));
@ -6145,7 +6070,6 @@ extern slock_t *ShmemLock;
extern slock_t *ProcStructLock;
extern PGPROC *AuxiliaryProcs;
extern PMSignalData *PMSignalState;
extern pgsocket pgStatSock;
extern pg_time_t first_syslogger_file_time;
#ifndef WIN32
@ -6201,8 +6125,6 @@ save_backend_variables(BackendParameters *param, Port *port,
param->AuxiliaryProcs = AuxiliaryProcs;
param->PreparedXactProcs = PreparedXactProcs;
param->PMSignalState = PMSignalState;
if (!write_inheritable_socket(&param->pgStatSock, pgStatSock, childPid))
return false;
param->PostmasterPid = PostmasterPid;
param->PgStartTime = PgStartTime;
@ -6436,7 +6358,6 @@ restore_backend_variables(BackendParameters *param, Port *port)
AuxiliaryProcs = param->AuxiliaryProcs;
PreparedXactProcs = param->PreparedXactProcs;
PMSignalState = param->PMSignalState;
read_inheritable_socket(&pgStatSock, &param->pgStatSock);
PostmasterPid = param->PostmasterPid;
PgStartTime = param->PgStartTime;
@ -6475,8 +6396,6 @@ restore_backend_variables(BackendParameters *param, Port *port)
if (postmaster_alive_fds[1] >= 0)
ReserveExternalFD();
#endif
if (pgStatSock != PGINVALID_SOCKET)
ReserveExternalFD();
}

View File

@ -1911,7 +1911,6 @@ UpdateDecodingStats(LogicalDecodingContext *ctx)
(long long) rb->totalTxns,
(long long) rb->totalBytes);
namestrcpy(&repSlotStat.slotname, NameStr(ctx->slot->data.name));
repSlotStat.spill_txns = rb->spillTxns;
repSlotStat.spill_count = rb->spillCount;
repSlotStat.spill_bytes = rb->spillBytes;

View File

@ -141,7 +141,7 @@ finish_sync_worker(void)
if (IsTransactionState())
{
CommitTransactionCommand();
pgstat_report_stat(false);
pgstat_report_stat(true);
}
/* And flush all writes. */
@ -580,7 +580,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
if (started_tx)
{
CommitTransactionCommand();
pgstat_report_stat(false);
pgstat_report_stat(true);
}
}
@ -1386,7 +1386,7 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
MyLogicalRepWorker->relstate,
MyLogicalRepWorker->relstate_lsn);
CommitTransactionCommand();
pgstat_report_stat(false);
pgstat_report_stat(true);
StartTransactionCommand();
@ -1630,7 +1630,7 @@ AllTablesyncsReady(void)
if (started_tx)
{
CommitTransactionCommand();
pgstat_report_stat(false);
pgstat_report_stat(true);
}
/*

View File

@ -2937,6 +2937,12 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
}
send_feedback(last_received, requestReply, requestReply);
/*
* Force reporting to ensure long idle periods don't lead to
* arbitrarily delayed stats.
*/
pgstat_report_stat(true);
}
}

View File

@ -502,6 +502,14 @@ retry:
/* We made this slot active, so it's ours now. */
MyReplicationSlot = s;
/*
* The call to pgstat_acquire_replslot() protects against stats for
* a different slot, from before a restart or such, being present during
* pgstat_report_replslot().
*/
if (SlotIsLogical(s))
pgstat_acquire_replslot(s);
}
/*
@ -746,20 +754,10 @@ ReplicationSlotDropPtr(ReplicationSlot *slot)
elog(DEBUG3, "replication slot drop: %s: removed directory", NameStr(slot->data.name));
/*
* Send a message to drop the replication slot to the stats collector.
* Since there is no guarantee of the order of message transfer on a UDP
* connection, it's possible that a message for creating a new slot
* reaches before a message for removing the old slot. We send the drop
* and create messages while holding ReplicationSlotAllocationLock to
* reduce that possibility. If the messages reached in reverse, we would
* lose one statistics update message. But the next update message will
* create the statistics for the replication slot.
*
* XXX In case, the messages for creation and drop slot of the same name
* get lost and create happens before (auto)vacuum cleans up the dead
* slot, the stats will be accumulated into the old slot. One can imagine
* having OIDs for each slot to avoid the accumulation of stats but that
* doesn't seem worth doing as in practice this won't happen frequently.
* Drop the statistics entry for the replication slot. Do this while
* holding ReplicationSlotAllocationLock so that we don't drop a
* statistics entry for another slot with the same name just created in
* another session.
*/
if (SlotIsLogical(slot))
pgstat_drop_replslot(slot);

View File

@ -2151,7 +2151,7 @@ BufferSync(int flags)
if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
{
TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
PendingCheckpointerStats.m_buf_written_checkpoints++;
PendingCheckpointerStats.buf_written_checkpoints++;
num_written++;
}
}
@ -2261,7 +2261,7 @@ BgBufferSync(WritebackContext *wb_context)
strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
/* Report buffer alloc counts to pgstat */
PendingBgWriterStats.m_buf_alloc += recent_alloc;
PendingBgWriterStats.buf_alloc += recent_alloc;
/*
* If we're not running the LRU scan, just stop after doing the stats
@ -2451,7 +2451,7 @@ BgBufferSync(WritebackContext *wb_context)
reusable_buffers++;
if (++num_written >= bgwriter_lru_maxpages)
{
PendingBgWriterStats.m_maxwritten_clean++;
PendingBgWriterStats.maxwritten_clean++;
break;
}
}
@ -2459,7 +2459,7 @@ BgBufferSync(WritebackContext *wb_context)
reusable_buffers++;
}
PendingBgWriterStats.m_buf_written_clean += num_written;
PendingBgWriterStats.buf_written_clean += num_written;
#ifdef BGW_DEBUG
elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",

View File

@ -145,6 +145,7 @@ CalculateShmemSize(int *num_semaphores)
size = add_size(size, BTreeShmemSize());
size = add_size(size, SyncScanShmemSize());
size = add_size(size, AsyncShmemSize());
size = add_size(size, StatsShmemSize());
#ifdef EXEC_BACKEND
size = add_size(size, ShmemBackendArraySize());
#endif
@ -296,6 +297,7 @@ CreateSharedMemoryAndSemaphores(void)
BTreeShmemInit();
SyncScanShmemInit();
AsyncShmemInit();
StatsShmemInit();
#ifdef EXEC_BACKEND

View File

@ -176,7 +176,13 @@ static const char *const BuiltinTrancheNames[] = {
/* LWTRANCHE_PARALLEL_APPEND: */
"ParallelAppend",
/* LWTRANCHE_PER_XACT_PREDICATE_LIST: */
"PerXactPredicateList"
"PerXactPredicateList",
/* LWTRANCHE_PGSTATS_DSA: */
"PgStatsDSA",
/* LWTRANCHE_PGSTATS_HASH: */
"PgStatsHash",
/* LWTRANCHE_PGSTATS_DATA: */
"PgStatsData",
};
StaticAssertDecl(lengthof(BuiltinTrancheNames) ==

View File

@ -3372,6 +3372,14 @@ ProcessInterrupts(void)
IdleSessionTimeoutPending = false;
}
if (IdleStatsUpdateTimeoutPending)
{
/* timer should have been disarmed */
Assert(!IsTransactionBlock());
IdleStatsUpdateTimeoutPending = false;
pgstat_report_stat(true);
}
if (ProcSignalBarrierPending)
ProcessProcSignalBarrier();
@ -4044,6 +4052,7 @@ PostgresMain(const char *dbname, const char *username)
volatile bool send_ready_for_query = true;
bool idle_in_transaction_timeout_enabled = false;
bool idle_session_timeout_enabled = false;
bool idle_stats_update_timeout_enabled = false;
AssertArg(dbname != NULL);
AssertArg(username != NULL);
@ -4407,6 +4416,8 @@ PostgresMain(const char *dbname, const char *username)
}
else
{
long stats_timeout;
/*
* Process incoming notifies (including self-notifies), if
* any, and send relevant messages to the client. Doing it
@ -4417,7 +4428,14 @@ PostgresMain(const char *dbname, const char *username)
if (notifyInterruptPending)
ProcessNotifyInterrupt(false);
pgstat_report_stat(false);
/* Start the idle-stats-update timer */
stats_timeout = pgstat_report_stat(false);
if (stats_timeout > 0)
{
idle_stats_update_timeout_enabled = true;
enable_timeout_after(IDLE_STATS_UPDATE_TIMEOUT,
stats_timeout);
}
set_ps_display("idle");
pgstat_report_activity(STATE_IDLE, NULL);
@ -4452,9 +4470,9 @@ PostgresMain(const char *dbname, const char *username)
firstchar = ReadCommand(&input_message);
/*
* (4) turn off the idle-in-transaction and idle-session timeouts, if
* active. We do this before step (5) so that any last-moment timeout
* is certain to be detected in step (5).
* (4) turn off the idle-in-transaction, idle-session and
* idle-stats-update timeouts if active. We do this before step (5) so
* that any last-moment timeout is certain to be detected in step (5).
*
* At most one of these timeouts will be active, so there's no need to
* worry about combining the timeout.c calls into one.
@ -4469,6 +4487,11 @@ PostgresMain(const char *dbname, const char *username)
disable_timeout(IDLE_SESSION_TIMEOUT, false);
idle_session_timeout_enabled = false;
}
if (idle_stats_update_timeout_enabled)
{
disable_timeout(IDLE_STATS_UPDATE_TIMEOUT, false);
idle_stats_update_timeout_enabled = false;
}
/*
* (5) disable async signal conditions again.

View File

@ -23,6 +23,7 @@ OBJS = \
pgstat_function.o \
pgstat_relation.o \
pgstat_replslot.o \
pgstat_shmem.o \
pgstat_slru.o \
pgstat_subscription.o \
pgstat_wal.o \

View File

@ -27,14 +27,85 @@
/*
 * Report archiver statistics: one successfully archived or one failed WAL
 * file named 'xlog'.
 *
 * Updates the fixed-numbered archiver stats in shared memory directly,
 * bracketed by the changecount protocol so that readers can detect and
 * retry torn reads.
 */
void
pgstat_report_archiver(const char *xlog, bool failed)
{
	PgStatShared_Archiver *stats_shmem = &pgStatLocal.shmem->archiver;
	TimestampTz now = GetCurrentTimestamp();

	pgstat_begin_changecount_write(&stats_shmem->changecount);

	if (failed)
	{
		++stats_shmem->stats.failed_count;
		/* NB: copies the full buffer; assumes xlog points at a buffer at
		 * least as large as last_failed_wal — TODO confirm at callers */
		memcpy(&stats_shmem->stats.last_failed_wal, xlog,
			   sizeof(stats_shmem->stats.last_failed_wal));
		stats_shmem->stats.last_failed_timestamp = now;
	}
	else
	{
		++stats_shmem->stats.archived_count;
		memcpy(&stats_shmem->stats.last_archived_wal, xlog,
			   sizeof(stats_shmem->stats.last_archived_wal));
		stats_shmem->stats.last_archived_timestamp = now;
	}

	pgstat_end_changecount_write(&stats_shmem->changecount);
}
/*
 * Support function for the SQL-callable pgstat* functions. Returns
 * a pointer to the archiver statistics struct.
 *
 * pgstat_snapshot_fixed() ensures the backend-local snapshot of this
 * fixed-numbered stats kind is populated before we hand out a pointer
 * into it.
 */
PgStat_ArchiverStats *
pgstat_fetch_stat_archiver(void)
{
	pgstat_snapshot_fixed(PGSTAT_KIND_ARCHIVER);

	return &pgStatLocal.snapshot.archiver;
}
/*
 * Reset callback for archiver statistics.
 *
 * Rather than zeroing the shared counters (which concurrent writers are
 * updating), the current values are copied into reset_offset; snapshots
 * later subtract these offsets (see pgstat_archiver_snapshot_cb()).
 */
void
pgstat_archiver_reset_all_cb(TimestampTz ts)
{
	PgStatShared_Archiver *stats_shmem = &pgStatLocal.shmem->archiver;

	/* see explanation above PgStatShared_Archiver for the reset protocol */
	LWLockAcquire(&stats_shmem->lock, LW_EXCLUSIVE);
	pgstat_copy_changecounted_stats(&stats_shmem->reset_offset,
									&stats_shmem->stats,
									sizeof(stats_shmem->stats),
									&stats_shmem->changecount);
	stats_shmem->stats.stat_reset_timestamp = ts;
	LWLockRelease(&stats_shmem->lock);
}
/*
 * Snapshot callback for archiver statistics.
 *
 * Copies the shared counters into the backend-local snapshot (using the
 * changecount protocol to get a consistent copy), then subtracts the
 * reset offsets recorded by pgstat_archiver_reset_all_cb() so the
 * snapshot reflects only activity since the last reset.
 */
void
pgstat_archiver_snapshot_cb(void)
{
	PgStatShared_Archiver *stats_shmem = &pgStatLocal.shmem->archiver;
	PgStat_ArchiverStats *stat_snap = &pgStatLocal.snapshot.archiver;
	PgStat_ArchiverStats *reset_offset = &stats_shmem->reset_offset;
	PgStat_ArchiverStats reset;

	pgstat_copy_changecounted_stats(stat_snap,
									&stats_shmem->stats,
									sizeof(stats_shmem->stats),
									&stats_shmem->changecount);

	LWLockAcquire(&stats_shmem->lock, LW_SHARED);
	memcpy(&reset, reset_offset, sizeof(stats_shmem->stats));
	LWLockRelease(&stats_shmem->lock);

	/* compensate by reset offsets */
	if (stat_snap->archived_count == reset.archived_count)
	{
		/* no archiving since the reset: clear the "last" info too */
		stat_snap->last_archived_wal[0] = 0;
		stat_snap->last_archived_timestamp = 0;
	}
	stat_snap->archived_count -= reset.archived_count;

	if (stat_snap->failed_count == reset.failed_count)
	{
		stat_snap->last_failed_wal[0] = 0;
		stat_snap->last_failed_timestamp = 0;
	}
	stat_snap->failed_count -= reset.failed_count;
}

View File

@ -20,12 +20,7 @@
#include "utils/pgstat_internal.h"
/*
 * BgWriter global statistics counters.  Accumulated locally in this
 * backend and flushed into shared memory by pgstat_report_bgwriter().
 */
PgStat_BgWriterStats PendingBgWriterStats = {0};
/*
@ -34,27 +29,82 @@ PgStat_MsgBgWriter PendingBgWriterStats;
/*
 * Flush out locally pending bgwriter statistics into shared memory.
 */
void
pgstat_report_bgwriter(void)
{
	PgStatShared_BgWriter *stats_shmem = &pgStatLocal.shmem->bgwriter;
	static const PgStat_BgWriterStats all_zeroes;

	Assert(!pgStatLocal.shmem->is_shutdown);
	pgstat_assert_is_up();

	/*
	 * This function can be called even if nothing at all has happened. In
	 * this case, avoid unnecessarily modifying the stats entry.
	 */
	if (memcmp(&PendingBgWriterStats, &all_zeroes, sizeof(all_zeroes)) == 0)
		return;

	pgstat_begin_changecount_write(&stats_shmem->changecount);

#define BGWRITER_ACC(fld) stats_shmem->stats.fld += PendingBgWriterStats.fld
	BGWRITER_ACC(buf_written_clean);
	BGWRITER_ACC(maxwritten_clean);
	BGWRITER_ACC(buf_alloc);
#undef BGWRITER_ACC

	pgstat_end_changecount_write(&stats_shmem->changecount);

	/*
	 * Clear out the statistics buffer, so it can be re-used.
	 */
	MemSet(&PendingBgWriterStats, 0, sizeof(PendingBgWriterStats));
}
/*
 * Support function for the SQL-callable pgstat* functions. Returns
 * a pointer to the bgwriter statistics struct.
 *
 * pgstat_snapshot_fixed() populates the backend-local snapshot of this
 * fixed-numbered stats kind before we hand out a pointer into it.
 */
PgStat_BgWriterStats *
pgstat_fetch_stat_bgwriter(void)
{
	pgstat_snapshot_fixed(PGSTAT_KIND_BGWRITER);

	return &pgStatLocal.snapshot.bgwriter;
}
/*
 * Reset callback for bgwriter statistics.
 *
 * Copies the current shared counters into reset_offset instead of zeroing
 * them; snapshots subtract the offsets (see pgstat_bgwriter_snapshot_cb()).
 */
void
pgstat_bgwriter_reset_all_cb(TimestampTz ts)
{
	PgStatShared_BgWriter *stats_shmem = &pgStatLocal.shmem->bgwriter;

	/* see explanation above PgStatShared_BgWriter for the reset protocol */
	LWLockAcquire(&stats_shmem->lock, LW_EXCLUSIVE);
	pgstat_copy_changecounted_stats(&stats_shmem->reset_offset,
									&stats_shmem->stats,
									sizeof(stats_shmem->stats),
									&stats_shmem->changecount);
	stats_shmem->stats.stat_reset_timestamp = ts;
	LWLockRelease(&stats_shmem->lock);
}
/*
 * Snapshot callback for bgwriter statistics.
 *
 * Builds a consistent backend-local copy of the shared counters (via the
 * changecount protocol), then subtracts the reset offsets so the snapshot
 * shows only activity since the last reset.
 */
void
pgstat_bgwriter_snapshot_cb(void)
{
	PgStatShared_BgWriter *stats_shmem = &pgStatLocal.shmem->bgwriter;
	PgStat_BgWriterStats *reset_offset = &stats_shmem->reset_offset;
	PgStat_BgWriterStats reset;

	pgstat_copy_changecounted_stats(&pgStatLocal.snapshot.bgwriter,
									&stats_shmem->stats,
									sizeof(stats_shmem->stats),
									&stats_shmem->changecount);

	LWLockAcquire(&stats_shmem->lock, LW_SHARED);
	memcpy(&reset, reset_offset, sizeof(stats_shmem->stats));
	LWLockRelease(&stats_shmem->lock);

	/* compensate by reset offsets */
#define BGWRITER_COMP(fld) pgStatLocal.snapshot.bgwriter.fld -= reset.fld;
	BGWRITER_COMP(buf_written_clean);
	BGWRITER_COMP(maxwritten_clean);
	BGWRITER_COMP(buf_alloc);
#undef BGWRITER_COMP
}

View File

@ -20,12 +20,7 @@
#include "utils/pgstat_internal.h"
/*
 * Checkpointer global statistics counters.  Accumulated locally in this
 * backend and flushed into shared memory by pgstat_report_checkpointer().
 */
PgStat_CheckpointerStats PendingCheckpointerStats = {0};
/*
@ -35,24 +30,92 @@ void
pgstat_report_checkpointer(void)
{
/* We assume this initializes to zeroes */
static const PgStat_MsgCheckpointer all_zeroes;
static const PgStat_CheckpointerStats all_zeroes;
PgStatShared_Checkpointer *stats_shmem = &pgStatLocal.shmem->checkpointer;
Assert(!pgStatLocal.shmem->is_shutdown);
pgstat_assert_is_up();
/*
* This function can be called even if nothing at all has happened. In
* this case, avoid sending a completely empty message to the stats
* collector.
* this case, avoid unnecessarily modifying the stats entry.
*/
if (memcmp(&PendingCheckpointerStats, &all_zeroes, sizeof(PgStat_MsgCheckpointer)) == 0)
if (memcmp(&PendingCheckpointerStats, &all_zeroes,
sizeof(all_zeroes)) == 0)
return;
/*
* Prepare and send the message
*/
pgstat_setheader(&PendingCheckpointerStats.m_hdr, PGSTAT_MTYPE_CHECKPOINTER);
pgstat_send(&PendingCheckpointerStats, sizeof(PendingCheckpointerStats));
pgstat_begin_changecount_write(&stats_shmem->changecount);
#define CHECKPOINTER_ACC(fld) stats_shmem->stats.fld += PendingCheckpointerStats.fld
CHECKPOINTER_ACC(timed_checkpoints);
CHECKPOINTER_ACC(requested_checkpoints);
CHECKPOINTER_ACC(checkpoint_write_time);
CHECKPOINTER_ACC(checkpoint_sync_time);
CHECKPOINTER_ACC(buf_written_checkpoints);
CHECKPOINTER_ACC(buf_written_backend);
CHECKPOINTER_ACC(buf_fsync_backend);
#undef CHECKPOINTER_ACC
pgstat_end_changecount_write(&stats_shmem->changecount);
/*
* Clear out the statistics buffer, so it can be re-used.
*/
MemSet(&PendingCheckpointerStats, 0, sizeof(PendingCheckpointerStats));
}
/*
 * pgstat_fetch_stat_checkpointer() -
 *
 * Support function for the SQL-callable pgstat* functions. Returns
 * a pointer to the checkpointer statistics struct.
 *
 * pgstat_snapshot_fixed() populates the backend-local snapshot of this
 * fixed-numbered stats kind before we hand out a pointer into it.
 */
PgStat_CheckpointerStats *
pgstat_fetch_stat_checkpointer(void)
{
	pgstat_snapshot_fixed(PGSTAT_KIND_CHECKPOINTER);

	return &pgStatLocal.snapshot.checkpointer;
}
/*
 * Reset callback for checkpointer statistics.
 *
 * Copies the current shared counters into reset_offset instead of zeroing
 * them; snapshots subtract the offsets (see
 * pgstat_checkpointer_snapshot_cb()).
 */
void
pgstat_checkpointer_reset_all_cb(TimestampTz ts)
{
	PgStatShared_Checkpointer *stats_shmem = &pgStatLocal.shmem->checkpointer;

	/* see explanation above PgStatShared_Checkpointer for the reset protocol */
	LWLockAcquire(&stats_shmem->lock, LW_EXCLUSIVE);
	pgstat_copy_changecounted_stats(&stats_shmem->reset_offset,
									&stats_shmem->stats,
									sizeof(stats_shmem->stats),
									&stats_shmem->changecount);
	LWLockRelease(&stats_shmem->lock);
}
/*
 * Snapshot callback for checkpointer statistics.
 *
 * Builds a consistent backend-local copy of the shared counters (via the
 * changecount protocol), then subtracts the reset offsets so the snapshot
 * shows only activity since the last reset.
 */
void
pgstat_checkpointer_snapshot_cb(void)
{
	PgStatShared_Checkpointer *stats_shmem = &pgStatLocal.shmem->checkpointer;
	PgStat_CheckpointerStats *reset_offset = &stats_shmem->reset_offset;
	PgStat_CheckpointerStats reset;

	pgstat_copy_changecounted_stats(&pgStatLocal.snapshot.checkpointer,
									&stats_shmem->stats,
									sizeof(stats_shmem->stats),
									&stats_shmem->changecount);

	LWLockAcquire(&stats_shmem->lock, LW_SHARED);
	memcpy(&reset, reset_offset, sizeof(stats_shmem->stats));
	LWLockRelease(&stats_shmem->lock);

	/* compensate by reset offsets */
#define CHECKPOINTER_COMP(fld) pgStatLocal.snapshot.checkpointer.fld -= reset.fld;
	CHECKPOINTER_COMP(timed_checkpoints);
	CHECKPOINTER_COMP(requested_checkpoints);
	CHECKPOINTER_COMP(checkpoint_write_time);
	CHECKPOINTER_COMP(checkpoint_sync_time);
	CHECKPOINTER_COMP(buf_written_checkpoints);
	CHECKPOINTER_COMP(buf_written_backend);
	CHECKPOINTER_COMP(buf_fsync_backend);
#undef CHECKPOINTER_COMP
}

View File

@ -19,13 +19,12 @@
#include "utils/pgstat_internal.h"
#include "utils/timestamp.h"
#include "storage/procsignal.h"
static bool pgstat_should_report_connstat(void);
int pgStatXactCommit = 0;
int pgStatXactRollback = 0;
PgStat_Counter pgStatBlockReadTime = 0;
PgStat_Counter pgStatBlockWriteTime = 0;
PgStat_Counter pgStatActiveTime = 0;
@ -33,25 +32,18 @@ PgStat_Counter pgStatTransactionIdleTime = 0;
SessionEndType pgStatSessionEndCause = DISCONNECT_NORMAL;
static int pgStatXactCommit = 0;
static int pgStatXactRollback = 0;
static PgStat_Counter pgLastSessionReportTime = 0;
/*
 * Remove entry for the database being dropped.
 *
 * The drop is transactional: the entry only goes away if the surrounding
 * transaction commits.
 */
void
pgstat_drop_database(Oid databaseid)
{
	pgstat_drop_transactional(PGSTAT_KIND_DATABASE, databaseid, InvalidOid);
}
/*
@ -62,16 +54,24 @@ pgstat_drop_database(Oid databaseid)
/*
 * Report that an autovacuum worker is starting up on the given database.
 */
void
pgstat_report_autovac(Oid dboid)
{
	PgStat_EntryRef *entry_ref;
	PgStatShared_Database *dbentry;

	/* can't get here in single user mode */
	Assert(IsUnderPostmaster);

	/*
	 * End-of-vacuum is reported instantly. Report the start the same way for
	 * consistency. Vacuum doesn't run frequently and is a long-lasting
	 * operation so it doesn't matter if we get blocked here a little.
	 */
	entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_DATABASE,
											dboid, InvalidOid, false);

	dbentry = (PgStatShared_Database *) entry_ref->shared_stats;
	dbentry->stats.last_autovac_time = GetCurrentTimestamp();

	pgstat_unlock_entry(entry_ref);
}
/*
@ -80,15 +80,39 @@ pgstat_report_autovac(Oid dboid)
/*
 * Report one occurrence of a recovery conflict of the given reason,
 * accumulated into the current database's pending stats entry.
 */
void
pgstat_report_recovery_conflict(int reason)
{
	PgStat_StatDBEntry *dbentry;

	Assert(IsUnderPostmaster);
	if (!pgstat_track_counts)
		return;

	dbentry = pgstat_prep_database_pending(MyDatabaseId);

	switch (reason)
	{
		case PROCSIG_RECOVERY_CONFLICT_DATABASE:

			/*
			 * Since we drop the information about the database as soon as it
			 * replicates, there is no point in counting these conflicts.
			 */
			break;
		case PROCSIG_RECOVERY_CONFLICT_TABLESPACE:
			dbentry->n_conflict_tablespace++;
			break;
		case PROCSIG_RECOVERY_CONFLICT_LOCK:
			dbentry->n_conflict_lock++;
			break;
		case PROCSIG_RECOVERY_CONFLICT_SNAPSHOT:
			dbentry->n_conflict_snapshot++;
			break;
		case PROCSIG_RECOVERY_CONFLICT_BUFFERPIN:
			dbentry->n_conflict_bufferpin++;
			break;
		case PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK:
			dbentry->n_conflict_startup_deadlock++;
			break;
	}
}
/*
@ -97,14 +121,13 @@ pgstat_report_recovery_conflict(int reason)
/*
 * Report one occurrence of a deadlock in the current database.
 */
void
pgstat_report_deadlock(void)
{
	PgStat_StatDBEntry *dbent;

	if (!pgstat_track_counts)
		return;

	dbent = pgstat_prep_database_pending(MyDatabaseId);
	dbent->n_deadlocks++;
}
/*
@ -113,17 +136,24 @@ pgstat_report_deadlock(void)
/*
 * Report one or more checksum failures in the given database.
 */
void
pgstat_report_checksum_failures_in_db(Oid dboid, int failurecount)
{
	PgStat_EntryRef *entry_ref;
	PgStatShared_Database *sharedent;

	if (!pgstat_track_counts)
		return;

	/*
	 * Update the shared stats directly - checksum failures should never be
	 * common enough for that to be a problem.
	 */
	entry_ref =
		pgstat_get_entry_ref_locked(PGSTAT_KIND_DATABASE, dboid, InvalidOid, false);

	sharedent = (PgStatShared_Database *) entry_ref->shared_stats;
	sharedent->stats.n_checksum_failures += failurecount;
	sharedent->stats.last_checksum_failure = GetCurrentTimestamp();

	pgstat_unlock_entry(entry_ref);
}
/*
@ -141,15 +171,14 @@ pgstat_report_checksum_failure(void)
/*
 * Report a temporary file of the given size, accumulated into the current
 * database's pending stats entry.
 */
void
pgstat_report_tempfile(size_t filesize)
{
	PgStat_StatDBEntry *dbent;

	if (!pgstat_track_counts)
		return;

	dbent = pgstat_prep_database_pending(MyDatabaseId);
	dbent->n_temp_bytes += filesize;
	dbent->n_temp_files++;
}
/*
@ -158,16 +187,15 @@ pgstat_report_tempfile(size_t filesize)
/*
 * Report a new session in the current database.
 *
 * NOTE: the dboid parameter is currently unused; the pending entry is for
 * MyDatabaseId — kept for interface compatibility.
 */
void
pgstat_report_connect(Oid dboid)
{
	PgStat_StatDBEntry *dbentry;

	if (!pgstat_should_report_connstat())
		return;

	pgLastSessionReportTime = MyStartTimestamp;

	dbentry = pgstat_prep_database_pending(MyDatabaseId);
	dbentry->n_sessions++;
}
/*
@ -176,15 +204,42 @@ pgstat_report_connect(Oid dboid)
/*
 * Report session end in the current database, classified by the global
 * pgStatSessionEndCause.
 *
 * NOTE: the dboid parameter is currently unused; the pending entry is for
 * MyDatabaseId — kept for interface compatibility.
 */
void
pgstat_report_disconnect(Oid dboid)
{
	PgStat_StatDBEntry *dbentry;

	if (!pgstat_should_report_connstat())
		return;

	dbentry = pgstat_prep_database_pending(MyDatabaseId);

	switch (pgStatSessionEndCause)
	{
		case DISCONNECT_NOT_YET:
		case DISCONNECT_NORMAL:
			/* we don't collect these */
			break;
		case DISCONNECT_CLIENT_EOF:
			dbentry->n_sessions_abandoned++;
			break;
		case DISCONNECT_FATAL:
			dbentry->n_sessions_fatal++;
			break;
		case DISCONNECT_KILLED:
			dbentry->n_sessions_killed++;
			break;
	}
}
/*
 * Support function for the SQL-callable pgstat* functions. Returns
 * the collected statistics for one database or NULL. NULL doesn't mean
 * that the database doesn't exist, just that there are no statistics, so the
 * caller is better off to report ZERO instead.
 */
PgStat_StatDBEntry *
pgstat_fetch_stat_dbentry(Oid dboid)
{
	return (PgStat_StatDBEntry *)
		pgstat_fetch_entry(PGSTAT_KIND_DATABASE, dboid, InvalidOid);
}
void
@ -205,57 +260,47 @@ AtEOXact_PgStat_Database(bool isCommit, bool parallel)
}
/*
 * Subroutine for pgstat_report_stat(): Handle xact commit/rollback and I/O
 * timings.
 *
 * Accumulates the backend-local transaction / timing counters into the
 * current database's pending stats entry and resets the local counters.
 */
void
pgstat_update_dbstats(TimestampTz ts)
{
	PgStat_StatDBEntry *dbentry;

	dbentry = pgstat_prep_database_pending(MyDatabaseId);

	/*
	 * Accumulate xact commit/rollback and I/O timings to stats entry of the
	 * current database.
	 */
	dbentry->n_xact_commit += pgStatXactCommit;
	dbentry->n_xact_rollback += pgStatXactRollback;
	dbentry->n_block_read_time += pgStatBlockReadTime;
	dbentry->n_block_write_time += pgStatBlockWriteTime;

	if (pgstat_should_report_connstat())
	{
		long		secs;
		int			usecs;

		/*
		 * pgLastSessionReportTime is initialized to MyStartTimestamp by
		 * pgstat_report_connect().
		 */
		TimestampDifference(pgLastSessionReportTime, ts, &secs, &usecs);
		pgLastSessionReportTime = ts;
		dbentry->total_session_time += (PgStat_Counter) secs * 1000000 + usecs;
		dbentry->total_active_time += pgStatActiveTime;
		dbentry->total_idle_in_xact_time += pgStatTransactionIdleTime;
	}

	/* reset the local counters now that they have been accumulated */
	pgStatXactCommit = 0;
	pgStatXactRollback = 0;
	pgStatBlockReadTime = 0;
	pgStatBlockWriteTime = 0;
	pgStatActiveTime = 0;
	pgStatTransactionIdleTime = 0;
}
/*
@ -270,3 +315,111 @@ pgstat_should_report_connstat(void)
{
return MyBackendType == B_BACKEND;
}
/*
 * Find or create a local PgStat_StatDBEntry entry for dboid.
 *
 * The returned entry is accumulated into locally; it is flushed into the
 * shared entry by pgstat_database_flush_cb().
 */
PgStat_StatDBEntry *
pgstat_prep_database_pending(Oid dboid)
{
	PgStat_EntryRef *entry_ref;

	entry_ref = pgstat_prep_pending_entry(PGSTAT_KIND_DATABASE, dboid, InvalidOid,
										  NULL);

	return entry_ref->pending;
}
/*
 * Reset the database's reset timestamp, without resetting the contents of
 * the database stats.
 */
void
pgstat_reset_database_timestamp(Oid dboid, TimestampTz ts)
{
	PgStat_EntryRef *dbref;
	PgStatShared_Database *dbentry;

	/*
	 * Lock the entry for the database the caller asked about.  The previous
	 * code passed MyDatabaseId here, silently ignoring the dboid parameter.
	 */
	dbref = pgstat_get_entry_ref_locked(PGSTAT_KIND_DATABASE, dboid,
										InvalidOid, false);

	dbentry = (PgStatShared_Database *) dbref->shared_stats;
	dbentry->stats.stat_reset_timestamp = ts;

	pgstat_unlock_entry(dbref);
}
/*
 * Flush out pending stats for the entry
 *
 * If nowait is true, this function returns false if the lock could not be
 * acquired immediately; otherwise true is returned after the pending
 * counters have been accumulated into the shared entry and zeroed.
 */
bool
pgstat_database_flush_cb(PgStat_EntryRef *entry_ref, bool nowait)
{
	PgStatShared_Database *sharedent;
	PgStat_StatDBEntry *pendingent;

	pendingent = (PgStat_StatDBEntry *) entry_ref->pending;
	sharedent = (PgStatShared_Database *) entry_ref->shared_stats;

	if (!pgstat_lock_entry(entry_ref, nowait))
		return false;

#define PGSTAT_ACCUM_DBCOUNT(item) \
	(sharedent)->stats.item += (pendingent)->item

	PGSTAT_ACCUM_DBCOUNT(n_xact_commit);
	PGSTAT_ACCUM_DBCOUNT(n_xact_rollback);
	PGSTAT_ACCUM_DBCOUNT(n_blocks_fetched);
	PGSTAT_ACCUM_DBCOUNT(n_blocks_hit);

	PGSTAT_ACCUM_DBCOUNT(n_tuples_returned);
	PGSTAT_ACCUM_DBCOUNT(n_tuples_fetched);
	PGSTAT_ACCUM_DBCOUNT(n_tuples_inserted);
	PGSTAT_ACCUM_DBCOUNT(n_tuples_updated);
	PGSTAT_ACCUM_DBCOUNT(n_tuples_deleted);

	/* last_autovac_time is reported immediately */
	Assert(pendingent->last_autovac_time == 0);

	PGSTAT_ACCUM_DBCOUNT(n_conflict_tablespace);
	PGSTAT_ACCUM_DBCOUNT(n_conflict_lock);
	PGSTAT_ACCUM_DBCOUNT(n_conflict_snapshot);
	PGSTAT_ACCUM_DBCOUNT(n_conflict_bufferpin);
	PGSTAT_ACCUM_DBCOUNT(n_conflict_startup_deadlock);

	PGSTAT_ACCUM_DBCOUNT(n_temp_bytes);
	PGSTAT_ACCUM_DBCOUNT(n_temp_files);
	PGSTAT_ACCUM_DBCOUNT(n_deadlocks);

	/* checksum failures are reported immediately */
	Assert(pendingent->n_checksum_failures == 0);
	Assert(pendingent->last_checksum_failure == 0);

	PGSTAT_ACCUM_DBCOUNT(n_block_read_time);
	PGSTAT_ACCUM_DBCOUNT(n_block_write_time);

	PGSTAT_ACCUM_DBCOUNT(n_sessions);
	PGSTAT_ACCUM_DBCOUNT(total_session_time);
	PGSTAT_ACCUM_DBCOUNT(total_active_time);
	PGSTAT_ACCUM_DBCOUNT(total_idle_in_xact_time);
	PGSTAT_ACCUM_DBCOUNT(n_sessions_abandoned);
	PGSTAT_ACCUM_DBCOUNT(n_sessions_fatal);
	PGSTAT_ACCUM_DBCOUNT(n_sessions_killed);
#undef PGSTAT_ACCUM_DBCOUNT

	pgstat_unlock_entry(entry_ref);

	memset(pendingent, 0, sizeof(*pendingent));

	return true;
}
/*
 * Per-kind reset callback: stamp the reset time on a database stats entry.
 */
void
pgstat_database_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts)
{
	((PgStatShared_Database *) header)->stats.stat_reset_timestamp = ts;
}

View File

@ -17,8 +17,10 @@
#include "postgres.h"
#include "fmgr.h"
#include "utils/inval.h"
#include "utils/pgstat_internal.h"
#include "utils/timestamp.h"
#include "utils/syscache.h"
/* ----------
@ -28,18 +30,6 @@
int pgstat_track_functions = TRACK_FUNC_OFF;
/*
* Indicates if backend has some function stats that it hasn't yet
* sent to the collector.
*/
bool have_function_stats = false;
/*
* Backends store per-function info that's waiting to be sent to the collector
* in this hash table (indexed by function OID).
*/
static HTAB *pgStatFunctions = NULL;
/*
* Total time charged to functions so far in the current backend.
* We use this to help separate "self" and "other" time charges.
@ -61,6 +51,10 @@ pgstat_create_function(Oid proid)
/*
* Ensure that stats are dropped if transaction commits.
*
* NB: This is only reliable because pgstat_init_function_usage() does some
* extra work. If other places start emitting function stats they likely need
* similar logic.
*/
void
pgstat_drop_function(Oid proid)
@ -78,8 +72,9 @@ void
pgstat_init_function_usage(FunctionCallInfo fcinfo,
PgStat_FunctionCallUsage *fcu)
{
PgStat_BackendFunctionEntry *htabent;
bool found;
PgStat_EntryRef *entry_ref;
PgStat_BackendFunctionEntry *pending;
bool created_entry;
if (pgstat_track_functions <= fcinfo->flinfo->fn_stats)
{
@ -88,29 +83,48 @@ pgstat_init_function_usage(FunctionCallInfo fcinfo,
return;
}
if (!pgStatFunctions)
{
/* First time through - initialize function stat table */
HASHCTL hash_ctl;
entry_ref = pgstat_prep_pending_entry(PGSTAT_KIND_FUNCTION,
MyDatabaseId,
fcinfo->flinfo->fn_oid,
&created_entry);
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(PgStat_BackendFunctionEntry);
pgStatFunctions = hash_create("Function stat entries",
PGSTAT_FUNCTION_HASH_SIZE,
&hash_ctl,
HASH_ELEM | HASH_BLOBS);
/*
* If no shared entry already exists, check if the function has been
* deleted concurrently. This can go unnoticed until here because
* executing a statement that just calls a function, does not trigger
* cache invalidation processing. The reason we care about this case is
* that otherwise we could create a new stats entry for an already dropped
* function (for relations etc this is not possible because emitting stats
* requires a lock for the relation to already have been acquired).
*
* It's somewhat ugly to have a behavioral difference based on
* track_functions being enabled/disabled. But it seems acceptable, given
* that there's already behavioral differences depending on whether the
* function is the caches etc.
*
* For correctness it'd be sufficient to set ->dropped to true. However,
* the accepted invalidation will commonly cause "low level" failures in
* PL code, with an OID in the error message. Making this harder to
* test...
*/
if (created_entry)
{
AcceptInvalidationMessages();
if (!SearchSysCacheExists1(PROCOID, ObjectIdGetDatum(fcinfo->flinfo->fn_oid)))
{
pgstat_drop_entry(PGSTAT_KIND_FUNCTION, MyDatabaseId,
fcinfo->flinfo->fn_oid);
ereport(ERROR, errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function call to dropped function"));
}
}
/* Get the stats entry for this function, create if necessary */
htabent = hash_search(pgStatFunctions, &fcinfo->flinfo->fn_oid,
HASH_ENTER, &found);
if (!found)
MemSet(&htabent->f_counts, 0, sizeof(PgStat_FunctionCounts));
pending = entry_ref->pending;
fcu->fs = &htabent->f_counts;
fcu->fs = &pending->f_counts;
/* save stats for this function, later used to compensate for recursion */
fcu->save_f_total_time = htabent->f_counts.f_total_time;
fcu->save_f_total_time = pending->f_counts.f_total_time;
/* save current backend-wide total time */
fcu->save_total = total_func_time;
@ -167,64 +181,37 @@ pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu, bool finalize)
fs->f_numcalls++;
fs->f_total_time = f_total;
INSTR_TIME_ADD(fs->f_self_time, f_self);
/* indicate that we have something to send */
have_function_stats = true;
}
/*
 * Flush out pending stats for the entry
 *
 * If nowait is true, this function returns false if the lock could not be
 * acquired immediately; otherwise true is returned after the pending
 * function counters have been accumulated into the shared entry.
 */
bool
pgstat_function_flush_cb(PgStat_EntryRef *entry_ref, bool nowait)
{
	PgStat_BackendFunctionEntry *localent;
	PgStatShared_Function *shfuncent;

	localent = (PgStat_BackendFunctionEntry *) entry_ref->pending;
	shfuncent = (PgStatShared_Function *) entry_ref->shared_stats;

	/* localent always has non-zero content */

	if (!pgstat_lock_entry(entry_ref, nowait))
		return false;

	shfuncent->stats.f_numcalls += localent->f_counts.f_numcalls;
	/* convert instr_time accumulators to microseconds for the shared entry */
	shfuncent->stats.f_total_time +=
		INSTR_TIME_GET_MICROSEC(localent->f_counts.f_total_time);
	shfuncent->stats.f_self_time +=
		INSTR_TIME_GET_MICROSEC(localent->f_counts.f_self_time);

	pgstat_unlock_entry(entry_ref);

	return true;
}
/*
@ -235,12 +222,22 @@ pgstat_send_funcstats(void)
/*
 * Find any existing pending PgStat_BackendFunctionEntry for the given
 * function OID, or NULL if none exists.
 */
PgStat_BackendFunctionEntry *
find_funcstat_entry(Oid func_id)
{
	PgStat_EntryRef *entry_ref;

	pgstat_assert_is_up();

	entry_ref = pgstat_fetch_pending_entry(PGSTAT_KIND_FUNCTION, MyDatabaseId, func_id);

	if (entry_ref)
		return entry_ref->pending;
	return NULL;
}
/*
 * Support function for the SQL-callable pgstat* functions. Returns
 * the collected statistics for one function or NULL.
 */
PgStat_StatFuncEntry *
pgstat_fetch_stat_funcentry(Oid func_id)
{
	return (PgStat_StatFuncEntry *)
		pgstat_fetch_entry(PGSTAT_KIND_FUNCTION, MyDatabaseId, func_id);
}

View File

@ -19,6 +19,7 @@
#include "access/twophase_rmgr.h"
#include "access/xact.h"
#include "catalog/partition.h"
#include "postmaster/autovacuum.h"
#include "utils/memutils.h"
#include "utils/pgstat_internal.h"
@ -26,38 +27,6 @@
#include "utils/timestamp.h"
/*
* Structures in which backends store per-table info that's waiting to be
* sent to the collector.
*
* NOTE: once allocated, TabStatusArray structures are never moved or deleted
* for the life of the backend. Also, we zero out the t_id fields of the
* contained PgStat_TableStatus structs whenever they are not actively in use.
* This allows relcache pgstat_info pointers to be treated as long-lived data,
* avoiding repeated searches in pgstat_init_relation() when a relation is
* repeatedly opened during a transaction.
*/
#define TABSTAT_QUANTUM 100 /* we alloc this many at a time */
typedef struct TabStatusArray
{
struct TabStatusArray *tsa_next; /* link to next array, if any */
int tsa_used; /* # entries currently used */
PgStat_TableStatus tsa_entries[TABSTAT_QUANTUM]; /* per-table data */
} TabStatusArray;
static TabStatusArray *pgStatTabList = NULL;
/*
* pgStatTabHash entry: map from relation OID to PgStat_TableStatus pointer
*/
typedef struct TabStatHashEntry
{
Oid t_id;
PgStat_TableStatus *tsa_entry;
} TabStatHashEntry;
/* Record that's written to 2PC state file when pgstat state is persisted */
typedef struct TwoPhasePgStatRecord
{
@ -74,27 +43,13 @@ typedef struct TwoPhasePgStatRecord
} TwoPhasePgStatRecord;
static PgStat_TableStatus *get_tabstat_entry(Oid rel_id, bool isshared);
static void pgstat_send_tabstat(PgStat_MsgTabstat *tsmsg, TimestampTz now);
static PgStat_TableStatus *pgstat_prep_relation_pending(Oid rel_id, bool isshared);
static void add_tabstat_xact_level(PgStat_TableStatus *pgstat_info, int nest_level);
static void ensure_tabstat_xact_level(PgStat_TableStatus *pgstat_info);
static void save_truncdrop_counters(PgStat_TableXactStatus *trans, bool is_drop);
static void restore_truncdrop_counters(PgStat_TableXactStatus *trans);
/*
* Indicates if backend has some relation stats that it hasn't yet
* sent to the collector.
*/
bool have_relation_stats;
/*
* Hash table for O(1) t_id -> tsa_entry lookup
*/
static HTAB *pgStatTabHash = NULL;
/*
* Copy stats between relations. This is used for things like REINDEX
* CONCURRENTLY.
@ -103,43 +58,39 @@ void
pgstat_copy_relation_stats(Relation dst, Relation src)
{
	PgStat_StatTabEntry *srcstats;
	PgStatShared_Relation *dstshstats;
	PgStat_EntryRef *dst_ref;

	/* fetch the source relation's stats, knowing whether it is shared */
	srcstats = pgstat_fetch_stat_tabentry_ext(src->rd_rel->relisshared,
											  RelationGetRelid(src));
	if (!srcstats)
		return;

	/* lock the destination's shared stats entry while we overwrite it */
	dst_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_RELATION,
										  dst->rd_rel->relisshared ? InvalidOid : MyDatabaseId,
										  RelationGetRelid(dst),
										  false);

	dstshstats = (PgStatShared_Relation *) dst_ref->shared_stats;
	dstshstats->stats = *srcstats;

	pgstat_unlock_entry(dst_ref);
}
/*
 * Initialize a relcache entry to count access statistics. Called whenever a
 * relation is opened.
 *
 * We assume that a relcache entry's pgstat_info field is zeroed by relcache.c
 * when the relcache entry is made; thereafter it is long-lived data.
 *
 * This does not create a reference to a stats entry in shared memory, nor
 * allocate memory for the pending stats. That happens in
 * pgstat_assoc_relation().
 */
void
pgstat_init_relation(Relation rel)
{
	char		relkind = rel->rd_rel->relkind;

	/* We only count stats for relations with storage and partitioned tables */
	if (!RELKIND_HAS_STORAGE(relkind) && relkind != RELKIND_PARTITIONED_TABLE)
	{
		rel->pgstat_enabled = false;
		rel->pgstat_info = NULL;
		return;
	}

	if (!pgstat_track_counts)
	{
		/* drop a stale link left over from when counting was enabled */
		if (rel->pgstat_info)
			pgstat_unlink_relation(rel);

		/* We're not counting at all */
		rel->pgstat_enabled = false;
		rel->pgstat_info = NULL;
		return;
	}

	rel->pgstat_enabled = true;
}
/*
* Prepare for statistics for this relation to be collected.
*
* This ensures we have a reference to the stats entry before stats can be
* generated. That is important because a relation drop in another connection
* could otherwise lead to the stats entry being dropped, which then later
* would get recreated when flushing stats.
*
* This is separate from pgstat_init_relation() as it is not uncommon for
* relcache entries to be opened without ever getting stats reported.
*/
void
pgstat_assoc_relation(Relation rel)
{
	Assert(rel->pgstat_enabled);
	Assert(rel->pgstat_info == NULL);

	/* Else find or make the PgStat_TableStatus entry, and update link */
	rel->pgstat_info = pgstat_prep_relation_pending(RelationGetRelid(rel),
													rel->rd_rel->relisshared);

	/* don't allow link a stats to multiple relcache entries */
	Assert(rel->pgstat_info->relation == NULL);

	/* mark this relation as the owner */
	rel->pgstat_info->relation = rel;
}
/*
* Break the mutual link between a relcache entry and pending stats entry.
* This must be called whenever one end of the link is removed.
*/
void
pgstat_unlink_relation(Relation rel)
{
	PgStat_TableStatus *stats = rel->pgstat_info;

	/* nothing to do if there is no pending-stats link */
	if (stats == NULL)
		return;

	/* link sanity check */
	Assert(stats->relation == rel);

	/* sever the link in both directions */
	stats->relation = NULL;
	rel->pgstat_info = NULL;
}
/*
@ -187,9 +179,26 @@ pgstat_create_relation(Relation rel)
/*
 * Schedule the relation's stats entry for removal; the drop only takes
 * effect if the surrounding transaction commits (pgstat_drop_transactional).
 */
void
pgstat_drop_relation(Relation rel)
{
	int			nest_level = GetCurrentTransactionNestLevel();
	PgStat_TableStatus *pgstat_info = rel->pgstat_info;

	pgstat_drop_transactional(PGSTAT_KIND_RELATION,
							  rel->rd_rel->relisshared ? InvalidOid : MyDatabaseId,
							  RelationGetRelid(rel));

	/*
	 * Transactionally set counters to 0. That ensures that accesses to
	 * pg_stat_xact_all_tables inside the transaction show 0.
	 */
	if (pgstat_info &&
		pgstat_info->trans != NULL &&
		pgstat_info->trans->nest_level == nest_level)
	{
		/* save current counters so they can be restored on (sub)xact abort */
		save_truncdrop_counters(pgstat_info->trans, true);
		pgstat_info->trans->tuples_inserted = 0;
		pgstat_info->trans->tuples_updated = 0;
		pgstat_info->trans->tuples_deleted = 0;
	}
}
/*
@ -199,19 +208,52 @@ void
/*
 * Report that the table was just vacuumed and update the shared stats entry
 * directly.
 */
void
pgstat_report_vacuum(Oid tableoid, bool shared,
					 PgStat_Counter livetuples, PgStat_Counter deadtuples)
{
	PgStat_EntryRef *entry_ref;
	PgStatShared_Relation *shtabentry;
	PgStat_StatTabEntry *tabentry;
	Oid			dboid = (shared ? InvalidOid : MyDatabaseId);
	TimestampTz ts;

	if (!pgstat_track_counts)
		return;

	/* Store the data in the table's hash table entry. */
	ts = GetCurrentTimestamp();

	/* block acquiring lock for the same reason as pgstat_report_autovac() */
	entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_RELATION,
											dboid, tableoid, false);

	shtabentry = (PgStatShared_Relation *) entry_ref->shared_stats;
	tabentry = &shtabentry->stats;

	tabentry->n_live_tuples = livetuples;
	tabentry->n_dead_tuples = deadtuples;

	/*
	 * It is quite possible that a non-aggressive VACUUM ended up skipping
	 * various pages, however, we'll zero the insert counter here regardless.
	 * It's currently used only to track when we need to perform an "insert"
	 * autovacuum, which are mainly intended to freeze newly inserted tuples.
	 * Zeroing this may just mean we'll not try to vacuum the table again
	 * until enough tuples have been inserted to trigger another insert
	 * autovacuum. An anti-wraparound autovacuum will catch any persistent
	 * stragglers.
	 */
	tabentry->inserts_since_vacuum = 0;

	if (IsAutoVacuumWorkerProcess())
	{
		tabentry->autovac_vacuum_timestamp = ts;
		tabentry->autovac_vacuum_count++;
	}
	else
	{
		tabentry->vacuum_timestamp = ts;
		tabentry->vacuum_count++;
	}

	pgstat_unlock_entry(entry_ref);
}
/*
@ -225,9 +267,12 @@ pgstat_report_analyze(Relation rel,
PgStat_Counter livetuples, PgStat_Counter deadtuples,
bool resetcounter)
{
PgStat_MsgAnalyze msg;
PgStat_EntryRef *entry_ref;
PgStatShared_Relation *shtabentry;
PgStat_StatTabEntry *tabentry;
Oid dboid = (rel->rd_rel->relisshared ? InvalidOid : MyDatabaseId);
if (pgStatSock == PGINVALID_SOCKET || !pgstat_track_counts)
if (!pgstat_track_counts)
return;
/*
@ -259,15 +304,39 @@ pgstat_report_analyze(Relation rel,
deadtuples = Max(deadtuples, 0);
}
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_ANALYZE);
msg.m_databaseid = rel->rd_rel->relisshared ? InvalidOid : MyDatabaseId;
msg.m_tableoid = RelationGetRelid(rel);
msg.m_autovacuum = IsAutoVacuumWorkerProcess();
msg.m_resetcounter = resetcounter;
msg.m_analyzetime = GetCurrentTimestamp();
msg.m_live_tuples = livetuples;
msg.m_dead_tuples = deadtuples;
pgstat_send(&msg, sizeof(msg));
/* block acquiring lock for the same reason as pgstat_report_autovac() */
entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_RELATION, dboid,
RelationGetRelid(rel),
false);
/* can't get dropped while accessed */
Assert(entry_ref != NULL && entry_ref->shared_stats != NULL);
shtabentry = (PgStatShared_Relation *) entry_ref->shared_stats;
tabentry = &shtabentry->stats;
tabentry->n_live_tuples = livetuples;
tabentry->n_dead_tuples = deadtuples;
/*
* If commanded, reset changes_since_analyze to zero. This forgets any
* changes that were committed while the ANALYZE was in progress, but we
* have no good way to estimate how many of those there were.
*/
if (resetcounter)
tabentry->changes_since_analyze = 0;
if (IsAutoVacuumWorkerProcess())
{
tabentry->autovac_analyze_timestamp = GetCurrentTimestamp();
tabentry->autovac_analyze_count++;
}
else
{
tabentry->analyze_timestamp = GetCurrentTimestamp();
tabentry->analyze_count++;
}
pgstat_unlock_entry(entry_ref);
}
/*
@ -356,30 +425,61 @@ pgstat_update_heap_dead_tuples(Relation rel, int delta)
}
}
/*
* Support function for the SQL-callable pgstat* functions. Returns
* the collected statistics for one table or NULL. NULL doesn't mean
* that the table doesn't exist, just that there are no statistics, so the
* caller is better off to report ZERO instead.
*/
PgStat_StatTabEntry *
pgstat_fetch_stat_tabentry(Oid relid)
{
	PgStat_StatTabEntry *entry;

	/* look in the current database first ... */
	entry = pgstat_fetch_stat_tabentry_ext(false, relid);

	/* ... and fall back to the shared catalogs if not found there */
	if (entry == NULL)
		entry = pgstat_fetch_stat_tabentry_ext(true, relid);

	return entry;
}
/*
* More efficient version of pgstat_fetch_stat_tabentry(), allowing to specify
* whether the to-be-accessed table is a shared relation or not.
*/
PgStat_StatTabEntry *
pgstat_fetch_stat_tabentry_ext(bool shared, Oid reloid)
{
	Oid			dboid;

	/* shared relations are stored under the invalid database oid */
	dboid = shared ? InvalidOid : MyDatabaseId;

	return (PgStat_StatTabEntry *)
		pgstat_fetch_entry(PGSTAT_KIND_RELATION, dboid, reloid);
}
/*
 * Find any existing PgStat_TableStatus entry for rel_id in the current
 * database. If not found, try finding from shared tables.
 *
 * If no entry found, return NULL, don't create a new one.
 */
PgStat_TableStatus *
find_tabstat_entry(Oid rel_id)
{
	PgStat_EntryRef *entry_ref;

	/* try the current database first, then the shared-catalog namespace */
	entry_ref = pgstat_fetch_pending_entry(PGSTAT_KIND_RELATION, MyDatabaseId, rel_id);
	if (!entry_ref)
		entry_ref = pgstat_fetch_pending_entry(PGSTAT_KIND_RELATION, InvalidOid, rel_id);

	if (entry_ref)
		return entry_ref->pending;
	return NULL;
}
/*
@ -536,7 +636,7 @@ AtPrepare_PgStat_Relations(PgStat_SubXactStatus *xact_state)
for (trans = xact_state->first; trans != NULL; trans = trans->next)
{
PgStat_TableStatus *tabstat;
PgStat_TableStatus *tabstat PG_USED_FOR_ASSERTS_ONLY;
TwoPhasePgStatRecord record;
Assert(trans->nest_level == 1);
@ -594,7 +694,7 @@ pgstat_twophase_postcommit(TransactionId xid, uint16 info,
PgStat_TableStatus *pgstat_info;
/* Find or create a tabstat entry for the rel */
pgstat_info = get_tabstat_entry(rec->t_id, rec->t_shared);
pgstat_info = pgstat_prep_relation_pending(rec->t_id, rec->t_shared);
/* Same math as in AtEOXact_PgStat, commit case */
pgstat_info->t_counts.t_tuples_inserted += rec->tuples_inserted;
@ -630,7 +730,7 @@ pgstat_twophase_postabort(TransactionId xid, uint16 info,
PgStat_TableStatus *pgstat_info;
/* Find or create a tabstat entry for the rel */
pgstat_info = get_tabstat_entry(rec->t_id, rec->t_shared);
pgstat_info = pgstat_prep_relation_pending(rec->t_id, rec->t_shared);
/* Same math as in AtEOXact_PgStat, abort case */
if (rec->t_truncdropped)
@ -647,204 +747,116 @@ pgstat_twophase_postabort(TransactionId xid, uint16 info,
}
/*
* Subroutine for pgstat_report_stat: Send relation statistics
* Flush out pending stats for the entry
*
* If nowait is true, this function returns false if lock could not
* immediately acquired, otherwise true is returned.
*
* Some of the stats are copied to the corresponding pending database stats
* entry when successfully flushing.
*/
void
pgstat_send_tabstats(TimestampTz now, bool disconnect)
bool
pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait)
{
/* we assume this inits to all zeroes: */
static const PgStat_TableCounts all_zeroes;
PgStat_MsgTabstat regular_msg;
PgStat_MsgTabstat shared_msg;
TabStatusArray *tsa;
int i;
Oid dboid;
PgStat_TableStatus *lstats; /* pending stats entry */
PgStatShared_Relation *shtabstats;
PgStat_StatTabEntry *tabentry; /* table entry of shared stats */
PgStat_StatDBEntry *dbentry; /* pending database entry */
dboid = entry_ref->shared_entry->key.dboid;
lstats = (PgStat_TableStatus *) entry_ref->pending;
shtabstats = (PgStatShared_Relation *) entry_ref->shared_stats;
/*
* Destroy pgStatTabHash before we start invalidating PgStat_TableEntry
* entries it points to. (Should we fail partway through the loop below,
* it's okay to have removed the hashtable already --- the only
* consequence is we'd get multiple entries for the same table in the
* pgStatTabList, and that's safe.)
* Ignore entries that didn't accumulate any actual counts, such as
* indexes that were opened by the planner but not used.
*/
if (pgStatTabHash)
hash_destroy(pgStatTabHash);
pgStatTabHash = NULL;
/*
* Scan through the TabStatusArray struct(s) to find tables that actually
* have counts, and build messages to send. We have to separate shared
* relations from regular ones because the databaseid field in the message
* header has to depend on that.
*/
regular_msg.m_databaseid = MyDatabaseId;
shared_msg.m_databaseid = InvalidOid;
regular_msg.m_nentries = 0;
shared_msg.m_nentries = 0;
for (tsa = pgStatTabList; tsa != NULL; tsa = tsa->tsa_next)
if (memcmp(&lstats->t_counts, &all_zeroes,
sizeof(PgStat_TableCounts)) == 0)
{
for (i = 0; i < tsa->tsa_used; i++)
{
PgStat_TableStatus *entry = &tsa->tsa_entries[i];
PgStat_MsgTabstat *this_msg;
PgStat_TableEntry *this_ent;
/* Shouldn't have any pending transaction-dependent counts */
Assert(entry->trans == NULL);
/*
* Ignore entries that didn't accumulate any actual counts, such
* as indexes that were opened by the planner but not used.
*/
if (memcmp(&entry->t_counts, &all_zeroes,
sizeof(PgStat_TableCounts)) == 0)
continue;
/*
* OK, insert data into the appropriate message, and send if full.
*/
this_msg = entry->t_shared ? &shared_msg : &regular_msg;
this_ent = &this_msg->m_entry[this_msg->m_nentries];
this_ent->t_id = entry->t_id;
memcpy(&this_ent->t_counts, &entry->t_counts,
sizeof(PgStat_TableCounts));
if (++this_msg->m_nentries >= PGSTAT_NUM_TABENTRIES)
{
pgstat_send_tabstat(this_msg, now);
this_msg->m_nentries = 0;
}
}
/* zero out PgStat_TableStatus structs after use */
MemSet(tsa->tsa_entries, 0,
tsa->tsa_used * sizeof(PgStat_TableStatus));
tsa->tsa_used = 0;
return true;
}
/*
* Send partial messages. Make sure that any pending xact commit/abort
* and connection stats get counted, even if there are no table stats to
* send.
*/
if (regular_msg.m_nentries > 0 ||
pgStatXactCommit > 0 || pgStatXactRollback > 0 || disconnect)
pgstat_send_tabstat(&regular_msg, now);
if (shared_msg.m_nentries > 0)
pgstat_send_tabstat(&shared_msg, now);
if (!pgstat_lock_entry(entry_ref, nowait))
return false;
have_relation_stats = false;
/* add the values to the shared entry. */
tabentry = &shtabstats->stats;
tabentry->numscans += lstats->t_counts.t_numscans;
tabentry->tuples_returned += lstats->t_counts.t_tuples_returned;
tabentry->tuples_fetched += lstats->t_counts.t_tuples_fetched;
tabentry->tuples_inserted += lstats->t_counts.t_tuples_inserted;
tabentry->tuples_updated += lstats->t_counts.t_tuples_updated;
tabentry->tuples_deleted += lstats->t_counts.t_tuples_deleted;
tabentry->tuples_hot_updated += lstats->t_counts.t_tuples_hot_updated;
/*
* If table was truncated/dropped, first reset the live/dead counters.
*/
if (lstats->t_counts.t_truncdropped)
{
tabentry->n_live_tuples = 0;
tabentry->n_dead_tuples = 0;
tabentry->inserts_since_vacuum = 0;
}
tabentry->n_live_tuples += lstats->t_counts.t_delta_live_tuples;
tabentry->n_dead_tuples += lstats->t_counts.t_delta_dead_tuples;
tabentry->changes_since_analyze += lstats->t_counts.t_changed_tuples;
tabentry->inserts_since_vacuum += lstats->t_counts.t_tuples_inserted;
tabentry->blocks_fetched += lstats->t_counts.t_blocks_fetched;
tabentry->blocks_hit += lstats->t_counts.t_blocks_hit;
/* Clamp n_live_tuples in case of negative delta_live_tuples */
tabentry->n_live_tuples = Max(tabentry->n_live_tuples, 0);
/* Likewise for n_dead_tuples */
tabentry->n_dead_tuples = Max(tabentry->n_dead_tuples, 0);
pgstat_unlock_entry(entry_ref);
/* The entry was successfully flushed, add the same to database stats */
dbentry = pgstat_prep_database_pending(dboid);
dbentry->n_tuples_returned += lstats->t_counts.t_tuples_returned;
dbentry->n_tuples_fetched += lstats->t_counts.t_tuples_fetched;
dbentry->n_tuples_inserted += lstats->t_counts.t_tuples_inserted;
dbentry->n_tuples_updated += lstats->t_counts.t_tuples_updated;
dbentry->n_tuples_deleted += lstats->t_counts.t_tuples_deleted;
dbentry->n_blocks_fetched += lstats->t_counts.t_blocks_fetched;
dbentry->n_blocks_hit += lstats->t_counts.t_blocks_hit;
return true;
}
/*
* Subroutine for pgstat_send_tabstats: finish and send one tabstat message
*/
static void
pgstat_send_tabstat(PgStat_MsgTabstat *tsmsg, TimestampTz now)
void
pgstat_relation_delete_pending_cb(PgStat_EntryRef *entry_ref)
{
int n;
int len;
PgStat_TableStatus *pending = (PgStat_TableStatus *) entry_ref->pending;
/* It's unlikely we'd get here with no socket, but maybe not impossible */
if (pgStatSock == PGINVALID_SOCKET)
return;
/*
* Report and reset accumulated xact commit/rollback and I/O timings
* whenever we send a normal tabstat message
*/
pgstat_update_dbstats(tsmsg, now);
n = tsmsg->m_nentries;
len = offsetof(PgStat_MsgTabstat, m_entry[0]) +
n * sizeof(PgStat_TableEntry);
pgstat_setheader(&tsmsg->m_hdr, PGSTAT_MTYPE_TABSTAT);
pgstat_send(tsmsg, len);
if (pending->relation)
pgstat_unlink_relation(pending->relation);
}
/*
 * Find or create a PgStat_TableStatus entry for rel. New entry is created and
 * initialized if not exists.
 */
static PgStat_TableStatus *
pgstat_prep_relation_pending(Oid rel_id, bool isshared)
{
	PgStat_EntryRef *entry_ref;
	PgStat_TableStatus *pending;

	/* shared relations are stored under the invalid database oid */
	entry_ref = pgstat_prep_pending_entry(PGSTAT_KIND_RELATION,
										  isshared ? InvalidOid : MyDatabaseId,
										  rel_id, NULL);

	pending = entry_ref->pending;
	pending->t_id = rel_id;
	pending->t_shared = isshared;

	return pending;
}
/*

View File

@ -8,6 +8,14 @@
* storage implementation and the details about individual types of
* statistics.
*
 * Replication slot stats work a bit differently from other
* variable-numbered stats. Slots do not have oids (so they can be created on
* physical replicas). Use the slot index as object id while running. However,
* the slot index can change when restarting. That is addressed by using the
* name when (de-)serializing. After a restart it is possible for slots to
* have been dropped while shut down, which is addressed by not restoring
* stats for slots that cannot be found by name when starting up.
*
* Copyright (c) 2001-2022, PostgreSQL Global Development Group
*
* IDENTIFICATION
@ -22,6 +30,9 @@
#include "utils/pgstat_internal.h"
static int get_replslot_index(const char *name);
/*
* Reset counters for a single replication slot.
*
@ -32,18 +43,10 @@ void
pgstat_reset_replslot(const char *name)
{
ReplicationSlot *slot;
PgStat_MsgResetreplslotcounter msg;
AssertArg(name != NULL);
if (pgStatSock == PGINVALID_SOCKET)
return;
/*
* Check if the slot exists with the given name. It is possible that by
* the time this message is executed the slot is dropped but at least this
* check will ensure that the given name is for a valid slot.
*/
/* Check if the slot exists with the given name. */
slot = SearchNamedReplicationSlot(name, true);
if (!slot)
@ -59,10 +62,9 @@ pgstat_reset_replslot(const char *name)
if (SlotIsPhysical(slot))
return;
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_RESETREPLSLOTCOUNTER);
namestrcpy(&msg.m_slotname, name);
msg.clearall = false;
pgstat_send(&msg, sizeof(msg));
/* reset this one entry */
pgstat_reset(PGSTAT_KIND_REPLSLOT, InvalidOid,
ReplicationSlotIndex(slot));
}
/*
@ -71,24 +73,34 @@ pgstat_reset_replslot(const char *name)
void
pgstat_report_replslot(ReplicationSlot *slot, const PgStat_StatReplSlotEntry *repSlotStat)
{
	PgStat_EntryRef *entry_ref;
	PgStatShared_ReplSlot *shstatent;
	PgStat_StatReplSlotEntry *statent;

	/* slots are keyed by their index, under the invalid database oid */
	entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_REPLSLOT, InvalidOid,
											ReplicationSlotIndex(slot), false);
	shstatent = (PgStatShared_ReplSlot *) entry_ref->shared_stats;
	statent = &shstatent->stats;

	/*
	 * Any mismatch should have been fixed in pgstat_create_replslot() or
	 * pgstat_acquire_replslot().
	 */
	Assert(namestrcmp(&statent->slotname, NameStr(slot->data.name)) == 0);

	/* Update the replication slot statistics */
#define REPLSLOT_ACC(fld) statent->fld += repSlotStat->fld
	REPLSLOT_ACC(spill_txns);
	REPLSLOT_ACC(spill_count);
	REPLSLOT_ACC(spill_bytes);
	REPLSLOT_ACC(stream_txns);
	REPLSLOT_ACC(stream_count);
	REPLSLOT_ACC(stream_bytes);
	REPLSLOT_ACC(total_txns);
	REPLSLOT_ACC(total_bytes);
#undef REPLSLOT_ACC

	pgstat_unlock_entry(entry_ref);
}
/*
@ -100,13 +112,50 @@ pgstat_report_replslot(ReplicationSlot *slot, const PgStat_StatReplSlotEntry *re
void
pgstat_create_replslot(ReplicationSlot *slot)
{
	PgStat_EntryRef *entry_ref;
	PgStatShared_ReplSlot *shstatent;

	entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_REPLSLOT, InvalidOid,
											ReplicationSlotIndex(slot), false);
	shstatent = (PgStatShared_ReplSlot *) entry_ref->shared_stats;

	/*
	 * NB: need to accept that there might be stats from an older slot, e.g.
	 * if we previously crashed after dropping a slot.
	 */
	memset(&shstatent->stats, 0, sizeof(shstatent->stats));
	namestrcpy(&shstatent->stats.slotname, NameStr(slot->data.name));

	pgstat_unlock_entry(entry_ref);
}
/*
* Report replication slot has been acquired.
*/
void
pgstat_acquire_replslot(ReplicationSlot *slot)
{
	PgStat_EntryRef *entry_ref;
	PgStatShared_ReplSlot *shstatent;
	PgStat_StatReplSlotEntry *statent;

	entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_REPLSLOT, InvalidOid,
											ReplicationSlotIndex(slot), false);
	shstatent = (PgStatShared_ReplSlot *) entry_ref->shared_stats;
	statent = &shstatent->stats;

	/*
	 * Stats from an older, since-dropped slot may still live under this
	 * index (e.g. after a crash following a slot drop). If the name is unset
	 * or doesn't match, reinitialize the entry for this slot.
	 */
	if (NameStr(statent->slotname)[0] == 0 ||
		namestrcmp(&statent->slotname, NameStr(slot->data.name)) != 0)
	{
		memset(statent, 0, sizeof(*statent));
		namestrcpy(&statent->slotname, NameStr(slot->data.name));
	}

	pgstat_unlock_entry(entry_ref);
}
/*
@ -115,11 +164,65 @@ pgstat_create_replslot(ReplicationSlot *slot)
void
pgstat_drop_replslot(ReplicationSlot *slot)
{
	/* remove the slot's shared stats entry, keyed by its index */
	pgstat_drop_entry(PGSTAT_KIND_REPLSLOT, InvalidOid,
					  ReplicationSlotIndex(slot));
}
/*
* Support function for the SQL-callable pgstat* functions. Returns
* a pointer to the replication slot statistics struct.
*/
PgStat_StatReplSlotEntry *
pgstat_fetch_replslot(NameData slotname)
{
	int			slotidx = get_replslot_index(NameStr(slotname));

	/* the slot may have been dropped since the name was obtained */
	return (slotidx < 0) ? NULL :
		(PgStat_StatReplSlotEntry *) pgstat_fetch_entry(PGSTAT_KIND_REPLSLOT,
														InvalidOid, slotidx);
}
void
pgstat_replslot_to_serialized_name_cb(const PgStatShared_Common *header, NameData *name)
{
	PgStatShared_ReplSlot *shslot = (PgStatShared_ReplSlot *) header;

	/* slots are identified by name when serialized, not by index */
	namestrcpy(name, NameStr(shslot->stats.slotname));
}
bool
pgstat_replslot_from_serialized_name_cb(const NameData *name, PgStat_HashKey *key)
{
	int			slotidx = get_replslot_index(NameStr(*name));

	/* slot might have been deleted */
	if (slotidx < 0)
		return false;

	key->kind = PGSTAT_KIND_REPLSLOT;
	key->dboid = InvalidOid;
	key->objoid = slotidx;

	return true;
}
void
pgstat_replslot_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts)
{
	PgStatShared_ReplSlot *shslot = (PgStatShared_ReplSlot *) header;

	/* record when this slot's counters were last reset */
	shslot->stats.stat_reset_timestamp = ts;
}
/* Resolve a slot name to its current index, or -1 if no such slot exists. */
static int
get_replslot_index(const char *name)
{
	ReplicationSlot *slot;

	AssertArg(name != NULL);

	slot = SearchNamedReplicationSlot(name, true);

	return slot ? ReplicationSlotIndex(slot) : -1;
}

View File

@ -0,0 +1,987 @@
/* -------------------------------------------------------------------------
*
* pgstat_shmem.c
* Storage of stats entries in shared memory
*
* Copyright (c) 2001-2022, PostgreSQL Global Development Group
*
* IDENTIFICATION
* src/backend/utils/activity/pgstat_shmem.c
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "pgstat.h"
#include "storage/shmem.h"
#include "utils/memutils.h"
#include "utils/pgstat_internal.h"
#define PGSTAT_ENTRY_REF_HASH_SIZE 128
/* hash table entry for finding the PgStat_EntryRef for a key */
typedef struct PgStat_EntryRefHashEntry
{
PgStat_HashKey key; /* hash key */
char status; /* for simplehash use */
PgStat_EntryRef *entry_ref;
} PgStat_EntryRefHashEntry;
/* for references to shared statistics entries */
#define SH_PREFIX pgstat_entry_ref_hash
#define SH_ELEMENT_TYPE PgStat_EntryRefHashEntry
#define SH_KEY_TYPE PgStat_HashKey
#define SH_KEY key
#define SH_HASH_KEY(tb, key) \
pgstat_hash_hash_key(&key, sizeof(PgStat_HashKey), NULL)
#define SH_EQUAL(tb, a, b) \
pgstat_cmp_hash_key(&a, &b, sizeof(PgStat_HashKey), NULL) == 0
#define SH_SCOPE static inline
#define SH_DEFINE
#define SH_DECLARE
#include "lib/simplehash.h"
static void pgstat_drop_database_and_contents(Oid dboid);
static void pgstat_free_entry(PgStatShared_HashEntry *shent, dshash_seq_status *hstat);
static void pgstat_release_entry_ref(PgStat_HashKey key, PgStat_EntryRef *entry_ref, bool discard_pending);
static bool pgstat_need_entry_refs_gc(void);
static void pgstat_gc_entry_refs(void);
static void pgstat_release_all_entry_refs(bool discard_pending);
typedef bool (*ReleaseMatchCB) (PgStat_EntryRefHashEntry *, Datum data);
static void pgstat_release_matching_entry_refs(bool discard_pending, ReleaseMatchCB match, Datum match_data);
static void pgstat_setup_memcxt(void);
/* parameter for the shared hash */
static const dshash_parameters dsh_params = {
sizeof(PgStat_HashKey),
sizeof(PgStatShared_HashEntry),
pgstat_cmp_hash_key,
pgstat_hash_hash_key,
LWTRANCHE_PGSTATS_HASH
};
/*
* Backend local references to shared stats entries. If there are pending
* updates to a stats entry, the PgStat_EntryRef is added to the pgStatPending
* list.
*
* When a stats entry is dropped each backend needs to release its reference
* to it before the memory can be released. To trigger that
* pgStatLocal.shmem->gc_request_count is incremented - which each backend
* compares to their copy of pgStatSharedRefAge on a regular basis.
*/
static pgstat_entry_ref_hash_hash *pgStatEntryRefHash = NULL;
static int pgStatSharedRefAge = 0; /* cache age of pgStatShmLookupCache */
/*
 * Memory contexts containing the pgStatEntryRefHash table and the
 * pgStatSharedRef entries respectively. Kept separate to make it easier to
 * track / attribute memory usage.
 *
 * Both are created lazily by pgstat_setup_memcxt().
 */
static MemoryContext pgStatSharedRefContext = NULL;
static MemoryContext pgStatEntryRefHashContext = NULL;
/* ------------------------------------------------------------
 * Public functions called from postmaster follow
 * ------------------------------------------------------------
 */

/*
 * Size of the shared memory allocation backing the shared stats hash
 * table. It is carved out of the main shared memory segment rather than
 * dynamic shared memory, so that the postmaster can initialize it.
 */
static Size
pgstat_dsa_init_size(void)
{
	/*
	 * The dshash header / initial bucket array has to live in "plain" shared
	 * memory, and it is beneficial not to require DSM segments right away.
	 * 256kB seems to work well and is not disproportional compared to other
	 * constant-sized shared memory allocations. NB: users can additionally
	 * set min_dynamic_shared_memory to avoid DSMs further.
	 */
	Size		sz = 256 * 1024;

	Assert(dsa_minimum_size() <= sz);

	return MAXALIGN(sz);
}
/*
 * Compute shared memory space needed for cumulative statistics: the control
 * struct plus the in-place dsa area backing the shared hash table.
 */
Size
StatsShmemSize(void)
{
	return add_size(MAXALIGN(sizeof(PgStat_ShmemControl)),
					pgstat_dsa_init_size());
}
/*
 * Initialize cumulative statistics system during startup
 *
 * In the postmaster (or a standalone backend) this creates the control
 * struct, the in-place dsa area and the shared dshash table. In other
 * processes the struct is expected to exist already (see the Assert(found)
 * below); they attach separately via pgstat_attach_shmem().
 */
void
StatsShmemInit(void)
{
	bool		found;
	Size		sz;

	sz = StatsShmemSize();
	pgStatLocal.shmem = (PgStat_ShmemControl *)
		ShmemInitStruct("Shared Memory Stats", sz, &found);

	if (!IsUnderPostmaster)
	{
		dsa_area   *dsa;
		dshash_table *dsh;
		PgStat_ShmemControl *ctl = pgStatLocal.shmem;
		char	   *p = (char *) ctl;

		Assert(!found);

		/* the allocation of pgStatLocal.shmem itself */
		p += MAXALIGN(sizeof(PgStat_ShmemControl));

		/*
		 * Create a small dsa allocation in plain shared memory. This is
		 * required because postmaster cannot use dsm segments. It also
		 * provides a small efficiency win.
		 */
		ctl->raw_dsa_area = p;
		p += MAXALIGN(pgstat_dsa_init_size());
		dsa = dsa_create_in_place(ctl->raw_dsa_area,
								  pgstat_dsa_init_size(),
								  LWTRANCHE_PGSTATS_DSA, 0);
		dsa_pin(dsa);

		/*
		 * To ensure dshash is created in "plain" shared memory, temporarily
		 * limit size of dsa to the initial size of the dsa.
		 */
		dsa_set_size_limit(dsa, pgstat_dsa_init_size());

		/*
		 * With the limit in place, create the dshash table. XXX: It'd be nice
		 * if there were dshash_create_in_place().
		 */
		dsh = dshash_create(dsa, &dsh_params, 0);
		ctl->hash_handle = dshash_get_hash_table_handle(dsh);

		/* lift limit set above */
		dsa_set_size_limit(dsa, -1);

		/*
		 * Postmaster will never access these again, thus free the local
		 * dsa/dshash references.
		 */
		dshash_detach(dsh);
		dsa_detach(dsa);

		/* start at 1, so a zero-initialized backend age reads as stale */
		pg_atomic_init_u64(&ctl->gc_request_count, 1);

		/* initialize fixed-numbered stats */
		LWLockInitialize(&ctl->archiver.lock, LWTRANCHE_PGSTATS_DATA);
		LWLockInitialize(&ctl->bgwriter.lock, LWTRANCHE_PGSTATS_DATA);
		LWLockInitialize(&ctl->checkpointer.lock, LWTRANCHE_PGSTATS_DATA);
		LWLockInitialize(&ctl->slru.lock, LWTRANCHE_PGSTATS_DATA);
		LWLockInitialize(&ctl->wal.lock, LWTRANCHE_PGSTATS_DATA);
	}
	else
	{
		Assert(found);
	}
}
/*
 * Attach this process to the shared stats infrastructure: map the in-place
 * dsa area created by StatsShmemInit() and attach to the shared hash table.
 * The dsa mapping is pinned so it persists for the rest of the process's
 * lifetime.
 */
void
pgstat_attach_shmem(void)
{
	MemoryContext oldcontext;

	Assert(pgStatLocal.dsa == NULL);

	/* stats shared memory persists for the backend lifetime */
	oldcontext = MemoryContextSwitchTo(TopMemoryContext);

	pgStatLocal.dsa = dsa_attach_in_place(pgStatLocal.shmem->raw_dsa_area,
										  NULL);
	dsa_pin_mapping(pgStatLocal.dsa);

	pgStatLocal.shared_hash = dshash_attach(pgStatLocal.dsa, &dsh_params,
											pgStatLocal.shmem->hash_handle, 0);

	MemoryContextSwitchTo(oldcontext);
}
/*
 * Detach from the shared stats infrastructure. All local references to
 * shared entries are released first - otherwise dropped shared entries
 * could never be freed.
 */
void
pgstat_detach_shmem(void)
{
	Assert(pgStatLocal.dsa);

	/* we shouldn't leave references to shared stats */
	pgstat_release_all_entry_refs(false);

	dshash_detach(pgStatLocal.shared_hash);
	pgStatLocal.shared_hash = NULL;

	dsa_detach(pgStatLocal.dsa);
	pgStatLocal.dsa = NULL;
}
/* ------------------------------------------------------------
 * Maintenance of shared memory stats entries
 * ------------------------------------------------------------
 */

/*
 * Allocate and initialize the stats payload for a newly inserted shared
 * hash entry. Caller must hold the dshash partition lock for the entry.
 */
PgStatShared_Common *
pgstat_init_entry(PgStat_Kind kind,
				  PgStatShared_HashEntry *shhashent)
{
	/* Create new stats entry. */
	dsa_pointer chunk;
	PgStatShared_Common *shheader;

	/*
	 * Initialize refcount to 1, marking it as valid / not dropped. The entry
	 * can't be freed before the initialization because it can't be found as
	 * long as we hold the dshash partition lock. Caller needs to increase
	 * further if a longer lived reference is needed.
	 */
	pg_atomic_init_u32(&shhashent->refcount, 1);
	shhashent->dropped = false;

	/* dsa_allocate0() zeroes the whole payload */
	chunk = dsa_allocate0(pgStatLocal.dsa, pgstat_get_kind_info(kind)->shared_size);
	shheader = dsa_get_address(pgStatLocal.dsa, chunk);
	shheader->magic = 0xdeadbeef;

	/* Link the new entry from the hash entry. */
	shhashent->body = chunk;

	LWLockInitialize(&shheader->lock, LWTRANCHE_PGSTATS_DATA);

	return shheader;
}
/*
 * Reinitialize an existing shared stats entry that is being reused after
 * having been dropped (e.g. a replication slot recreated with the same
 * index, or oid wraparound). Caller must hold the dshash partition lock.
 */
static PgStatShared_Common *
pgstat_reinit_entry(PgStat_Kind kind, PgStatShared_HashEntry *shhashent)
{
	PgStatShared_Common *shheader;

	shheader = dsa_get_address(pgStatLocal.dsa, shhashent->body);

	/* mark as not dropped anymore */
	pg_atomic_fetch_add_u32(&shhashent->refcount, 1);
	shhashent->dropped = false;

	/*
	 * Reinitialize only the kind-specific stats payload. Zeroing the whole
	 * allocation (shared_size bytes starting at shheader, as previously
	 * done) would also clobber the embedded LWLock without reinitializing
	 * it, leaving the entry's lock in an invalid state; the header's magic
	 * and lock remain valid across reuse.
	 */
	Assert(shheader->magic == 0xdeadbeef);
	memset(pgstat_get_entry_data(kind, shheader), 0,
		   pgstat_get_entry_len(kind));

	return shheader;
}
/*
 * Create the backend-local hashtable of references to shared stats entries
 * if it does not exist yet, and remember the current gc_request_count so
 * later staleness checks have a baseline.
 */
static void
pgstat_setup_shared_refs(void)
{
	if (likely(pgStatEntryRefHash != NULL))
		return;

	pgStatEntryRefHash =
		pgstat_entry_ref_hash_create(pgStatEntryRefHashContext,
									 PGSTAT_ENTRY_REF_HASH_SIZE, NULL);
	pgStatSharedRefAge = pg_atomic_read_u64(&pgStatLocal.shmem->gc_request_count);
	/* gc_request_count is initialized to 1, so 0 would mean "not set up" */
	Assert(pgStatSharedRefAge != 0);
}
/*
 * Helper function for pgstat_get_entry_ref().
 *
 * Attach entry_ref to the shared entry: increment the shared refcount
 * while the caller's dshash partition lock is still held (so the entry
 * cannot be freed concurrently), then release the partition lock and fill
 * in the local reference.
 */
static void
pgstat_acquire_entry_ref(PgStat_EntryRef *entry_ref,
						 PgStatShared_HashEntry *shhashent,
						 PgStatShared_Common *shheader)
{
	Assert(shheader->magic == 0xdeadbeef);
	Assert(pg_atomic_read_u32(&shhashent->refcount) > 0);

	pg_atomic_fetch_add_u32(&shhashent->refcount, 1);

	dshash_release_lock(pgStatLocal.shared_hash, shhashent);

	entry_ref->shared_stats = shheader;
	entry_ref->shared_entry = shhashent;
}
/*
 * Helper function for pgstat_get_entry_ref().
 *
 * Look up (or insert) the backend-local cache entry for "key". Returns true
 * iff *entry_ref_p points to a fully initialized reference (a cache hit);
 * returns false when the caller still needs to attach the reference to the
 * shared entry (shared_stats / shared_entry are NULL in that case).
 */
static bool
pgstat_get_entry_ref_cached(PgStat_HashKey key, PgStat_EntryRef **entry_ref_p)
{
	bool		found;
	PgStat_EntryRefHashEntry *cache_entry;

	/*
	 * We immediately insert a cache entry, because it avoids 1) multiple
	 * hashtable lookups in case of a cache miss 2) having to deal with
	 * out-of-memory errors after incrementing PgStatShared_Common->refcount.
	 */

	cache_entry = pgstat_entry_ref_hash_insert(pgStatEntryRefHash, key, &found);

	if (!found || !cache_entry->entry_ref)
	{
		PgStat_EntryRef *entry_ref;

		/* cache miss: allocate an empty ref for the caller to fill in */
		cache_entry->entry_ref = entry_ref =
			MemoryContextAlloc(pgStatSharedRefContext,
							   sizeof(PgStat_EntryRef));
		entry_ref->shared_stats = NULL;
		entry_ref->shared_entry = NULL;
		entry_ref->pending = NULL;

		found = false;
	}
	else if (cache_entry->entry_ref->shared_stats == NULL)
	{
		/* inserted previously, but never attached to a shared entry */
		Assert(cache_entry->entry_ref->pending == NULL);
		found = false;
	}
	else
	{
		/* genuine cache hit: sanity-check the cached reference */
		PgStat_EntryRef *entry_ref PG_USED_FOR_ASSERTS_ONLY;

		entry_ref = cache_entry->entry_ref;

		Assert(entry_ref->shared_entry != NULL);
		Assert(entry_ref->shared_stats != NULL);
		Assert(entry_ref->shared_stats->magic == 0xdeadbeef);
		/* should have at least our reference */
		Assert(pg_atomic_read_u32(&entry_ref->shared_entry->refcount) > 0);
	}

	*entry_ref_p = cache_entry->entry_ref;
	return found;
}
/*
 * Get a shared stats reference. If create is true, the shared stats object is
 * created if it does not exist.
 *
 * When create is true, and created_entry is non-NULL, it'll be set to true
 * if the entry is newly created, false otherwise.
 *
 * Returns NULL only when create is false and no (live) entry exists.
 */
PgStat_EntryRef *
pgstat_get_entry_ref(PgStat_Kind kind, Oid dboid, Oid objoid, bool create,
					 bool *created_entry)
{
	PgStat_HashKey key = {.kind = kind,.dboid = dboid,.objoid = objoid};
	PgStatShared_HashEntry *shhashent;
	PgStatShared_Common *shheader = NULL;
	PgStat_EntryRef *entry_ref;

	/*
	 * passing in created_entry only makes sense if we possibly could create
	 * entry.
	 */
	AssertArg(create || created_entry == NULL);
	pgstat_assert_is_up();
	Assert(pgStatLocal.shared_hash != NULL);
	Assert(!pgStatLocal.shmem->is_shutdown);

	/* make sure the local contexts and the cache hashtable exist */
	pgstat_setup_memcxt();
	pgstat_setup_shared_refs();

	if (created_entry != NULL)
		*created_entry = false;

	/*
	 * Check if other backends dropped stats that could not be deleted because
	 * somebody held references to it. If so, check this backend's references.
	 * This is not expected to happen often. The location of the check is a
	 * bit random, but this is a relatively frequently called path, so better
	 * than most.
	 */
	if (pgstat_need_entry_refs_gc())
		pgstat_gc_entry_refs();

	/*
	 * First check the lookup cache hashtable in local memory. If we find a
	 * match here we can avoid taking locks / causing contention.
	 */
	if (pgstat_get_entry_ref_cached(key, &entry_ref))
		return entry_ref;

	Assert(entry_ref != NULL);

	/*
	 * Do a lookup in the hash table first - it's quite likely that the entry
	 * already exists, and that way we only need a shared lock.
	 */
	shhashent = dshash_find(pgStatLocal.shared_hash, &key, false);

	if (create && !shhashent)
	{
		bool		shfound;

		/*
		 * It's possible that somebody created the entry since the above
		 * lookup. If so, fall through to the same path as if we'd have if it
		 * already had been created before the dshash_find() calls.
		 */
		shhashent = dshash_find_or_insert(pgStatLocal.shared_hash, &key, &shfound);
		if (!shfound)
		{
			shheader = pgstat_init_entry(kind, shhashent);
			pgstat_acquire_entry_ref(entry_ref, shhashent, shheader);

			if (created_entry != NULL)
				*created_entry = true;

			return entry_ref;
		}
	}

	if (!shhashent)
	{
		/*
		 * If we're not creating, delete the reference again. In all
		 * likelihood it's just a stats lookup - no point wasting memory for a
		 * shared ref to nothing...
		 */
		pgstat_release_entry_ref(key, entry_ref, false);

		return NULL;
	}
	else
	{
		/*
		 * Can get here either because dshash_find() found a match, or if
		 * dshash_find_or_insert() found a concurrently inserted entry.
		 */

		if (shhashent->dropped && create)
		{
			/*
			 * There are legitimate cases where the old stats entry might not
			 * yet have been dropped by the time it's reused. The most obvious
			 * case are replication slot stats, where a new slot can be
			 * created with the same index just after dropping. But oid
			 * wraparound can lead to other cases as well. We just reset the
			 * stats to their plain state.
			 */
			shheader = pgstat_reinit_entry(kind, shhashent);
			pgstat_acquire_entry_ref(entry_ref, shhashent, shheader);

			if (created_entry != NULL)
				*created_entry = true;

			return entry_ref;
		}
		else if (shhashent->dropped)
		{
			/* dropped, and we're not creating: behave as if it didn't exist */
			dshash_release_lock(pgStatLocal.shared_hash, shhashent);
			pgstat_release_entry_ref(key, entry_ref, false);

			return NULL;
		}
		else
		{
			/* existing, live entry: just attach our reference to it */
			shheader = dsa_get_address(pgStatLocal.dsa, shhashent->body);
			pgstat_acquire_entry_ref(entry_ref, shhashent, shheader);

			return entry_ref;
		}
	}
}
/*
 * Release one backend-local reference to a shared stats entry and remove it
 * from the local cache hashtable.
 *
 * If discard_pending is true, any pending (not yet flushed) data for the
 * entry is thrown away first; otherwise releasing a ref with pending data
 * is an error. If we drop the last reference to an already-dropped shared
 * entry, the shared entry itself is freed here.
 */
static void
pgstat_release_entry_ref(PgStat_HashKey key, PgStat_EntryRef *entry_ref,
						 bool discard_pending)
{
	if (entry_ref && entry_ref->pending)
	{
		if (discard_pending)
			pgstat_delete_pending_entry(entry_ref);
		else
			elog(ERROR, "releasing ref with pending data");
	}

	if (entry_ref && entry_ref->shared_stats)
	{
		Assert(entry_ref->shared_stats->magic == 0xdeadbeef);
		Assert(entry_ref->pending == NULL);

		/*
		 * This can't race with another backend looking up the stats entry and
		 * increasing the refcount because it is not "legal" to create
		 * additional references to dropped entries.
		 */
		if (pg_atomic_fetch_sub_u32(&entry_ref->shared_entry->refcount, 1) == 1)
		{
			PgStatShared_HashEntry *shent;

			/*
			 * We're the last referrer to this entry, try to drop the shared
			 * entry.
			 */

			/* only dropped entries can reach a 0 refcount */
			Assert(entry_ref->shared_entry->dropped);

			/* re-find the entry to acquire its partition lock exclusively */
			shent = dshash_find(pgStatLocal.shared_hash,
								&entry_ref->shared_entry->key,
								true);
			if (!shent)
				elog(ERROR, "could not find just referenced shared stats entry");

			Assert(pg_atomic_read_u32(&entry_ref->shared_entry->refcount) == 0);
			Assert(entry_ref->shared_entry == shent);

			pgstat_free_entry(shent, NULL);
		}
	}

	if (!pgstat_entry_ref_hash_delete(pgStatEntryRefHash, key))
		elog(ERROR, "entry ref vanished before deletion");

	if (entry_ref)
		pfree(entry_ref);
}
/*
 * Acquire the entry's content lock in exclusive mode.
 *
 * With nowait set, return false instead of blocking if the lock cannot be
 * taken immediately; otherwise block until acquired and return true.
 */
bool
pgstat_lock_entry(PgStat_EntryRef *entry_ref, bool nowait)
{
	LWLock	   *lock = &entry_ref->shared_stats->lock;

	if (!nowait)
	{
		LWLockAcquire(lock, LW_EXCLUSIVE);
		return true;
	}

	return LWLockConditionalAcquire(lock, LW_EXCLUSIVE);
}
/*
 * Release the content lock taken by pgstat_lock_entry().
 */
void
pgstat_unlock_entry(PgStat_EntryRef *entry_ref)
{
	LWLock	   *lock = &entry_ref->shared_stats->lock;

	LWLockRelease(lock);
}
/*
 * Helper function to fetch and lock shared stats.
 *
 * The entry is created if it does not exist yet. Returns NULL only when
 * nowait is true and the entry's lock could not be acquired immediately.
 */
PgStat_EntryRef *
pgstat_get_entry_ref_locked(PgStat_Kind kind, Oid dboid, Oid objoid,
							bool nowait)
{
	PgStat_EntryRef *entry_ref;

	/* find shared table stats entry corresponding to the local entry */
	entry_ref = pgstat_get_entry_ref(kind, dboid, objoid, true, NULL);

	/* lock the shared entry to protect the content, skip if failed */
	if (pgstat_lock_entry(entry_ref, nowait))
		return entry_ref;

	return NULL;
}
/*
 * Ask all backends to garbage-collect their cached references to dropped
 * shared stats entries, by bumping the shared gc_request_count. Backends
 * detect the change by comparing it to their pgStatSharedRefAge (see
 * pgstat_need_entry_refs_gc()).
 */
void
pgstat_request_entry_refs_gc(void)
{
	pg_atomic_fetch_add_u64(&pgStatLocal.shmem->gc_request_count, 1);
}
static bool
pgstat_need_entry_refs_gc(void)
{
uint64 curage;
if (!pgStatEntryRefHash)
return false;
/* should have been initialized when creating pgStatEntryRefHash */
Assert(pgStatSharedRefAge != 0);
curage = pg_atomic_read_u64(&pgStatLocal.shmem->gc_request_count);
return pgStatSharedRefAge != curage;
}
/*
 * Release local references to shared entries that other backends have
 * dropped, then record the gc_request_count we caught up to. References
 * with pending data are skipped; they are released once the pending data
 * is flushed or discarded.
 */
static void
pgstat_gc_entry_refs(void)
{
	pgstat_entry_ref_hash_iterator i;
	PgStat_EntryRefHashEntry *ent;
	uint64		curage;

	curage = pg_atomic_read_u64(&pgStatLocal.shmem->gc_request_count);
	/* gc_request_count starts at 1, so it can never read as 0 */
	Assert(curage != 0);

	/*
	 * Some entries have been dropped. Invalidate cache pointer to them.
	 */
	pgstat_entry_ref_hash_start_iterate(pgStatEntryRefHash, &i);
	while ((ent = pgstat_entry_ref_hash_iterate(pgStatEntryRefHash, &i)) != NULL)
	{
		PgStat_EntryRef *entry_ref = ent->entry_ref;

		Assert(!entry_ref->shared_stats ||
			   entry_ref->shared_stats->magic == 0xdeadbeef);

		/* live entries keep their cached reference */
		if (!entry_ref->shared_entry->dropped)
			continue;

		/* cannot gc shared ref that has pending data */
		if (entry_ref->pending != NULL)
			continue;

		pgstat_release_entry_ref(ent->key, entry_ref, false);
	}

	pgStatSharedRefAge = curage;
}
/*
 * Release all cached entry refs approved by the match callback (a NULL
 * callback matches everything), optionally discarding pending data.
 */
static void
pgstat_release_matching_entry_refs(bool discard_pending, ReleaseMatchCB match,
								   Datum match_data)
{
	pgstat_entry_ref_hash_iterator it;
	PgStat_EntryRefHashEntry *ent;

	if (pgStatEntryRefHash == NULL)
		return;

	pgstat_entry_ref_hash_start_iterate(pgStatEntryRefHash, &it);

	while ((ent = pgstat_entry_ref_hash_iterate(pgStatEntryRefHash, &it)) != NULL)
	{
		Assert(ent->entry_ref != NULL);

		if (match == NULL || match(ent, match_data))
			pgstat_release_entry_ref(ent->key, ent->entry_ref, discard_pending);
	}
}
/*
 * Release all local references to shared stats entries.
 *
 * When a process exits it cannot do so while still holding references onto
 * stats entries, otherwise the shared stats entries could never be freed.
 *
 * The local cache hashtable itself is destroyed as well; it is recreated on
 * demand by pgstat_setup_shared_refs().
 */
static void
pgstat_release_all_entry_refs(bool discard_pending)
{
	if (pgStatEntryRefHash == NULL)
		return;

	/* NULL match callback means "release everything" */
	pgstat_release_matching_entry_refs(discard_pending, NULL, 0);

	Assert(pgStatEntryRefHash->members == 0);
	pgstat_entry_ref_hash_destroy(pgStatEntryRefHash);
	pgStatEntryRefHash = NULL;
}
/* ReleaseMatchCB: does the cached entry belong to the given database? */
static bool
match_db(PgStat_EntryRefHashEntry *ent, Datum match_data)
{
	return ent->key.dboid == DatumGetObjectId(match_data);
}
/*
 * Release all cached references to stats entries belonging to the given
 * database, discarding any pending data for them.
 */
static void
pgstat_release_db_entry_refs(Oid dboid)
{
	pgstat_release_matching_entry_refs( /* discard pending = */ true,
									   match_db,
									   ObjectIdGetDatum(dboid));
}
/* ------------------------------------------------------------
 * Dropping and resetting of stats entries
 * ------------------------------------------------------------
 */

/*
 * Delete a shared hash entry and free its dsa-allocated stats payload.
 * If hstat is non-NULL we are inside a sequential scan and must use
 * dshash_delete_current(); otherwise the caller holds the entry's
 * partition lock and dshash_delete_entry() is used.
 */
static void
pgstat_free_entry(PgStatShared_HashEntry *shent, dshash_seq_status *hstat)
{
	dsa_pointer pdsa;

	/*
	 * Fetch dsa pointer before deleting entry - that way we can free the
	 * memory after releasing the lock.
	 */
	pdsa = shent->body;

	if (!hstat)
		dshash_delete_entry(pgStatLocal.shared_hash, shent);
	else
		dshash_delete_current(hstat);

	dsa_free(pgStatLocal.dsa, pdsa);
}
/*
 * Helper for both pgstat_drop_database_and_contents() and
 * pgstat_drop_entry(). If hstat is non-null delete the shared entry using
 * dshash_delete_current(), otherwise use dshash_delete_entry(). In either
 * case the entry needs to be already locked.
 *
 * Returns true if the entry could be freed immediately, false if other
 * backends still hold references (the entry is then merely marked dropped
 * and will be freed by the last backend releasing its reference).
 */
static bool
pgstat_drop_entry_internal(PgStatShared_HashEntry *shent,
						   dshash_seq_status *hstat)
{
	Assert(shent->body != InvalidDsaPointer);

	/* should already have released local reference */
	if (pgStatEntryRefHash)
		Assert(!pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, shent->key));

	/*
	 * Signal that the entry is dropped - this will eventually cause other
	 * backends to release their references.
	 */
	if (shent->dropped)
		elog(ERROR, "can only drop stats once");
	shent->dropped = true;

	/* release refcount marking entry as not dropped */
	if (pg_atomic_sub_fetch_u32(&shent->refcount, 1) == 0)
	{
		pgstat_free_entry(shent, hstat);
		return true;
	}
	else
	{
		if (!hstat)
			dshash_release_lock(pgStatLocal.shared_hash, shent);
		return false;
	}
}
/*
 * Drop stats for the database and all the objects inside that database.
 */
static void
pgstat_drop_database_and_contents(Oid dboid)
{
	dshash_seq_status hstat;
	PgStatShared_HashEntry *p;
	uint64		not_freed_count = 0;

	Assert(OidIsValid(dboid));

	Assert(pgStatLocal.shared_hash != NULL);

	/*
	 * This backend might very well be the only backend holding a reference to
	 * about-to-be-dropped entries. Ensure that we're not preventing it from
	 * being cleaned up till later.
	 *
	 * Doing this separately from the dshash iteration below avoids having to
	 * do so while holding a partition lock on the shared hashtable.
	 */
	pgstat_release_db_entry_refs(dboid);

	/* some of the dshash entries are to be removed, take exclusive lock. */
	dshash_seq_init(&hstat, pgStatLocal.shared_hash, true);
	while ((p = dshash_seq_next(&hstat)) != NULL)
	{
		/* already dropped by somebody else, nothing to do */
		if (p->dropped)
			continue;

		if (p->key.dboid != dboid)
			continue;

		if (!pgstat_drop_entry_internal(p, &hstat))
		{
			/*
			 * Even statistics for a dropped database might currently be
			 * accessed (consider e.g. database stats for pg_stat_database).
			 */
			not_freed_count++;
		}
	}
	dshash_seq_term(&hstat);

	/*
	 * If some of the stats data could not be freed, signal the reference
	 * holders to run garbage collection of their cached pgStatShmLookupCache.
	 */
	if (not_freed_count > 0)
		pgstat_request_entry_refs_gc();
}
/*
 * Drop the stats entry for one object: release this backend's cached
 * reference (discarding any pending data), then mark the shared entry as
 * dropped. Dropping a database entry also drops the stats of all objects
 * contained in that database.
 *
 * Returns true if the entry's shared memory could be freed immediately (or
 * no entry existed), false if other backends still hold references to it.
 */
bool
pgstat_drop_entry(PgStat_Kind kind, Oid dboid, Oid objoid)
{
	PgStat_HashKey key = {.kind = kind,.dboid = dboid,.objoid = objoid};
	PgStatShared_HashEntry *shent;
	bool		freed = true;

	/* delete local reference */
	if (pgStatEntryRefHash)
	{
		PgStat_EntryRefHashEntry *lohashent =
		pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key);

		if (lohashent)
			pgstat_release_entry_ref(lohashent->key, lohashent->entry_ref,
									 true);
	}

	/* mark entry in shared hashtable as deleted, drop if possible */
	shent = dshash_find(pgStatLocal.shared_hash, &key, true);
	if (shent)
	{
		freed = pgstat_drop_entry_internal(shent, NULL);

		/*
		 * Database stats contain other stats. Drop those as well when
		 * dropping the database. XXX: Perhaps this should be done in a
		 * slightly more principled way? But not obvious what that'd look
		 * like, and so far this is the only case...
		 */
		if (key.kind == PGSTAT_KIND_DATABASE)
			pgstat_drop_database_and_contents(key.dboid);
	}

	return freed;
}
void
pgstat_drop_all_entries(void)
{
dshash_seq_status hstat;
PgStatShared_HashEntry *ps;
uint64 not_freed_count = 0;
dshash_seq_init(&hstat, pgStatLocal.shared_hash, false);
while ((ps = dshash_seq_next(&hstat)) != NULL)
{
if (ps->dropped)
continue;
if (!pgstat_drop_entry_internal(ps, &hstat))
not_freed_count++;
}
dshash_seq_term(&hstat);
if (not_freed_count > 0)
pgstat_request_entry_refs_gc();
}
/*
 * Helper for resetting one stats entry: zero the kind-specific payload and
 * invoke the kind's reset-timestamp callback, when one is defined. The
 * caller must hold the entry's content lock.
 */
static void
shared_stat_reset_contents(PgStat_Kind kind, PgStatShared_Common *header,
						   TimestampTz ts)
{
	const PgStat_KindInfo *kind_info;

	memset(pgstat_get_entry_data(kind, header), 0,
		   pgstat_get_entry_len(kind));

	kind_info = pgstat_get_kind_info(kind);
	if (kind_info->reset_timestamp_cb != NULL)
		kind_info->reset_timestamp_cb(header, ts);
}
/*
 * Reset one variable-numbered stats entry.
 *
 * A no-op if the entry does not exist or has already been dropped.
 */
void
pgstat_reset_entry(PgStat_Kind kind, Oid dboid, Oid objoid, TimestampTz ts)
{
	PgStat_EntryRef *entry_ref;

	/* fixed-numbered stats have their own reset paths */
	Assert(!pgstat_get_kind_info(kind)->fixed_amount);

	/* create = false: don't instantiate stats just to reset them */
	entry_ref = pgstat_get_entry_ref(kind, dboid, objoid, false, NULL);
	if (!entry_ref || entry_ref->shared_entry->dropped)
		return;

	/* nowait = false, so the lock acquisition always succeeds */
	pgstat_lock_entry(entry_ref, false);
	shared_stat_reset_contents(kind, entry_ref->shared_stats, ts);
	pgstat_unlock_entry(entry_ref);
}
/*
 * Scan through the shared hashtable of stats, resetting statistics if
 * approved by the provided do_reset() function.
 *
 * Only entry contents are zeroed (under each entry's content lock); the
 * hash entries themselves are not deleted, so a shared sequential scan is
 * sufficient.
 */
void
pgstat_reset_matching_entries(bool (*do_reset) (PgStatShared_HashEntry *, Datum),
							  Datum match_data, TimestampTz ts)
{
	dshash_seq_status hstat;
	PgStatShared_HashEntry *p;

	/* dshash entry is not modified, take shared lock */
	dshash_seq_init(&hstat, pgStatLocal.shared_hash, false);
	while ((p = dshash_seq_next(&hstat)) != NULL)
	{
		PgStatShared_Common *header;

		/* dropped entries are awaiting removal, nothing to reset */
		if (p->dropped)
			continue;

		if (!do_reset(p, match_data))
			continue;

		header = dsa_get_address(pgStatLocal.dsa, p->body);

		LWLockAcquire(&header->lock, LW_EXCLUSIVE);
		shared_stat_reset_contents(p->key.kind, header, ts);
		LWLockRelease(&header->lock);
	}
	dshash_seq_term(&hstat);
}
/* reset filter: does the entry have the requested stats kind? */
static bool
match_kind(PgStatShared_HashEntry *p, Datum match_data)
{
	PgStat_Kind kind = DatumGetInt32(match_data);

	return p->key.kind == kind;
}
/*
 * Reset all entries of the given stats kind, stamping them with reset
 * timestamp ts.
 */
void
pgstat_reset_entries_of_kind(PgStat_Kind kind, TimestampTz ts)
{
	pgstat_reset_matching_entries(match_kind, Int32GetDatum(kind), ts);
}
static void
pgstat_setup_memcxt(void)
{
if (unlikely(!pgStatSharedRefContext))
pgStatSharedRefContext =
AllocSetContextCreate(CacheMemoryContext,
"PgStat Shared Ref",
ALLOCSET_SMALL_SIZES);
if (unlikely(!pgStatEntryRefHashContext))
pgStatEntryRefHashContext =
AllocSetContextCreate(CacheMemoryContext,
"PgStat Shared Ref Hash",
ALLOCSET_SMALL_SIZES);
}

View File

@ -18,18 +18,21 @@
#include "postgres.h"
#include "utils/pgstat_internal.h"
#include "utils/timestamp.h"
static inline PgStat_MsgSLRU *get_slru_entry(int slru_idx);
static inline PgStat_SLRUStats *get_slru_entry(int slru_idx);
static void pgstat_reset_slru_counter_internal(int index, TimestampTz ts);
/*
* SLRU statistics counts waiting to be sent to the collector. These are
* stored directly in stats message format so they can be sent without needing
* to copy things around. We assume this variable inits to zeroes. Entries
* are one-to-one with slru_names[].
* SLRU statistics counts waiting to be flushed out. We assume this variable
* inits to zeroes. Entries are one-to-one with slru_names[]. Changes of
* SLRU counters are reported within critical sections so we use static memory
* in order to avoid memory allocation.
*/
static PgStat_MsgSLRU SLRUStats[SLRU_NUM_ELEMENTS];
static PgStat_SLRUStats pending_SLRUStats[SLRU_NUM_ELEMENTS];
bool have_slrustats = false;
/*
@ -41,17 +44,11 @@ static PgStat_MsgSLRU SLRUStats[SLRU_NUM_ELEMENTS];
void
pgstat_reset_slru(const char *name)
{
PgStat_MsgResetslrucounter msg;
TimestampTz ts = GetCurrentTimestamp();
AssertArg(name != NULL);
if (pgStatSock == PGINVALID_SOCKET)
return;
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_RESETSLRUCOUNTER);
msg.m_index = pgstat_get_slru_index(name);
pgstat_send(&msg, sizeof(msg));
pgstat_reset_slru_counter_internal(pgstat_get_slru_index(name), ts);
}
/*
@ -61,43 +58,55 @@ pgstat_reset_slru(const char *name)
void
pgstat_count_slru_page_zeroed(int slru_idx)
{
get_slru_entry(slru_idx)->m_blocks_zeroed += 1;
get_slru_entry(slru_idx)->blocks_zeroed += 1;
}
void
pgstat_count_slru_page_hit(int slru_idx)
{
get_slru_entry(slru_idx)->m_blocks_hit += 1;
get_slru_entry(slru_idx)->blocks_hit += 1;
}
void
pgstat_count_slru_page_exists(int slru_idx)
{
get_slru_entry(slru_idx)->m_blocks_exists += 1;
get_slru_entry(slru_idx)->blocks_exists += 1;
}
void
pgstat_count_slru_page_read(int slru_idx)
{
get_slru_entry(slru_idx)->m_blocks_read += 1;
get_slru_entry(slru_idx)->blocks_read += 1;
}
void
pgstat_count_slru_page_written(int slru_idx)
{
get_slru_entry(slru_idx)->m_blocks_written += 1;
get_slru_entry(slru_idx)->blocks_written += 1;
}
void
pgstat_count_slru_flush(int slru_idx)
{
get_slru_entry(slru_idx)->m_flush += 1;
get_slru_entry(slru_idx)->flush += 1;
}
void
pgstat_count_slru_truncate(int slru_idx)
{
get_slru_entry(slru_idx)->m_truncate += 1;
get_slru_entry(slru_idx)->truncate += 1;
}
/*
* Support function for the SQL-callable pgstat* functions. Returns
* a pointer to the slru statistics struct.
*/
PgStat_SLRUStats *
pgstat_fetch_slru(void)
{
pgstat_snapshot_fixed(PGSTAT_KIND_SLRU);
return pgStatLocal.snapshot.slru;
}
/*
@ -135,45 +144,81 @@ pgstat_get_slru_index(const char *name)
}
/*
* Send SLRU statistics to the collector
* Flush out locally pending SLRU stats entries
*
* If nowait is true, this function returns false on lock failure. Otherwise
* this function always returns true. Writer processes are mutually excluded
* using LWLock, but readers are expected to use change-count protocol to avoid
* interference with writers.
*
* If nowait is true, this function returns true if the lock could not be
* acquired. Otherwise return false.
*/
void
pgstat_send_slru(void)
bool
pgstat_slru_flush(bool nowait)
{
/* We assume this initializes to zeroes */
static const PgStat_MsgSLRU all_zeroes;
PgStatShared_SLRU *stats_shmem = &pgStatLocal.shmem->slru;
int i;
for (int i = 0; i < SLRU_NUM_ELEMENTS; i++)
if (!have_slrustats)
return false;
if (!nowait)
LWLockAcquire(&stats_shmem->lock, LW_EXCLUSIVE);
else if (!LWLockConditionalAcquire(&stats_shmem->lock, LW_EXCLUSIVE))
return true;
for (i = 0; i < SLRU_NUM_ELEMENTS; i++)
{
/*
* This function can be called even if nothing at all has happened. In
* this case, avoid sending a completely empty message to the stats
* collector.
*/
if (memcmp(&SLRUStats[i], &all_zeroes, sizeof(PgStat_MsgSLRU)) == 0)
continue;
PgStat_SLRUStats *sharedent = &stats_shmem->stats[i];
PgStat_SLRUStats *pendingent = &pending_SLRUStats[i];
/* set the SLRU type before each send */
SLRUStats[i].m_index = i;
/*
* Prepare and send the message
*/
pgstat_setheader(&SLRUStats[i].m_hdr, PGSTAT_MTYPE_SLRU);
pgstat_send(&SLRUStats[i], sizeof(PgStat_MsgSLRU));
/*
* Clear out the statistics buffer, so it can be re-used.
*/
MemSet(&SLRUStats[i], 0, sizeof(PgStat_MsgSLRU));
#define SLRU_ACC(fld) sharedent->fld += pendingent->fld
SLRU_ACC(blocks_zeroed);
SLRU_ACC(blocks_hit);
SLRU_ACC(blocks_read);
SLRU_ACC(blocks_written);
SLRU_ACC(blocks_exists);
SLRU_ACC(flush);
SLRU_ACC(truncate);
#undef SLRU_ACC
}
/* done, clear the pending entry */
MemSet(pending_SLRUStats, 0, sizeof(pending_SLRUStats));
LWLockRelease(&stats_shmem->lock);
have_slrustats = false;
return false;
}
void
pgstat_slru_reset_all_cb(TimestampTz ts)
{
for (int i = 0; i < SLRU_NUM_ELEMENTS; i++)
pgstat_reset_slru_counter_internal(i, ts);
}
void
pgstat_slru_snapshot_cb(void)
{
PgStatShared_SLRU *stats_shmem = &pgStatLocal.shmem->slru;
LWLockAcquire(&stats_shmem->lock, LW_SHARED);
memcpy(pgStatLocal.snapshot.slru, &stats_shmem->stats,
sizeof(stats_shmem->stats));
LWLockRelease(&stats_shmem->lock);
}
/*
* Returns pointer to entry with counters for given SLRU (based on the name
* stored in SlruCtl as lwlock tranche name).
*/
static inline PgStat_MsgSLRU *
static inline PgStat_SLRUStats *
get_slru_entry(int slru_idx)
{
pgstat_assert_is_up();
@ -186,5 +231,20 @@ get_slru_entry(int slru_idx)
Assert((slru_idx >= 0) && (slru_idx < SLRU_NUM_ELEMENTS));
return &SLRUStats[slru_idx];
have_slrustats = true;
return &pending_SLRUStats[slru_idx];
}
static void
pgstat_reset_slru_counter_internal(int index, TimestampTz ts)
{
PgStatShared_SLRU *stats_shmem = &pgStatLocal.shmem->slru;
LWLockAcquire(&stats_shmem->lock, LW_EXCLUSIVE);
memset(&stats_shmem->stats[index], 0, sizeof(PgStat_SLRUStats));
stats_shmem->stats[index].stat_reset_timestamp = ts;
LWLockRelease(&stats_shmem->lock);
}

View File

@ -26,12 +26,17 @@
void
pgstat_report_subscription_error(Oid subid, bool is_apply_error)
{
PgStat_MsgSubscriptionError msg;
PgStat_EntryRef *entry_ref;
PgStat_BackendSubEntry *pending;
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_SUBSCRIPTIONERROR);
msg.m_subid = subid;
msg.m_is_apply_error = is_apply_error;
pgstat_send(&msg, sizeof(PgStat_MsgSubscriptionError));
entry_ref = pgstat_prep_pending_entry(PGSTAT_KIND_SUBSCRIPTION,
InvalidOid, subid, NULL);
pending = entry_ref->pending;
if (is_apply_error)
pending->apply_error_count++;
else
pending->sync_error_count++;
}
/*
@ -54,12 +59,52 @@ pgstat_create_subscription(Oid subid)
void
pgstat_drop_subscription(Oid subid)
{
PgStat_MsgSubscriptionDrop msg;
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_SUBSCRIPTIONDROP);
msg.m_subid = subid;
pgstat_send(&msg, sizeof(PgStat_MsgSubscriptionDrop));
pgstat_drop_transactional(PGSTAT_KIND_SUBSCRIPTION,
InvalidOid, subid);
}
/*
* Support function for the SQL-callable pgstat* functions. Returns
* the collected statistics for one subscription or NULL.
*/
PgStat_StatSubEntry *
pgstat_fetch_stat_subscription(Oid subid)
{
return (PgStat_StatSubEntry *)
pgstat_fetch_entry(PGSTAT_KIND_SUBSCRIPTION, InvalidOid, subid);
}
/*
* Flush out pending stats for the entry
*
* If nowait is true, this function returns false if lock could not
* immediately acquired, otherwise true is returned.
*/
bool
pgstat_subscription_flush_cb(PgStat_EntryRef *entry_ref, bool nowait)
{
PgStat_BackendSubEntry *localent;
PgStatShared_Subscription *shsubent;
localent = (PgStat_BackendSubEntry *) entry_ref->pending;
shsubent = (PgStatShared_Subscription *) entry_ref->shared_stats;
/* localent always has non-zero content */
if (!pgstat_lock_entry(entry_ref, nowait))
return false;
#define SUB_ACC(fld) shsubent->stats.fld += localent->fld
SUB_ACC(apply_error_count);
SUB_ACC(sync_error_count);
#undef SUB_ACC
pgstat_unlock_entry(entry_ref);
return true;
}
void
pgstat_subscription_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts)
{
((PgStatShared_Subscription *) header)->stats.stat_reset_timestamp = ts;
}

View File

@ -21,13 +21,7 @@
#include "executor/instrument.h"
/*
* WAL global statistics counters. Stored directly in a stats message
* structure so they can be sent without needing to copy things around. We
* assume these init to zeroes.
*/
PgStat_MsgWal WalStats;
PgStat_WalStats PendingWalStats = {0};
/*
* WAL usage counters saved from pgWALUsage at the previous call to
@ -39,101 +33,100 @@ static WalUsage prevWalUsage;
/*
 * Calculate how much WAL usage counters have increased and update
 * shared statistics.
 *
 * Must be called by processes that generate WAL, that do not call
 * pgstat_report_stat(), like walwriter.
 */
void
pgstat_report_wal(bool force)
{
	/*
	 * The old timed rate-limiting (sendTime / PGSTAT_STAT_INTERVAL) is gone;
	 * stats now go straight to shared memory, so just flush.  'force' is
	 * passed through as the nowait inverse semantics of pgstat_flush_wal().
	 */
	pgstat_flush_wal(force);
}
/*
 * Support function for the SQL-callable pgstat* functions.  Returns a
 * pointer to the WAL statistics struct.
 */
PgStat_WalStats *
pgstat_fetch_stat_wal(void)
{
	PgStat_WalStats *snap;

	/* make sure the fixed-numbered WAL stats snapshot is current */
	pgstat_snapshot_fixed(PGSTAT_KIND_WAL);
	snap = &pgStatLocal.snapshot.wal;

	return snap;
}
/*
 * Calculate how much WAL usage counters have increased by subtracting the
 * previous counters from the current ones, and accumulate the result into
 * the shared WAL statistics.
 *
 * If nowait is true, this function returns true if the lock could not be
 * acquired.  Otherwise returns false.
 */
bool
pgstat_flush_wal(bool nowait)
{
	PgStatShared_Wal *stats_shmem = &pgStatLocal.shmem->wal;
	WalUsage	diff = {0};

	Assert(IsUnderPostmaster || !IsPostmasterEnvironment);
	Assert(pgStatLocal.shmem != NULL &&
		   !pgStatLocal.shmem->is_shutdown);

	/*
	 * This function can be called even if nothing at all has happened.  Avoid
	 * taking lock for nothing in that case.
	 */
	if (!pgstat_have_pending_wal())
		return false;

	/*
	 * We don't update the WAL usage portion of the local WalStats elsewhere.
	 * Calculate how much WAL usage counters were increased by subtracting the
	 * previous counters from the current ones.
	 */
	WalUsageAccumDiff(&diff, &pgWalUsage, &prevWalUsage);
	PendingWalStats.wal_records = diff.wal_records;
	PendingWalStats.wal_fpi = diff.wal_fpi;
	PendingWalStats.wal_bytes = diff.wal_bytes;

	if (!nowait)
		LWLockAcquire(&stats_shmem->lock, LW_EXCLUSIVE);
	else if (!LWLockConditionalAcquire(&stats_shmem->lock, LW_EXCLUSIVE))
		return true;

#define WALSTAT_ACC(fld) stats_shmem->stats.fld += PendingWalStats.fld
	WALSTAT_ACC(wal_records);
	WALSTAT_ACC(wal_fpi);
	WALSTAT_ACC(wal_bytes);
	WALSTAT_ACC(wal_buffers_full);
	WALSTAT_ACC(wal_write);
	WALSTAT_ACC(wal_sync);
	WALSTAT_ACC(wal_write_time);
	WALSTAT_ACC(wal_sync_time);
#undef WALSTAT_ACC

	LWLockRelease(&stats_shmem->lock);

	/*
	 * Save the current counters for the subsequent calculation of WAL usage.
	 */
	prevWalUsage = pgWalUsage;

	/*
	 * Clear out the statistics buffer, so it can be re-used.
	 */
	MemSet(&PendingWalStats, 0, sizeof(PendingWalStats));

	return false;
}
void
pgstat_init_wal(void)
{
/*
* Initialize prevWalUsage with pgWalUsage so that pgstat_report_wal() can
* Initialize prevWalUsage with pgWalUsage so that pgstat_flush_wal() can
* calculate how much pgWalUsage counters are increased by subtracting
* prevWalUsage from pgWalUsage.
*/
@ -151,6 +144,28 @@ bool
pgstat_have_pending_wal(void)
{
return pgWalUsage.wal_records != prevWalUsage.wal_records ||
WalStats.m_wal_write != 0 ||
WalStats.m_wal_sync != 0;
PendingWalStats.wal_write != 0 ||
PendingWalStats.wal_sync != 0;
}
void
pgstat_wal_reset_all_cb(TimestampTz ts)
{
	PgStatShared_Wal *walstats = &pgStatLocal.shmem->wal;

	/* Zero the shared WAL stats and remember when the reset happened. */
	LWLockAcquire(&walstats->lock, LW_EXCLUSIVE);
	memset(&walstats->stats, 0, sizeof(walstats->stats));
	walstats->stats.stat_reset_timestamp = ts;
	LWLockRelease(&walstats->lock);
}
void
pgstat_wal_snapshot_cb(void)
{
	PgStatShared_Wal *walstats = &pgStatLocal.shmem->wal;

	/* Copy the shared WAL stats into the local snapshot under shared lock. */
	LWLockAcquire(&walstats->lock, LW_SHARED);
	memcpy(&pgStatLocal.snapshot.wal, &walstats->stats,
		   sizeof(pgStatLocal.snapshot.wal));
	LWLockRelease(&walstats->lock);
}

View File

@ -68,6 +68,7 @@ static void
AtEOXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, bool isCommit)
{
dlist_mutable_iter iter;
int not_freed_count = 0;
if (xact_state->pending_drops_count == 0)
{
@ -79,6 +80,7 @@ AtEOXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, bool isCommit)
{
PgStat_PendingDroppedStatsItem *pending =
dlist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
xl_xact_stats_item *it = &pending->item;
if (isCommit && !pending->is_create)
{
@ -86,7 +88,8 @@ AtEOXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, bool isCommit)
* Transaction that dropped an object committed. Drop the stats
* too.
*/
/* will do work in subsequent commit */
if (!pgstat_drop_entry(it->kind, it->dboid, it->objoid))
not_freed_count++;
}
else if (!isCommit && pending->is_create)
{
@ -94,13 +97,17 @@ AtEOXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, bool isCommit)
* Transaction that created an object aborted. Drop the stats
* associated with the object.
*/
/* will do work in subsequent commit */
if (!pgstat_drop_entry(it->kind, it->dboid, it->objoid))
not_freed_count++;
}
dlist_delete(&pending->node);
xact_state->pending_drops_count--;
pfree(pending);
}
if (not_freed_count > 0)
pgstat_request_entry_refs_gc();
}
/*
@ -135,6 +142,7 @@ AtEOSubXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state,
{
PgStat_SubXactStatus *parent_xact_state;
dlist_mutable_iter iter;
int not_freed_count = 0;
if (xact_state->pending_drops_count == 0)
return;
@ -145,6 +153,7 @@ AtEOSubXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state,
{
PgStat_PendingDroppedStatsItem *pending =
dlist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
xl_xact_stats_item *it = &pending->item;
dlist_delete(&pending->node);
xact_state->pending_drops_count--;
@ -155,7 +164,8 @@ AtEOSubXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state,
* Subtransaction creating a new stats object aborted. Drop the
* stats object.
*/
/* will do work in subsequent commit */
if (!pgstat_drop_entry(it->kind, it->dboid, it->objoid))
not_freed_count++;
pfree(pending);
}
else if (isCommit)
@ -175,6 +185,8 @@ AtEOSubXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state,
}
Assert(xact_state->pending_drops_count == 0);
if (not_freed_count > 0)
pgstat_request_entry_refs_gc();
}
/*
@ -307,13 +319,21 @@ pgstat_get_transactional_drops(bool isCommit, xl_xact_stats_item **items)
/*
 * Drop the stats entries for all objects in 'items'.  If any shared entry
 * could not be freed immediately (because references to it still exist),
 * request garbage collection of entry refs.
 */
void
pgstat_execute_transactional_drops(int ndrops, struct xl_xact_stats_item *items, bool is_redo)
{
	int			not_freed_count = 0;

	if (ndrops == 0)
		return;

	for (int i = 0; i < ndrops; i++)
	{
		xl_xact_stats_item *it = &items[i];

		if (!pgstat_drop_entry(it->kind, it->dboid, it->objoid))
			not_freed_count++;
	}

	if (not_freed_count > 0)
		pgstat_request_entry_refs_gc();
}
static void
@ -345,6 +365,15 @@ create_drop_transactional_internal(PgStat_Kind kind, Oid dboid, Oid objoid, bool
/*
 * Create the transactional stats entry for the object.
 *
 * If a stats entry for the object already exists (e.g. it leaked from an
 * earlier life of the same oid), warn and reset it so the new object starts
 * with clean statistics.
 */
void
pgstat_create_transactional(PgStat_Kind kind, Oid dboid, Oid objoid)
{
	if (pgstat_get_entry_ref(kind, dboid, objoid, false, NULL))
	{
		/* Oid is unsigned; use %u, not %d, to avoid misprinting large oids */
		ereport(WARNING,
				errmsg("resetting existing stats for type %s, db=%u, oid=%u",
					   (pgstat_get_kind_info(kind))->name, dboid, objoid));

		pgstat_reset(kind, dboid, objoid);
	}

	create_drop_transactional_internal(kind, dboid, objoid, /* create */ true);
}

View File

@ -230,9 +230,6 @@ pgstat_get_wait_activity(WaitEventActivity w)
case WAIT_EVENT_LOGICAL_LAUNCHER_MAIN:
event_name = "LogicalLauncherMain";
break;
case WAIT_EVENT_PGSTAT_MAIN:
event_name = "PgStatMain";
break;
case WAIT_EVENT_RECOVERY_WAL_STREAM:
event_name = "RecoveryWalStream";
break;

View File

@ -2046,7 +2046,15 @@ pg_stat_get_xact_function_self_time(PG_FUNCTION_ARGS)
/*
 * Return the timestamp of the current statistics snapshot, or NULL if no
 * snapshot has been taken yet in this transaction.
 */
Datum
pg_stat_get_snapshot_timestamp(PG_FUNCTION_ARGS)
{
	bool		have_snapshot;
	TimestampTz ts;

	ts = pgstat_get_stat_snapshot_timestamp(&have_snapshot);

	if (!have_snapshot)
		PG_RETURN_NULL();

	PG_RETURN_TIMESTAMPTZ(ts);
}
/* Discard the active statistics snapshot */

View File

@ -73,6 +73,7 @@
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/optimizer.h"
#include "pgstat.h"
#include "rewrite/rewriteDefine.h"
#include "rewrite/rowsecurity.h"
#include "storage/lmgr.h"
@ -2409,6 +2410,9 @@ RelationDestroyRelation(Relation relation, bool remember_tupdesc)
*/
RelationCloseSmgr(relation);
/* break mutual link with stats entry */
pgstat_unlink_relation(relation);
/*
* Free all the subsidiary data structures of the relcache entry, then the
* entry itself.
@ -2716,8 +2720,9 @@ RelationClearRelation(Relation relation, bool rebuild)
SWAPFIELD(RowSecurityDesc *, rd_rsdesc);
/* toast OID override must be preserved */
SWAPFIELD(Oid, rd_toastoid);
/* pgstat_info must be preserved */
/* pgstat_info / enabled must be preserved */
SWAPFIELD(struct PgStat_TableStatus *, pgstat_info);
SWAPFIELD(bool, pgstat_enabled);
/* preserve old partition key if we have one */
if (keep_partkey)
{

View File

@ -36,6 +36,7 @@ volatile sig_atomic_t IdleInTransactionSessionTimeoutPending = false;
volatile sig_atomic_t IdleSessionTimeoutPending = false;
volatile sig_atomic_t ProcSignalBarrierPending = false;
volatile sig_atomic_t LogMemoryContextPending = false;
volatile sig_atomic_t IdleStatsUpdateTimeoutPending = false;
volatile uint32 InterruptHoldoffCount = 0;
volatile uint32 QueryCancelHoldoffCount = 0;
volatile uint32 CritSectionCount = 0;

View File

@ -288,9 +288,6 @@ GetBackendTypeDesc(BackendType backendType)
case B_ARCHIVER:
backendDesc = "archiver";
break;
case B_STATS_COLLECTOR:
backendDesc = "stats collector";
break;
case B_LOGGER:
backendDesc = "logger";
break;

View File

@ -80,6 +80,7 @@ static void StatementTimeoutHandler(void);
static void LockTimeoutHandler(void);
static void IdleInTransactionSessionTimeoutHandler(void);
static void IdleSessionTimeoutHandler(void);
static void IdleStatsUpdateTimeoutHandler(void);
static void ClientCheckTimeoutHandler(void);
static bool ThereIsAtLeastOneRole(void);
static void process_startup_options(Port *port, bool am_superuser);
@ -725,6 +726,8 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
IdleInTransactionSessionTimeoutHandler);
RegisterTimeout(IDLE_SESSION_TIMEOUT, IdleSessionTimeoutHandler);
RegisterTimeout(CLIENT_CONNECTION_CHECK_TIMEOUT, ClientCheckTimeoutHandler);
RegisterTimeout(IDLE_STATS_UPDATE_TIMEOUT,
IdleStatsUpdateTimeoutHandler);
}
/*
@ -752,6 +755,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
* Use before_shmem_exit() so that ShutdownXLOG() can rely on DSM
* segments etc to work (which in turn is required for pgstats).
*/
before_shmem_exit(pgstat_before_server_shutdown, 0);
before_shmem_exit(ShutdownXLOG, 0);
}
@ -1334,6 +1338,14 @@ IdleSessionTimeoutHandler(void)
SetLatch(MyLatch);
}
static void
IdleStatsUpdateTimeoutHandler(void)
{
IdleStatsUpdateTimeoutPending = true;
InterruptPending = true;
SetLatch(MyLatch);
}
static void
ClientCheckTimeoutHandler(void)
{

View File

@ -375,6 +375,16 @@ static const struct config_enum_entry track_function_options[] = {
StaticAssertDecl(lengthof(track_function_options) == (TRACK_FUNC_ALL + 2),
"array length mismatch");
static const struct config_enum_entry stats_fetch_consistency[] = {
{"none", PGSTAT_FETCH_CONSISTENCY_NONE, false},
{"cache", PGSTAT_FETCH_CONSISTENCY_CACHE, false},
{"snapshot", PGSTAT_FETCH_CONSISTENCY_SNAPSHOT, false},
{NULL, 0, false}
};
StaticAssertDecl(lengthof(stats_fetch_consistency) == (PGSTAT_FETCH_CONSISTENCY_SNAPSHOT + 2),
"array length mismatch");
static const struct config_enum_entry xmlbinary_options[] = {
{"base64", XMLBINARY_BASE64, false},
{"hex", XMLBINARY_HEX, false},
@ -4918,6 +4928,17 @@ static struct config_enum ConfigureNamesEnum[] =
NULL, NULL, NULL
},
{
{"stats_fetch_consistency", PGC_USERSET, STATS_COLLECTOR,
gettext_noop("Sets the consistency of accesses to statistics data"),
NULL
},
&pgstat_fetch_consistency,
PGSTAT_FETCH_CONSISTENCY_CACHE, stats_fetch_consistency,
NULL, NULL, NULL
},
{
{"wal_compression", PGC_SUSET, WAL_SETTINGS,
gettext_noop("Compresses full-page writes written in WAL file with specified method."),

View File

@ -614,6 +614,7 @@
#track_wal_io_timing = off
#track_functions = none # none, pl, all
#stats_temp_directory = 'pg_stat_tmp'
#stats_fetch_consistency = none
# - Monitoring -

View File

@ -94,6 +94,7 @@ extern PGDLLIMPORT volatile sig_atomic_t IdleInTransactionSessionTimeoutPending;
extern PGDLLIMPORT volatile sig_atomic_t IdleSessionTimeoutPending;
extern PGDLLIMPORT volatile sig_atomic_t ProcSignalBarrierPending;
extern PGDLLIMPORT volatile sig_atomic_t LogMemoryContextPending;
extern PGDLLIMPORT volatile sig_atomic_t IdleStatsUpdateTimeoutPending;
extern PGDLLIMPORT volatile sig_atomic_t CheckClientConnectionPending;
extern PGDLLIMPORT volatile sig_atomic_t ClientConnectionLost;
@ -333,7 +334,6 @@ typedef enum BackendType
B_WAL_SENDER,
B_WAL_WRITER,
B_ARCHIVER,
B_STATS_COLLECTOR,
B_LOGGER,
} BackendType;

View File

@ -14,10 +14,8 @@
#include "datatype/timestamp.h"
#include "portability/instr_time.h"
#include "postmaster/pgarch.h" /* for MAX_XFN_CHARS */
#include "replication/logicalproto.h"
#include "utils/backend_progress.h" /* for backward compatibility */
#include "utils/backend_status.h" /* for backward compatibility */
#include "utils/hsearch.h"
#include "utils/relcache.h"
#include "utils/wait_event.h" /* for backward compatibility */
@ -27,8 +25,8 @@
* ----------
*/
#define PGSTAT_STAT_PERMANENT_DIRECTORY "pg_stat"
#define PGSTAT_STAT_PERMANENT_FILENAME "pg_stat/global.stat"
#define PGSTAT_STAT_PERMANENT_TMPFILE "pg_stat/global.tmp"
#define PGSTAT_STAT_PERMANENT_FILENAME "pg_stat/pgstat.stat"
#define PGSTAT_STAT_PERMANENT_TMPFILE "pg_stat/pgstat.tmp"
/* Default directory to store temporary statistics data in */
#define PG_STAT_TMP_DIR "pg_stat_tmp"
@ -66,6 +64,13 @@ typedef enum TrackFunctionsLevel
TRACK_FUNC_ALL
} TrackFunctionsLevel;
typedef enum PgStat_FetchConsistency
{
PGSTAT_FETCH_CONSISTENCY_NONE,
PGSTAT_FETCH_CONSISTENCY_CACHE,
PGSTAT_FETCH_CONSISTENCY_SNAPSHOT,
} PgStat_FetchConsistency;
/* Values to track the cause of session termination */
typedef enum SessionEndType
{
@ -92,7 +97,7 @@ typedef int64 PgStat_Counter;
* PgStat_FunctionCounts The actual per-function counts kept by a backend
*
* This struct should contain only actual event counters, because we memcmp
* it against zeroes to detect whether there are any counts to transmit.
* it against zeroes to detect whether there are any pending stats.
*
* Note that the time counters are in instr_time format here. We convert to
* microseconds in PgStat_Counter format when flushing out pending statistics.
@ -106,12 +111,11 @@ typedef struct PgStat_FunctionCounts
} PgStat_FunctionCounts;
/* ----------
* PgStat_BackendFunctionEntry Entry in backend's per-function hash table
* PgStat_BackendFunctionEntry Non-flushed function stats.
* ----------
*/
typedef struct PgStat_BackendFunctionEntry
{
Oid f_id;
PgStat_FunctionCounts f_counts;
} PgStat_BackendFunctionEntry;
@ -131,13 +135,22 @@ typedef struct PgStat_FunctionCallUsage
instr_time f_start;
} PgStat_FunctionCallUsage;
/* ----------
* PgStat_BackendSubEntry Non-flushed subscription stats.
* ----------
*/
typedef struct PgStat_BackendSubEntry
{
PgStat_Counter apply_error_count;
PgStat_Counter sync_error_count;
} PgStat_BackendSubEntry;
/* ----------
* PgStat_TableCounts The actual per-table counts kept by a backend
*
* This struct should contain only actual event counters, because we memcmp
* it against zeroes to detect whether there are any counts to transmit.
* It is a component of PgStat_TableStatus (within-backend state) and
* PgStat_TableEntry (the transmitted message format).
* it against zeroes to detect whether there are any stats updates to apply.
* It is a component of PgStat_TableStatus (within-backend state).
*
* Note: for a table, tuples_returned is the number of tuples successfully
* fetched by heap_getnext, while tuples_fetched is the number of tuples
@ -194,6 +207,7 @@ typedef struct PgStat_TableStatus
bool t_shared; /* is it a shared catalog? */
struct PgStat_TableXactStatus *trans; /* lowest subxact's counts */
PgStat_TableCounts t_counts; /* event counts to be sent */
Relation relation; /* rel that is using this entry */
} PgStat_TableStatus;
/* ----------
@ -221,569 +235,14 @@ typedef struct PgStat_TableXactStatus
/* ------------------------------------------------------------
* Message formats follow
* ------------------------------------------------------------
*/
/* ----------
* The types of backend -> collector messages
* ----------
*/
typedef enum StatMsgType
{
PGSTAT_MTYPE_DUMMY,
PGSTAT_MTYPE_INQUIRY,
PGSTAT_MTYPE_TABSTAT,
PGSTAT_MTYPE_TABPURGE,
PGSTAT_MTYPE_DROPDB,
PGSTAT_MTYPE_RESETCOUNTER,
PGSTAT_MTYPE_RESETSHAREDCOUNTER,
PGSTAT_MTYPE_RESETSINGLECOUNTER,
PGSTAT_MTYPE_RESETSLRUCOUNTER,
PGSTAT_MTYPE_RESETREPLSLOTCOUNTER,
PGSTAT_MTYPE_RESETSUBCOUNTER,
PGSTAT_MTYPE_AUTOVAC_START,
PGSTAT_MTYPE_VACUUM,
PGSTAT_MTYPE_ANALYZE,
PGSTAT_MTYPE_ARCHIVER,
PGSTAT_MTYPE_BGWRITER,
PGSTAT_MTYPE_CHECKPOINTER,
PGSTAT_MTYPE_WAL,
PGSTAT_MTYPE_SLRU,
PGSTAT_MTYPE_FUNCSTAT,
PGSTAT_MTYPE_FUNCPURGE,
PGSTAT_MTYPE_RECOVERYCONFLICT,
PGSTAT_MTYPE_TEMPFILE,
PGSTAT_MTYPE_DEADLOCK,
PGSTAT_MTYPE_CHECKSUMFAILURE,
PGSTAT_MTYPE_REPLSLOT,
PGSTAT_MTYPE_CONNECT,
PGSTAT_MTYPE_DISCONNECT,
PGSTAT_MTYPE_SUBSCRIPTIONDROP,
PGSTAT_MTYPE_SUBSCRIPTIONERROR,
} StatMsgType;
/* ----------
* PgStat_MsgHdr The common message header
* ----------
*/
typedef struct PgStat_MsgHdr
{
StatMsgType m_type;
int m_size;
} PgStat_MsgHdr;
/* ----------
* Space available in a message. This will keep the UDP packets below 1K,
* which should fit unfragmented into the MTU of the loopback interface.
* (Larger values of PGSTAT_MAX_MSG_SIZE would work for that on most
* platforms, but we're being conservative here.)
* ----------
*/
#define PGSTAT_MAX_MSG_SIZE 1000
#define PGSTAT_MSG_PAYLOAD (PGSTAT_MAX_MSG_SIZE - sizeof(PgStat_MsgHdr))
/* ----------
* PgStat_MsgDummy A dummy message, ignored by the collector
* ----------
*/
typedef struct PgStat_MsgDummy
{
PgStat_MsgHdr m_hdr;
} PgStat_MsgDummy;
/* ----------
* PgStat_MsgInquiry Sent by a backend to ask the collector
* to write the stats file(s).
*
* Ordinarily, an inquiry message prompts writing of the global stats file,
* the stats file for shared catalogs, and the stats file for the specified
* database. If databaseid is InvalidOid, only the first two are written.
*
* New file(s) will be written only if the existing file has a timestamp
* older than the specified cutoff_time; this prevents duplicated effort
* when multiple requests arrive at nearly the same time, assuming that
* backends send requests with cutoff_times a little bit in the past.
*
* clock_time should be the requestor's current local time; the collector
* uses this to check for the system clock going backward, but it has no
* effect unless that occurs. We assume clock_time >= cutoff_time, though.
* ----------
*/
typedef struct PgStat_MsgInquiry
{
PgStat_MsgHdr m_hdr;
TimestampTz clock_time; /* observed local clock time */
TimestampTz cutoff_time; /* minimum acceptable file timestamp */
Oid databaseid; /* requested DB (InvalidOid => shared only) */
} PgStat_MsgInquiry;
/* ----------
* PgStat_TableEntry Per-table info in a MsgTabstat
* ----------
*/
typedef struct PgStat_TableEntry
{
Oid t_id;
PgStat_TableCounts t_counts;
} PgStat_TableEntry;
/* ----------
* PgStat_MsgTabstat Sent by the backend to report table
* and buffer access statistics.
* ----------
*/
#define PGSTAT_NUM_TABENTRIES \
((PGSTAT_MSG_PAYLOAD - sizeof(Oid) - 3 * sizeof(int) - 5 * sizeof(PgStat_Counter)) \
/ sizeof(PgStat_TableEntry))
typedef struct PgStat_MsgTabstat
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
int m_nentries;
int m_xact_commit;
int m_xact_rollback;
PgStat_Counter m_block_read_time; /* times in microseconds */
PgStat_Counter m_block_write_time;
PgStat_Counter m_session_time;
PgStat_Counter m_active_time;
PgStat_Counter m_idle_in_xact_time;
PgStat_TableEntry m_entry[PGSTAT_NUM_TABENTRIES];
} PgStat_MsgTabstat;
/* ----------
* PgStat_MsgTabpurge Sent by the backend to tell the collector
* about dead tables.
* ----------
*/
#define PGSTAT_NUM_TABPURGE \
((PGSTAT_MSG_PAYLOAD - sizeof(Oid) - sizeof(int)) \
/ sizeof(Oid))
typedef struct PgStat_MsgTabpurge
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
int m_nentries;
Oid m_tableid[PGSTAT_NUM_TABPURGE];
} PgStat_MsgTabpurge;
/* ----------
* PgStat_MsgDropdb Sent by the backend to tell the collector
* about a dropped database
* ----------
*/
typedef struct PgStat_MsgDropdb
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
} PgStat_MsgDropdb;
/* ----------
* PgStat_MsgResetcounter Sent by the backend to tell the collector
* to reset counters
* ----------
*/
typedef struct PgStat_MsgResetcounter
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
} PgStat_MsgResetcounter;
/* ----------
* PgStat_MsgResetsharedcounter Sent by the backend to tell the collector
* to reset a shared counter
* ----------
*/
typedef struct PgStat_MsgResetsharedcounter
{
PgStat_MsgHdr m_hdr;
PgStat_Kind m_resettarget;
} PgStat_MsgResetsharedcounter;
/* ----------
* PgStat_MsgResetsinglecounter Sent by the backend to tell the collector
* to reset a single counter
* ----------
*/
typedef struct PgStat_MsgResetsinglecounter
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
PgStat_Kind m_resettype;
Oid m_objectid;
} PgStat_MsgResetsinglecounter;
/* ----------
* PgStat_MsgResetslrucounter Sent by the backend to tell the collector
* to reset a SLRU counter
* ----------
*/
typedef struct PgStat_MsgResetslrucounter
{
PgStat_MsgHdr m_hdr;
int m_index;
} PgStat_MsgResetslrucounter;
/* ----------
* PgStat_MsgResetreplslotcounter Sent by the backend to tell the collector
* to reset replication slot counter(s)
* ----------
*/
typedef struct PgStat_MsgResetreplslotcounter
{
PgStat_MsgHdr m_hdr;
NameData m_slotname;
bool clearall;
} PgStat_MsgResetreplslotcounter;
/* ----------
* PgStat_MsgResetsubcounter Sent by the backend to tell the collector
* to reset subscription counter(s)
* ----------
*/
typedef struct PgStat_MsgResetsubcounter
{
PgStat_MsgHdr m_hdr;
Oid m_subid; /* InvalidOid means reset all subscription
* stats */
} PgStat_MsgResetsubcounter;
/* ----------
* PgStat_MsgAutovacStart Sent by the autovacuum daemon to signal
* that a database is going to be processed
* ----------
*/
typedef struct PgStat_MsgAutovacStart
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
TimestampTz m_start_time;
} PgStat_MsgAutovacStart;
/* ----------
* PgStat_MsgVacuum Sent by the backend or autovacuum daemon
* after VACUUM
* ----------
*/
typedef struct PgStat_MsgVacuum
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
Oid m_tableoid;
bool m_autovacuum;
TimestampTz m_vacuumtime;
PgStat_Counter m_live_tuples;
PgStat_Counter m_dead_tuples;
} PgStat_MsgVacuum;
/* ----------
* PgStat_MsgAnalyze Sent by the backend or autovacuum daemon
* after ANALYZE
* ----------
*/
typedef struct PgStat_MsgAnalyze
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
Oid m_tableoid;
bool m_autovacuum;
bool m_resetcounter;
TimestampTz m_analyzetime;
PgStat_Counter m_live_tuples;
PgStat_Counter m_dead_tuples;
} PgStat_MsgAnalyze;
/* ----------
* PgStat_MsgArchiver Sent by the archiver to update statistics.
* ----------
*/
typedef struct PgStat_MsgArchiver
{
PgStat_MsgHdr m_hdr;
bool m_failed; /* Failed attempt */
char m_xlog[MAX_XFN_CHARS + 1];
TimestampTz m_timestamp;
} PgStat_MsgArchiver;
/* ----------
* PgStat_MsgBgWriter Sent by the bgwriter to update statistics.
* ----------
*/
typedef struct PgStat_MsgBgWriter
{
PgStat_MsgHdr m_hdr;
PgStat_Counter m_buf_written_clean;
PgStat_Counter m_maxwritten_clean;
PgStat_Counter m_buf_alloc;
} PgStat_MsgBgWriter;
/* ----------
* PgStat_MsgCheckpointer Sent by the checkpointer to update statistics.
* ----------
*/
typedef struct PgStat_MsgCheckpointer
{
PgStat_MsgHdr m_hdr;
PgStat_Counter m_timed_checkpoints;
PgStat_Counter m_requested_checkpoints;
PgStat_Counter m_buf_written_checkpoints;
PgStat_Counter m_buf_written_backend;
PgStat_Counter m_buf_fsync_backend;
PgStat_Counter m_checkpoint_write_time; /* times in milliseconds */
PgStat_Counter m_checkpoint_sync_time;
} PgStat_MsgCheckpointer;
/* ----------
* PgStat_MsgWal Sent by backends and background processes to update WAL statistics.
* ----------
*/
typedef struct PgStat_MsgWal
{
PgStat_MsgHdr m_hdr;
PgStat_Counter m_wal_records;
PgStat_Counter m_wal_fpi;
uint64 m_wal_bytes;
PgStat_Counter m_wal_buffers_full;
PgStat_Counter m_wal_write;
PgStat_Counter m_wal_sync;
PgStat_Counter m_wal_write_time; /* time spent writing wal records in
* microseconds */
PgStat_Counter m_wal_sync_time; /* time spent syncing wal records in
* microseconds */
} PgStat_MsgWal;
/* ----------
* PgStat_MsgSLRU Sent by a backend to update SLRU statistics.
* ----------
*/
typedef struct PgStat_MsgSLRU
{
PgStat_MsgHdr m_hdr;
PgStat_Counter m_index;
PgStat_Counter m_blocks_zeroed;
PgStat_Counter m_blocks_hit;
PgStat_Counter m_blocks_read;
PgStat_Counter m_blocks_written;
PgStat_Counter m_blocks_exists;
PgStat_Counter m_flush;
PgStat_Counter m_truncate;
} PgStat_MsgSLRU;
/* ----------
* PgStat_MsgReplSlot Sent by a backend or a wal sender to update replication
* slot statistics.
* ----------
*/
typedef struct PgStat_MsgReplSlot
{
PgStat_MsgHdr m_hdr;
NameData m_slotname;
bool m_create;
bool m_drop;
PgStat_Counter m_spill_txns;
PgStat_Counter m_spill_count;
PgStat_Counter m_spill_bytes;
PgStat_Counter m_stream_txns;
PgStat_Counter m_stream_count;
PgStat_Counter m_stream_bytes;
PgStat_Counter m_total_txns;
PgStat_Counter m_total_bytes;
} PgStat_MsgReplSlot;
/* ----------
* PgStat_MsgSubscriptionDrop Sent by the backend and autovacuum to tell the
* collector about the dead subscription.
* ----------
*/
typedef struct PgStat_MsgSubscriptionDrop
{
PgStat_MsgHdr m_hdr;
Oid m_subid;
} PgStat_MsgSubscriptionDrop;
/* ----------
* PgStat_MsgSubscriptionError Sent by the apply worker or the table sync
* worker to report an error on the subscription.
* ----------
*/
typedef struct PgStat_MsgSubscriptionError
{
PgStat_MsgHdr m_hdr;
Oid m_subid;
bool m_is_apply_error;
} PgStat_MsgSubscriptionError;
/* ----------
* PgStat_MsgRecoveryConflict Sent by the backend upon recovery conflict
* ----------
*/
typedef struct PgStat_MsgRecoveryConflict
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
int m_reason;
} PgStat_MsgRecoveryConflict;
/* ----------
* PgStat_MsgTempFile Sent by the backend upon creating a temp file
* ----------
*/
typedef struct PgStat_MsgTempFile
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
size_t m_filesize;
} PgStat_MsgTempFile;
/* ----------
* PgStat_FunctionEntry Per-function info in a MsgFuncstat
* ----------
*/
typedef struct PgStat_FunctionEntry
{
Oid f_id;
PgStat_Counter f_numcalls;
PgStat_Counter f_total_time; /* times in microseconds */
PgStat_Counter f_self_time;
} PgStat_FunctionEntry;
/* ----------
* PgStat_MsgFuncstat Sent by the backend to report function
* usage statistics.
* ----------
*/
#define PGSTAT_NUM_FUNCENTRIES \
((PGSTAT_MSG_PAYLOAD - sizeof(Oid) - sizeof(int)) \
/ sizeof(PgStat_FunctionEntry))
typedef struct PgStat_MsgFuncstat
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
int m_nentries;
PgStat_FunctionEntry m_entry[PGSTAT_NUM_FUNCENTRIES];
} PgStat_MsgFuncstat;
/* ----------
* PgStat_MsgFuncpurge Sent by the backend to tell the collector
* about dead functions.
* ----------
*/
#define PGSTAT_NUM_FUNCPURGE \
((PGSTAT_MSG_PAYLOAD - sizeof(Oid) - sizeof(int)) \
/ sizeof(Oid))
typedef struct PgStat_MsgFuncpurge
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
int m_nentries;
Oid m_functionid[PGSTAT_NUM_FUNCPURGE];
} PgStat_MsgFuncpurge;
/* ----------
* PgStat_MsgDeadlock Sent by the backend to tell the collector
* about a deadlock that occurred.
* ----------
*/
typedef struct PgStat_MsgDeadlock
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
} PgStat_MsgDeadlock;
/* ----------
* PgStat_MsgChecksumFailure Sent by the backend to tell the collector
* about checksum failures noticed.
* ----------
*/
typedef struct PgStat_MsgChecksumFailure
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
int m_failurecount;
TimestampTz m_failure_time;
} PgStat_MsgChecksumFailure;
/* ----------
* PgStat_MsgConnect Sent by the backend upon connection
* establishment
* ----------
*/
typedef struct PgStat_MsgConnect
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
} PgStat_MsgConnect;
/* ----------
* PgStat_MsgDisconnect Sent by the backend when disconnecting
* ----------
*/
typedef struct PgStat_MsgDisconnect
{
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
SessionEndType m_cause;
} PgStat_MsgDisconnect;
/* ----------
* PgStat_Msg Union over all possible messages.
* ----------
*/
typedef union PgStat_Msg
{
PgStat_MsgHdr msg_hdr;
PgStat_MsgDummy msg_dummy;
PgStat_MsgInquiry msg_inquiry;
PgStat_MsgTabstat msg_tabstat;
PgStat_MsgTabpurge msg_tabpurge;
PgStat_MsgDropdb msg_dropdb;
PgStat_MsgResetcounter msg_resetcounter;
PgStat_MsgResetsharedcounter msg_resetsharedcounter;
PgStat_MsgResetsinglecounter msg_resetsinglecounter;
PgStat_MsgResetslrucounter msg_resetslrucounter;
PgStat_MsgResetreplslotcounter msg_resetreplslotcounter;
PgStat_MsgResetsubcounter msg_resetsubcounter;
PgStat_MsgAutovacStart msg_autovacuum_start;
PgStat_MsgVacuum msg_vacuum;
PgStat_MsgAnalyze msg_analyze;
PgStat_MsgArchiver msg_archiver;
PgStat_MsgBgWriter msg_bgwriter;
PgStat_MsgCheckpointer msg_checkpointer;
PgStat_MsgWal msg_wal;
PgStat_MsgSLRU msg_slru;
PgStat_MsgFuncstat msg_funcstat;
PgStat_MsgFuncpurge msg_funcpurge;
PgStat_MsgRecoveryConflict msg_recoveryconflict;
PgStat_MsgDeadlock msg_deadlock;
PgStat_MsgTempFile msg_tempfile;
PgStat_MsgChecksumFailure msg_checksumfailure;
PgStat_MsgReplSlot msg_replslot;
PgStat_MsgConnect msg_connect;
PgStat_MsgDisconnect msg_disconnect;
PgStat_MsgSubscriptionError msg_subscriptionerror;
PgStat_MsgSubscriptionDrop msg_subscriptiondrop;
} PgStat_Msg;
/* ------------------------------------------------------------
* Statistic collector data structures follow
* Data structures on disk and in shared memory follow
*
* PGSTAT_FILE_FORMAT_ID should be changed whenever any of these
* data structures change.
* ------------------------------------------------------------
*/
#define PGSTAT_FILE_FORMAT_ID 0x01A5BCA6
#define PGSTAT_FILE_FORMAT_ID 0x01A5BCA7
typedef struct PgStat_ArchiverStats
{
@ -808,7 +267,6 @@ typedef struct PgStat_BgWriterStats
typedef struct PgStat_CheckpointerStats
{
TimestampTz stats_timestamp; /* time of stats file update */
PgStat_Counter timed_checkpoints;
PgStat_Counter requested_checkpoints;
PgStat_Counter checkpoint_write_time; /* times in milliseconds */
@ -820,7 +278,6 @@ typedef struct PgStat_CheckpointerStats
typedef struct PgStat_StatDBEntry
{
Oid databaseid;
PgStat_Counter n_xact_commit;
PgStat_Counter n_xact_rollback;
PgStat_Counter n_blocks_fetched;
@ -852,34 +309,16 @@ typedef struct PgStat_StatDBEntry
PgStat_Counter n_sessions_killed;
TimestampTz stat_reset_timestamp;
TimestampTz stats_timestamp; /* time of db stats file update */
/*
* tables and functions must be last in the struct, because we don't write
* the pointers out to the stats file.
*/
HTAB *tables;
HTAB *functions;
} PgStat_StatDBEntry;
typedef struct PgStat_StatFuncEntry
{
Oid functionid;
PgStat_Counter f_numcalls;
PgStat_Counter f_total_time; /* times in microseconds */
PgStat_Counter f_self_time;
} PgStat_StatFuncEntry;
typedef struct PgStat_GlobalStats
{
TimestampTz stats_timestamp; /* time of stats file update */
PgStat_CheckpointerStats checkpointer;
PgStat_BgWriterStats bgwriter;
} PgStat_GlobalStats;
typedef struct PgStat_StatReplSlotEntry
{
NameData slotname;
@ -908,8 +347,6 @@ typedef struct PgStat_SLRUStats
typedef struct PgStat_StatSubEntry
{
Oid subid; /* hash key (must be first) */
PgStat_Counter apply_error_count;
PgStat_Counter sync_error_count;
TimestampTz stat_reset_timestamp;
@ -917,8 +354,6 @@ typedef struct PgStat_StatSubEntry
typedef struct PgStat_StatTabEntry
{
Oid tableid;
PgStat_Counter numscans;
PgStat_Counter tuples_returned;
@ -966,22 +401,19 @@ typedef struct PgStat_WalStats
*/
/* functions called from postmaster */
extern void pgstat_init(void);
extern void pgstat_reset_all(void);
extern int pgstat_start(void);
extern void allow_immediate_pgstat_restart(void);
extern Size StatsShmemSize(void);
extern void StatsShmemInit(void);
#ifdef EXEC_BACKEND
extern void PgstatCollectorMain(int argc, char *argv[]) pg_attribute_noreturn();
#endif
/* Functions called during server startup / shutdown */
extern void pgstat_restore_stats(void);
extern void pgstat_discard_stats(void);
extern void pgstat_before_server_shutdown(int code, Datum arg);
/* Functions for backend initialization */
extern void pgstat_initialize(void);
/* Functions called from backends */
extern void pgstat_report_stat(bool force);
extern void pgstat_vacuum_stat(void);
extern void pgstat_ping(void);
extern long pgstat_report_stat(bool force);
extern void pgstat_reset_counters(void);
extern void pgstat_reset(PgStat_Kind kind, Oid dboid, Oid objectid);
@ -989,24 +421,17 @@ extern void pgstat_reset_of_kind(PgStat_Kind kind);
/* stats accessors */
extern void pgstat_clear_snapshot(void);
extern PgStat_ArchiverStats *pgstat_fetch_stat_archiver(void);
extern PgStat_BgWriterStats *pgstat_fetch_stat_bgwriter(void);
extern PgStat_CheckpointerStats *pgstat_fetch_stat_checkpointer(void);
extern PgStat_StatDBEntry *pgstat_fetch_stat_dbentry(Oid dbid);
extern PgStat_StatFuncEntry *pgstat_fetch_stat_funcentry(Oid funcid);
extern PgStat_GlobalStats *pgstat_fetch_global(void);
extern PgStat_StatReplSlotEntry *pgstat_fetch_replslot(NameData slotname);
extern PgStat_StatSubEntry *pgstat_fetch_stat_subscription(Oid subid);
extern PgStat_SLRUStats *pgstat_fetch_slru(void);
extern PgStat_StatTabEntry *pgstat_fetch_stat_tabentry(Oid relid);
extern PgStat_WalStats *pgstat_fetch_stat_wal(void);
extern TimestampTz pgstat_get_stat_snapshot_timestamp(bool *have_snapshot);
/* helpers */
extern PgStat_Kind pgstat_get_kind_from_str(char *kind_str);
/*
* Functions in pgstat_archiver.c
*/
extern void pgstat_report_archiver(const char *xlog, bool failed);
extern PgStat_ArchiverStats *pgstat_fetch_stat_archiver(void);
/*
@ -1014,6 +439,7 @@ extern void pgstat_report_archiver(const char *xlog, bool failed);
*/
extern void pgstat_report_bgwriter(void);
extern PgStat_BgWriterStats *pgstat_fetch_stat_bgwriter(void);
/*
@ -1021,6 +447,7 @@ extern void pgstat_report_bgwriter(void);
*/
extern void pgstat_report_checkpointer(void);
extern PgStat_CheckpointerStats *pgstat_fetch_stat_checkpointer(void);
/*
@ -1044,6 +471,7 @@ extern void pgstat_report_connect(Oid dboid);
#define pgstat_count_conn_txn_idle_time(n) \
(pgStatTransactionIdleTime += (n))
extern PgStat_StatDBEntry *pgstat_fetch_stat_dbentry(Oid dbid);
/*
* Functions in pgstat_function.c
@ -1058,6 +486,7 @@ extern void pgstat_init_function_usage(struct FunctionCallInfoBaseData *fcinfo,
extern void pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu,
bool finalize);
extern PgStat_StatFuncEntry *pgstat_fetch_stat_funcentry(Oid funcid);
extern PgStat_BackendFunctionEntry *find_funcstat_entry(Oid func_id);
@ -1070,6 +499,8 @@ extern void pgstat_drop_relation(Relation rel);
extern void pgstat_copy_relation_stats(Relation dstrel, Relation srcrel);
extern void pgstat_init_relation(Relation rel);
extern void pgstat_assoc_relation(Relation rel);
extern void pgstat_unlink_relation(Relation rel);
extern void pgstat_report_vacuum(Oid tableoid, bool shared,
PgStat_Counter livetuples, PgStat_Counter deadtuples);
@ -1077,8 +508,14 @@ extern void pgstat_report_analyze(Relation rel,
PgStat_Counter livetuples, PgStat_Counter deadtuples,
bool resetcounter);
/*
* If stats are enabled, but pending data hasn't been prepared yet, call
* pgstat_assoc_relation() to do so. See its comment for why this is done
* separately from pgstat_init_relation().
*/
#define pgstat_should_count_relation(rel) \
(likely((rel)->pgstat_info != NULL))
(likely((rel)->pgstat_info != NULL) ? true : \
((rel)->pgstat_enabled ? pgstat_assoc_relation(rel), true : false))
/* nontransactional event counts are simple enough to inline */
@ -1129,6 +566,9 @@ extern void pgstat_twophase_postcommit(TransactionId xid, uint16 info,
extern void pgstat_twophase_postabort(TransactionId xid, uint16 info,
void *recdata, uint32 len);
extern PgStat_StatTabEntry *pgstat_fetch_stat_tabentry(Oid relid);
extern PgStat_StatTabEntry *pgstat_fetch_stat_tabentry_ext(bool shared,
Oid relid);
extern PgStat_TableStatus *find_tabstat_entry(Oid rel_id);
@ -1140,7 +580,9 @@ extern void pgstat_reset_replslot(const char *name);
struct ReplicationSlot;
extern void pgstat_report_replslot(struct ReplicationSlot *slot, const PgStat_StatReplSlotEntry *repSlotStat);
extern void pgstat_create_replslot(struct ReplicationSlot *slot);
extern void pgstat_acquire_replslot(struct ReplicationSlot *slot);
extern void pgstat_drop_replslot(struct ReplicationSlot *slot);
extern PgStat_StatReplSlotEntry *pgstat_fetch_replslot(NameData slotname);
/*
@ -1157,6 +599,7 @@ extern void pgstat_count_slru_flush(int slru_idx);
extern void pgstat_count_slru_truncate(int slru_idx);
extern const char *pgstat_get_slru_name(int slru_idx);
extern int pgstat_get_slru_index(const char *name);
extern PgStat_SLRUStats *pgstat_fetch_slru(void);
/*
@ -1166,6 +609,7 @@ extern int pgstat_get_slru_index(const char *name);
extern void pgstat_report_subscription_error(Oid subid, bool is_apply_error);
extern void pgstat_create_subscription(Oid subid);
extern void pgstat_drop_subscription(Oid subid);
extern PgStat_StatSubEntry *pgstat_fetch_stat_subscription(Oid subid);
/*
@ -1186,6 +630,7 @@ extern void pgstat_execute_transactional_drops(int ndrops, struct xl_xact_stats_
*/
extern void pgstat_report_wal(bool force);
extern PgStat_WalStats *pgstat_fetch_stat_wal(void);
/*
@ -1195,6 +640,8 @@ extern void pgstat_report_wal(bool force);
/* GUC parameters */
extern PGDLLIMPORT bool pgstat_track_counts;
extern PGDLLIMPORT int pgstat_track_functions;
extern PGDLLIMPORT int pgstat_fetch_consistency;
extern char *pgstat_stat_directory;
extern char *pgstat_stat_tmpname;
extern char *pgstat_stat_filename;
@ -1205,7 +652,7 @@ extern char *pgstat_stat_filename;
*/
/* updated directly by bgwriter and bufmgr */
extern PgStat_MsgBgWriter PendingBgWriterStats;
extern PgStat_BgWriterStats PendingBgWriterStats;
/*
@ -1216,7 +663,7 @@ extern PgStat_MsgBgWriter PendingBgWriterStats;
* Checkpointer statistics counters are updated directly by checkpointer and
* bufmgr.
*/
extern PgStat_MsgCheckpointer PendingCheckpointerStats;
extern PgStat_CheckpointerStats PendingCheckpointerStats;
/*
@ -1243,7 +690,7 @@ extern SessionEndType pgStatSessionEndCause;
*/
/* updated directly by backends and background processes */
extern PgStat_MsgWal WalStats;
extern PgStat_WalStats PendingWalStats;
#endif /* PGSTAT_H */

View File

@ -190,6 +190,9 @@ typedef enum BuiltinTrancheIds
LWTRANCHE_SHARED_TIDBITMAP,
LWTRANCHE_PARALLEL_APPEND,
LWTRANCHE_PER_XACT_PREDICATE_LIST,
LWTRANCHE_PGSTATS_DSA,
LWTRANCHE_PGSTATS_HASH,
LWTRANCHE_PGSTATS_DATA,
LWTRANCHE_FIRST_USER_DEFINED
} BuiltinTrancheIds;

View File

@ -14,21 +14,134 @@
#define PGSTAT_INTERNAL_H
#include "common/hashfn.h"
#include "lib/dshash.h"
#include "lib/ilist.h"
#include "pgstat.h"
#include "storage/lwlock.h"
#include "utils/dsa.h"
#define PGSTAT_STAT_INTERVAL 500 /* Minimum time between stats file
* updates; in milliseconds. */
/* ----------
* The initial size hints for the hash tables used in the collector.
* ----------
/*
* Types related to shared memory storage of statistics.
*
* Per-object statistics are stored in the "shared stats" hashtable. That
* table's entries (PgStatShared_HashEntry) contain a pointer to the actual stats
* data for the object (the size of the stats data varies depending on the
* kind of stats). The table is keyed by PgStat_HashKey.
*
* Once a backend has a reference to a shared stats entry, it increments the
* entry's refcount. Even after stats data is dropped (e.g., due to a DROP
* TABLE), the entry itself can only be deleted once all references have been
* released.
*
* These refcounts, in combination with a backend local hashtable
* (pgStatEntryRefHash, with entries pointing to PgStat_EntryRef) in front of
* the shared hash table, mean that most stats work can happen without
* touching the shared hash table, reducing contention.
*
* Once there are pending stats updates for a table PgStat_EntryRef->pending
* is allocated to contain a working space for as-of-yet-unapplied stats
* updates. Once the stats are flushed, PgStat_EntryRef->pending is freed.
*
* Each stat kind in the shared hash table has a fixed member
* PgStatShared_Common as the first element.
*/
#define PGSTAT_DB_HASH_SIZE 16
#define PGSTAT_TAB_HASH_SIZE 512
#define PGSTAT_FUNCTION_HASH_SIZE 512
#define PGSTAT_SUBSCRIPTION_HASH_SIZE 32
#define PGSTAT_REPLSLOT_HASH_SIZE 32
/*
 * Key for an entry in the shared statistics hash table.
 *
 * NOTE: keys are hashed and compared bytewise (memcmp; see
 * pgstat_hash_hash_key() / pgstat_cmp_hash_key()), so every field --
 * including any padding -- must be fully initialized before lookup/insert.
 */
typedef struct PgStat_HashKey
{
    PgStat_Kind kind;           /* statistics entry kind */
    Oid dboid;                  /* database ID. InvalidOid for shared objects. */
    Oid objoid;                 /* object ID, either table or function. */
} PgStat_HashKey;
/*
 * Shared statistics hash entry. Doesn't itself contain any stats, but points
 * to them (with ->body). That allows the stats entries themselves to be of
 * variable size, while the dshash entries stay fixed-size.
 */
typedef struct PgStatShared_HashEntry
{
    PgStat_HashKey key;         /* hash key */

    /*
     * If dropped is set, backends need to release their references so that
     * the memory for the entry can be freed. No new references may be made
     * once marked as dropped.
     */
    bool dropped;

    /*
     * Refcount managing lifetime of the entry itself (as opposed to the
     * dshash entry pointing to it). The stats lifetime has to be separate
     * from the hash table entry lifetime because we allow backends to point
     * to a stats entry without holding a hash table lock (and some other
     * reasons).
     *
     * As long as the entry is not dropped, 1 is added to the refcount
     * representing that the entry should not be dropped. In addition each
     * backend that has a reference to the entry needs to increment the
     * refcount as long as it does.
     *
     * May only be incremented / decremented while holding at least a shared
     * lock on the dshash partition containing the entry. It needs to be an
     * atomic variable because multiple backends can increment the refcount
     * with just a shared lock.
     *
     * When the refcount reaches 0 the entry needs to be freed.
     */
    pg_atomic_uint32 refcount;

    /*
     * Pointer to shared stats. The stats entry always starts with
     * PgStatShared_Common, embedded in a larger struct containing the
     * PgStat_Kind specific stats fields.  A dsa_pointer, so each backend
     * resolves it to a local address via dsa_get_address().
     */
    dsa_pointer body;
} PgStatShared_HashEntry;
/*
 * Common header struct for PgStatShm_Stat*Entry.  Every variable-numbered
 * stats entry in shared memory embeds this as its first member, so generic
 * code can validate and lock an entry without knowing its kind.
 */
typedef struct PgStatShared_Common
{
    uint32 magic;               /* just a validity cross-check */
    /* lock protecting stats contents (i.e. data following the header) */
    LWLock lock;
} PgStatShared_Common;
/*
 * A backend local reference to a shared stats entry. As long as at least one
 * such reference exists, the shared stats entry will not be released.
 *
 * If there are pending stats updates to the shared stats, these are stored in
 * ->pending.
 */
typedef struct PgStat_EntryRef
{
    /*
     * Pointer to the PgStatShared_HashEntry entry in the shared stats
     * hashtable.
     */
    PgStatShared_HashEntry *shared_entry;

    /*
     * Pointer to the stats data (i.e. PgStatShared_HashEntry->body), resolved
     * as a local pointer, to avoid repeated dsa_get_address() calls.
     */
    PgStatShared_Common *shared_stats;

    /*
     * Pending statistics data that will need to be flushed to shared memory
     * stats eventually. Each stats kind utilizing pending data defines what
     * format its pending data has and needs to provide a
     * PgStat_KindInfo->flush_pending_cb callback to merge pending into shared
     * stats.  NULL when there is nothing pending for this entry.
     */
    void *pending;
    dlist_node pending_node;    /* membership in pgStatPending list */
} PgStat_EntryRef;
/*
@ -43,11 +156,11 @@ typedef struct PgStat_SubXactStatus
struct PgStat_SubXactStatus *prev; /* higher-level subxact if any */
/*
* Dropping the statistics for objects that dropped transactionally itself
* needs to be transactional. Therefore we collect the stats dropped in
* the current (sub-)transaction and only execute the stats drop when we
* know if the transaction commits/aborts. To handle replicas and crashes,
* stats drops are included in commit records.
* Statistics for transactionally dropped objects need to be
* transactionally dropped as well. Collect the stats dropped in the
* current (sub-)transaction and only execute the stats drop when we know
* if the transaction commits/aborts. To handle replicas and crashes,
* stats drops are included in commit / abort records.
*/
dlist_head pending_drops;
int pending_drops_count;
@ -64,10 +177,96 @@ typedef struct PgStat_SubXactStatus
} PgStat_SubXactStatus;
/*
 * Metadata for a specific kind of statistics.  One static instance per
 * PgStat_Kind; generic pgstat code dispatches through these fields and
 * callbacks instead of switching on the kind directly.
 */
typedef struct PgStat_KindInfo
{
    /*
     * Do a fixed number of stats objects exist for this kind of stats (e.g.
     * bgwriter stats) or not (e.g. tables).
     */
    bool fixed_amount:1;

    /*
     * Can stats of this kind be accessed from another database? Determines
     * whether a stats object gets included in stats snapshots.
     */
    bool accessed_across_databases:1;

    /*
     * For variable-numbered stats: Identified on-disk using a name, rather
     * than PgStat_HashKey. Probably only needed for replication slot stats.
     */
    bool named_on_disk:1;

    /*
     * The size of an entry in the shared stats hash table (pointed to by
     * PgStatShared_HashEntry->body).
     */
    uint32 shared_size;

    /*
     * The offset/size of statistics inside the shared stats entry. Used when
     * [de-]serializing statistics to / from disk respectively. Separate from
     * shared_size because [de-]serialization may not include in-memory state
     * like lwlocks.
     */
    uint32 shared_data_off;
    uint32 shared_data_len;

    /*
     * The size of the pending data for this kind. E.g. how large
     * PgStat_EntryRef->pending is. Used for allocations.
     *
     * 0 signals that an entry of this kind should never have a pending entry.
     */
    uint32 pending_size;

    /*
     * For variable-numbered stats: flush pending stats. Required if pending
     * data is used.
     */
    bool (*flush_pending_cb) (PgStat_EntryRef *sr, bool nowait);

    /*
     * For variable-numbered stats: delete pending stats. Optional.
     */
    void (*delete_pending_cb) (PgStat_EntryRef *sr);

    /*
     * For variable-numbered stats: reset the reset timestamp. Optional.
     */
    void (*reset_timestamp_cb) (PgStatShared_Common *header, TimestampTz ts);

    /*
     * For variable-numbered stats with named_on_disk. Optional.
     */
    void (*to_serialized_name) (const PgStatShared_Common *header, NameData *name);
    bool (*from_serialized_name) (const NameData *name, PgStat_HashKey *key);

    /*
     * For fixed-numbered statistics: Reset all counters of this kind.
     */
    void (*reset_all_cb) (TimestampTz ts);

    /*
     * For fixed-numbered statistics: Build snapshot for entry.
     */
    void (*snapshot_cb) (void);

    /* name of the kind of stats */
    const char *const name;
} PgStat_KindInfo;
/*
* List of SLRU names that we keep stats for. There is no central registry of
* SLRUs, so we use this fixed list instead. The "other" entry is used for
* all SLRUs without an explicit entry (e.g. SLRUs in extensions).
*
* This is only defined here so that SLRU_NUM_ELEMENTS is known for later type
* definitions.
*/
static const char *const slru_names[] = {
"CommitTs",
@ -83,33 +282,271 @@ static const char *const slru_names[] = {
#define SLRU_NUM_ELEMENTS lengthof(slru_names)
/* ----------
 * Types and definitions for different kinds of fixed-amount stats.
 *
 * Single-writer stats use the changecount mechanism to achieve low-overhead
 * writes - they're obviously more performance critical than reads. Check the
 * definition of struct PgBackendStatus for some explanation of the
 * changecount mechanism.
 *
 * Because the obvious implementation of resetting single-writer stats isn't
 * compatible with that (another backend needs to write), we don't scribble on
 * shared stats while resetting. Instead, just record the current counter
 * values in a copy of the stats data, which is protected by ->lock. See
 * pgstat_fetch_stat_(archiver|bgwriter|checkpointer) for the reader side.
 *
 * The only exception to that is the stat_reset_timestamp in these
 * structs, which is protected by ->lock, because it has to be written by
 * another backend while resetting.
 * ----------
 */

typedef struct PgStatShared_Archiver
{
    /* lock protects ->reset_offset as well as stats->stat_reset_timestamp */
    LWLock lock;
    uint32 changecount;         /* changecount protocol; see above */
    PgStat_ArchiverStats stats;
    PgStat_ArchiverStats reset_offset;  /* values at time of last reset */
} PgStatShared_Archiver;

typedef struct PgStatShared_BgWriter
{
    /* lock protects ->reset_offset as well as stats->stat_reset_timestamp */
    LWLock lock;
    uint32 changecount;         /* changecount protocol; see above */
    PgStat_BgWriterStats stats;
    PgStat_BgWriterStats reset_offset;  /* values at time of last reset */
} PgStatShared_BgWriter;

typedef struct PgStatShared_Checkpointer
{
    /* lock protects ->reset_offset as well as stats->stat_reset_timestamp */
    LWLock lock;
    uint32 changecount;         /* changecount protocol; see above */
    PgStat_CheckpointerStats stats;
    PgStat_CheckpointerStats reset_offset;  /* values at time of last reset */
} PgStatShared_Checkpointer;

typedef struct PgStatShared_SLRU
{
    /* lock protects ->stats */
    LWLock lock;
    PgStat_SLRUStats stats[SLRU_NUM_ELEMENTS];
} PgStatShared_SLRU;

typedef struct PgStatShared_Wal
{
    /* lock protects ->stats */
    LWLock lock;
    PgStat_WalStats stats;
} PgStatShared_Wal;
/* ----------
 * Types and definitions for different kinds of variable-amount stats.
 *
 * Each struct has to start with PgStatShared_Common, containing information
 * common across the different types of stats (magic, lock). Kind-specific
 * data follows; generic code reaches it via PgStat_KindInfo->shared_data_off.
 * ----------
 */

typedef struct PgStatShared_Database
{
    PgStatShared_Common header;
    PgStat_StatDBEntry stats;
} PgStatShared_Database;

typedef struct PgStatShared_Relation
{
    PgStatShared_Common header;
    PgStat_StatTabEntry stats;
} PgStatShared_Relation;

typedef struct PgStatShared_Function
{
    PgStatShared_Common header;
    PgStat_StatFuncEntry stats;
} PgStatShared_Function;

typedef struct PgStatShared_Subscription
{
    PgStatShared_Common header;
    PgStat_StatSubEntry stats;
} PgStatShared_Subscription;

typedef struct PgStatShared_ReplSlot
{
    PgStatShared_Common header;
    PgStat_StatReplSlotEntry stats;
} PgStatShared_ReplSlot;
/*
 * Central shared memory entry for the cumulative stats system.
 *
 * Fixed amount stats, the dynamic shared memory hash table for
 * non-fixed-amount stats, as well as remaining bits and pieces are all
 * reached from here.
 */
typedef struct PgStat_ShmemControl
{
    void *raw_dsa_area;         /* backing DSA for the variable-numbered stats */

    /*
     * Stats for variable-numbered objects are kept in this shared hash table.
     * See comment above PgStat_Kind for details.
     */
    dshash_table_handle hash_handle;    /* shared dbstat hash */

    /* Has the stats system already been shut down? Just a debugging check. */
    bool is_shutdown;

    /*
     * Whenever statistics for dropped objects could not be freed - because
     * backends still have references - the dropping backend calls
     * pgstat_request_entry_refs_gc() incrementing this counter. Eventually
     * that causes backends to run pgstat_gc_entry_refs(), allowing memory to
     * be reclaimed.
     */
    pg_atomic_uint64 gc_request_count;

    /*
     * Stats data for fixed-numbered objects.
     */
    PgStatShared_Archiver archiver;
    PgStatShared_BgWriter bgwriter;
    PgStatShared_Checkpointer checkpointer;
    PgStatShared_SLRU slru;
    PgStatShared_Wal wal;
} PgStat_ShmemControl;
/*
 * Cached statistics snapshot, kept in backend-local memory.  Holds copies of
 * the fixed-numbered stats plus a local hash of variable-numbered entries,
 * so repeated accessor calls within one "snapshot" see consistent values.
 */
typedef struct PgStat_Snapshot
{
    PgStat_FetchConsistency mode;   /* consistency mode the snapshot was built with */

    /* time at which snapshot was taken */
    TimestampTz snapshot_timestamp;

    /* per-kind flag: has this fixed-numbered kind been copied yet? */
    bool fixed_valid[PGSTAT_NUM_KINDS];

    PgStat_ArchiverStats archiver;

    PgStat_BgWriterStats bgwriter;

    PgStat_CheckpointerStats checkpointer;

    PgStat_SLRUStats slru[SLRU_NUM_ELEMENTS];

    PgStat_WalStats wal;

    /* to free snapshot in bulk */
    MemoryContext context;
    struct pgstat_snapshot_hash *stats; /* variable-numbered snapshot entries */
} PgStat_Snapshot;
/*
 * Collection of backend-local stats state: attachments to the shared stats
 * structures plus the cached snapshot.  There is one instance per backend
 * (pgStatLocal).
 */
typedef struct PgStat_LocalState
{
    PgStat_ShmemControl *shmem; /* plain shared memory control struct */
    dsa_area *dsa;              /* attached DSA backing the shared hash */
    dshash_table *shared_hash;  /* attached shared stats hash table */

    /* the current statistics snapshot */
    PgStat_Snapshot snapshot;
} PgStat_LocalState;
/*
* Inline functions defined further below.
*/
static inline void pgstat_begin_changecount_write(uint32 *cc);
static inline void pgstat_end_changecount_write(uint32 *cc);
static inline uint32 pgstat_begin_changecount_read(uint32 *cc);
static inline bool pgstat_end_changecount_read(uint32 *cc, uint32 cc_before);
static inline void pgstat_copy_changecounted_stats(void *dst, void *src, size_t len,
uint32 *cc);
static inline int pgstat_cmp_hash_key(const void *a, const void *b, size_t size, void *arg);
static inline uint32 pgstat_hash_hash_key(const void *d, size_t size, void *arg);
static inline size_t pgstat_get_entry_len(PgStat_Kind kind);
static inline void *pgstat_get_entry_data(PgStat_Kind kind, PgStatShared_Common *entry);
/*
* Functions in pgstat.c
*/
extern void pgstat_setheader(PgStat_MsgHdr *hdr, StatMsgType mtype);
extern void pgstat_send(void *msg, int len);
const PgStat_KindInfo *pgstat_get_kind_info(PgStat_Kind kind);
#ifdef USE_ASSERT_CHECKING
extern void pgstat_assert_is_up(void);
#else
#define pgstat_assert_is_up() ((void)true)
#endif
extern void pgstat_delete_pending_entry(PgStat_EntryRef *entry_ref);
extern PgStat_EntryRef *pgstat_prep_pending_entry(PgStat_Kind kind, Oid dboid, Oid objoid, bool *created_entry);
extern PgStat_EntryRef *pgstat_fetch_pending_entry(PgStat_Kind kind, Oid dboid, Oid objoid);
extern void *pgstat_fetch_entry(PgStat_Kind kind, Oid dboid, Oid objoid);
extern void pgstat_snapshot_fixed(PgStat_Kind kind);
/*
* Functions in pgstat_archiver.c
*/
extern void pgstat_archiver_reset_all_cb(TimestampTz ts);
extern void pgstat_archiver_snapshot_cb(void);
/*
* Functions in pgstat_bgwriter.c
*/
extern void pgstat_bgwriter_reset_all_cb(TimestampTz ts);
extern void pgstat_bgwriter_snapshot_cb(void);
/*
* Functions in pgstat_checkpointer.c
*/
extern void pgstat_checkpointer_reset_all_cb(TimestampTz ts);
extern void pgstat_checkpointer_snapshot_cb(void);
/*
* Functions in pgstat_database.c
*/
extern void AtEOXact_PgStat_Database(bool isCommit, bool parallel);
extern void pgstat_report_disconnect(Oid dboid);
extern void pgstat_update_dbstats(PgStat_MsgTabstat *tsmsg, TimestampTz now);
extern void pgstat_update_dbstats(TimestampTz ts);
extern void AtEOXact_PgStat_Database(bool isCommit, bool parallel);
extern PgStat_StatDBEntry *pgstat_prep_database_pending(Oid dboid);
extern void pgstat_reset_database_timestamp(Oid dboid, TimestampTz ts);
extern bool pgstat_database_flush_cb(PgStat_EntryRef *entry_ref, bool nowait);
extern void pgstat_database_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts);
/*
* Functions in pgstat_function.c
*/
extern void pgstat_send_funcstats(void);
extern bool pgstat_function_flush_cb(PgStat_EntryRef *entry_ref, bool nowait);
/*
@ -120,23 +557,73 @@ extern void AtEOXact_PgStat_Relations(PgStat_SubXactStatus *xact_state, bool isC
extern void AtEOSubXact_PgStat_Relations(PgStat_SubXactStatus *xact_state, bool isCommit, int nestDepth);
extern void AtPrepare_PgStat_Relations(PgStat_SubXactStatus *xact_state);
extern void PostPrepare_PgStat_Relations(PgStat_SubXactStatus *xact_state);
extern void pgstat_send_tabstats(TimestampTz now, bool disconnect);
extern bool pgstat_relation_flush_cb(PgStat_EntryRef *entry_ref, bool nowait);
extern void pgstat_relation_delete_pending_cb(PgStat_EntryRef *entry_ref);
/*
* Functions in pgstat_replslot.c
*/
extern void pgstat_replslot_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts);
extern void pgstat_replslot_to_serialized_name_cb(const PgStatShared_Common *tmp, NameData *name);
extern bool pgstat_replslot_from_serialized_name_cb(const NameData *name, PgStat_HashKey *key);
/*
* Functions in pgstat_shmem.c
*/
extern void pgstat_attach_shmem(void);
extern void pgstat_detach_shmem(void);
extern PgStat_EntryRef *pgstat_get_entry_ref(PgStat_Kind kind, Oid dboid, Oid objoid,
bool create, bool *found);
extern bool pgstat_lock_entry(PgStat_EntryRef *entry_ref, bool nowait);
extern void pgstat_unlock_entry(PgStat_EntryRef *entry_ref);
extern bool pgstat_drop_entry(PgStat_Kind kind, Oid dboid, Oid objoid);
extern void pgstat_drop_all_entries(void);
extern PgStat_EntryRef *pgstat_get_entry_ref_locked(PgStat_Kind kind, Oid dboid, Oid objoid,
bool nowait);
extern void pgstat_reset_entry(PgStat_Kind kind, Oid dboid, Oid objoid, TimestampTz ts);
extern void pgstat_reset_entries_of_kind(PgStat_Kind kind, TimestampTz ts);
extern void pgstat_reset_matching_entries(bool (*do_reset) (PgStatShared_HashEntry *, Datum),
Datum match_data,
TimestampTz ts);
extern void pgstat_request_entry_refs_gc(void);
extern PgStatShared_Common *pgstat_init_entry(PgStat_Kind kind,
PgStatShared_HashEntry *shhashent);
/*
* Functions in pgstat_slru.c
*/
extern void pgstat_send_slru(void);
extern bool pgstat_slru_flush(bool nowait);
extern void pgstat_slru_reset_all_cb(TimestampTz ts);
extern void pgstat_slru_snapshot_cb(void);
/*
* Functions in pgstat_wal.c
*/
extern bool pgstat_flush_wal(bool nowait);
extern void pgstat_init_wal(void);
extern bool pgstat_have_pending_wal(void);
extern void pgstat_wal_reset_all_cb(TimestampTz ts);
extern void pgstat_wal_snapshot_cb(void);
/*
* Functions in pgstat_subscription.c
*/
extern bool pgstat_subscription_flush_cb(PgStat_EntryRef *entry_ref, bool nowait);
extern void pgstat_subscription_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts);
/*
* Functions in pgstat_xact.c
@ -151,29 +638,145 @@ extern void pgstat_create_transactional(PgStat_Kind kind, Oid dboid, Oid objoid)
* Variables in pgstat.c
*/
extern pgsocket pgStatSock;
extern PgStat_LocalState pgStatLocal;
/*
* Variables in pgstat_database.c
* Variables in pgstat_slru.c
*/
extern int pgStatXactCommit;
extern int pgStatXactRollback;
extern bool have_slrustats;
/*
* Variables in pgstat_functions.c
* Implementation of inline functions declared above.
*/
extern bool have_function_stats;
/*
* Helpers for changecount manipulation. See comments around struct
* PgBackendStatus for details.
*/
/*
 * Begin a changecount-protected write.  Increments the count to an odd value
 * (signalling "write in progress" to readers) and issues a write barrier so
 * subsequent stats writes cannot be reordered before the increment.  The
 * critical section guarantees the count cannot be left odd after an error.
 * Statement order is essential here; do not reorder.
 */
static inline void
pgstat_begin_changecount_write(uint32 *cc)
{
    Assert((*cc & 1) == 0);

    START_CRIT_SECTION();
    (*cc)++;
    pg_write_barrier();
}
/*
 * End a changecount-protected write.  The write barrier ensures all stats
 * writes are visible before the count returns to an even value; only then is
 * the critical section exited.  Statement order is essential; do not reorder.
 */
static inline void
pgstat_end_changecount_write(uint32 *cc)
{
    Assert((*cc & 1) == 1);

    pg_write_barrier();

    (*cc)++;

    END_CRIT_SECTION();
}
/*
 * Begin a changecount-protected read: capture the current count, then issue
 * a read barrier so the subsequent data reads cannot be reordered before the
 * capture.  The CHECK_FOR_INTERRUPTS() allows query cancel while a caller
 * retries in a read loop.  Returns the captured count for the matching
 * pgstat_end_changecount_read() call.
 */
static inline uint32
pgstat_begin_changecount_read(uint32 *cc)
{
    uint32 before_cc = *cc;

    CHECK_FOR_INTERRUPTS();

    pg_read_barrier();

    return before_cc;
}
/*
 * Finish a changecount-protected read.  Returns true if the read succeeded,
 * false if it needs to be repeated (i.e. a write was in progress when the
 * read started, or a write started/completed while reading).  The read
 * barrier orders the data reads before re-reading the count.
 */
static inline bool
pgstat_end_changecount_read(uint32 *cc, uint32 before_cc)
{
    uint32 after_cc;

    pg_read_barrier();

    after_cc = *cc;

    /* was a write in progress when we started? */
    if (before_cc & 1)
        return false;

    /* did writes start and complete while we read? */
    return before_cc == after_cc;
}
/*
 * Helper for PgStat_KindInfo->snapshot_cb and
 * PgStat_KindInfo->reset_all_cb callbacks.
 *
 * Copies out the specified memory area following the change-count protocol:
 * the memcpy() is retried until a read completes without a concurrent write
 * having started or finished, so the copy is internally consistent.
 *
 * (A stray "extern bool have_relation_stats;" declaration -- residue of an
 * unrelated, removed variable section -- was deleted from the function body;
 * it was never referenced here.)
 */
static inline void
pgstat_copy_changecounted_stats(void *dst, void *src, size_t len,
                                uint32 *cc)
{
    uint32 cc_before;

    do
    {
        cc_before = pgstat_begin_changecount_read(cc);

        memcpy(dst, src, len);
    }
    while (!pgstat_end_changecount_read(cc, cc_before));
}
/* helpers for dshash / simplehash hashtables */
static inline int
pgstat_cmp_hash_key(const void *a, const void *b, size_t size, void *arg)
{
AssertArg(size == sizeof(PgStat_HashKey) && arg == NULL);
return memcmp(a, b, sizeof(PgStat_HashKey));
}
/*
 * dshash/simplehash hash callback: hash each key field with murmurhash32 and
 * fold them together with hash_combine.
 */
static inline uint32
pgstat_hash_hash_key(const void *d, size_t size, void *arg)
{
    const PgStat_HashKey *key = (PgStat_HashKey *) d;

    AssertArg(size == sizeof(PgStat_HashKey) && arg == NULL);

    return hash_combine(hash_combine(murmurhash32(key->kind),
                                     murmurhash32(key->dboid)),
                        murmurhash32(key->objoid));
}
/*
 * The length of the data portion of a shared memory stats entry, excluding
 * transient in-memory state such as refcounts and lwlocks.
 */
static inline size_t
pgstat_get_entry_len(PgStat_Kind kind)
{
    const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);

    return kind_info->shared_data_len;
}
/*
 * Returns a pointer to the data portion of a shared memory stats entry,
 * i.e. the kind-specific fields following the PgStatShared_Common header.
 */
static inline void *
pgstat_get_entry_data(PgStat_Kind kind, PgStatShared_Common *entry)
{
    const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);

    Assert(kind_info->shared_data_off != 0 &&
           kind_info->shared_data_off < PG_UINT32_MAX);

    return (char *) entry + kind_info->shared_data_off;
}
#endif /* PGSTAT_INTERNAL_H */

View File

@ -246,6 +246,7 @@ typedef struct RelationData
*/
Oid rd_toastoid; /* Real TOAST table's OID, or InvalidOid */
bool pgstat_enabled; /* should relation stats be counted */
/* use "struct" here to avoid needing to include pgstat.h: */
struct PgStat_TableStatus *pgstat_info; /* statistics collection area */
} RelationData;

View File

@ -32,6 +32,7 @@ typedef enum TimeoutId
STANDBY_LOCK_TIMEOUT,
IDLE_IN_TRANSACTION_SESSION_TIMEOUT,
IDLE_SESSION_TIMEOUT,
IDLE_STATS_UPDATE_TIMEOUT,
CLIENT_CONNECTION_CHECK_TIMEOUT,
STARTUP_PROGRESS_TIMEOUT,
/* First user-definable timeout reason */

View File

@ -42,7 +42,6 @@ typedef enum
WAIT_EVENT_CHECKPOINTER_MAIN,
WAIT_EVENT_LOGICAL_APPLY_MAIN,
WAIT_EVENT_LOGICAL_LAUNCHER_MAIN,
WAIT_EVENT_PGSTAT_MAIN,
WAIT_EVENT_RECOVERY_WAL_STREAM,
WAIT_EVENT_SYSLOGGER_MAIN,
WAIT_EVENT_WAL_RECEIVER_MAIN,

View File

@ -265,7 +265,7 @@ worker_spi_main(Datum main_arg)
PopActiveSnapshot();
CommitTransactionCommand();
debug_query_string = NULL;
pgstat_report_stat(false);
pgstat_report_stat(true);
pgstat_report_activity(STATE_IDLE, NULL);
}

View File

@ -17,6 +17,8 @@ SET enable_indexscan TO on;
-- for the moment, we don't want index-only scans here
SET enable_indexonlyscan TO off;
-- save counters
BEGIN;
SET LOCAL stats_fetch_consistency = snapshot;
CREATE TABLE prevstats AS
SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch,
(b.heap_blks_read + b.heap_blks_hit) AS heap_blks,
@ -25,6 +27,7 @@ SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch,
FROM pg_catalog.pg_stat_user_tables AS t,
pg_catalog.pg_statio_user_tables AS b
WHERE t.relname='tenk2' AND b.relname='tenk2';
COMMIT;
-- function to wait for counters to advance
create function wait_for_stats() returns void as $$
declare
@ -34,6 +37,8 @@ declare
updated3 bool;
updated4 bool;
begin
SET LOCAL stats_fetch_consistency = snapshot;
-- We don't want to wait forever. No timeout suffices if the OS drops our
-- stats traffic because an earlier test file left a full UDP buffer.
-- Hence, don't use PG_TEST_TIMEOUT_DEFAULT, which may be large for
@ -163,6 +168,8 @@ SELECT wait_for_stats();
(1 row)
-- check effects
BEGIN;
SET LOCAL stats_fetch_consistency = snapshot;
SELECT relname, n_tup_ins, n_tup_upd, n_tup_del, n_live_tup, n_dead_tup
FROM pg_stat_user_tables
WHERE relname like 'trunc_stats_test%' order by relname;
@ -202,6 +209,7 @@ FROM prevstats AS pr;
t
(1 row)
COMMIT;
DROP TABLE trunc_stats_test, trunc_stats_test1, trunc_stats_test2, trunc_stats_test3, trunc_stats_test4;
DROP TABLE prevstats;
-- test BRIN index doesn't block HOT update - we include this test here, as it

View File

@ -15,6 +15,8 @@ SET enable_indexscan TO on;
SET enable_indexonlyscan TO off;
-- save counters
BEGIN;
SET LOCAL stats_fetch_consistency = snapshot;
CREATE TABLE prevstats AS
SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch,
(b.heap_blks_read + b.heap_blks_hit) AS heap_blks,
@ -23,6 +25,7 @@ SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch,
FROM pg_catalog.pg_stat_user_tables AS t,
pg_catalog.pg_statio_user_tables AS b
WHERE t.relname='tenk2' AND b.relname='tenk2';
COMMIT;
-- function to wait for counters to advance
create function wait_for_stats() returns void as $$
@ -33,6 +36,8 @@ declare
updated3 bool;
updated4 bool;
begin
SET LOCAL stats_fetch_consistency = snapshot;
-- We don't want to wait forever. No timeout suffices if the OS drops our
-- stats traffic because an earlier test file left a full UDP buffer.
-- Hence, don't use PG_TEST_TIMEOUT_DEFAULT, which may be large for
@ -158,6 +163,9 @@ RESET enable_bitmapscan;
SELECT wait_for_stats();
-- check effects
BEGIN;
SET LOCAL stats_fetch_consistency = snapshot;
SELECT relname, n_tup_ins, n_tup_upd, n_tup_del, n_live_tup, n_dead_tup
FROM pg_stat_user_tables
WHERE relname like 'trunc_stats_test%' order by relname;
@ -177,6 +185,8 @@ SELECT st.heap_blks_read + st.heap_blks_hit >= pr.heap_blks + cl.relpages,
SELECT pr.snap_ts < pg_stat_get_snapshot_timestamp() as snapshot_newer
FROM prevstats AS pr;
COMMIT;
DROP TABLE trunc_stats_test, trunc_stats_test1, trunc_stats_test2, trunc_stats_test3, trunc_stats_test4;
DROP TABLE prevstats;

View File

@ -1933,51 +1933,39 @@ PgFdwPathExtraData
PgFdwRelationInfo
PgFdwScanState
PgIfAddrCallback
PgStatShared_Archiver
PgStatShared_BgWriter
PgStatShared_Checkpointer
PgStatShared_Common
PgStatShared_Database
PgStatShared_Function
PgStatShared_HashEntry
PgStatShared_Relation
PgStatShared_ReplSlot
PgStatShared_SLRU
PgStatShared_Subscription
PgStatShared_Wal
PgStat_ArchiverStats
PgStat_BackendFunctionEntry
PgStat_BackendSubEntry
PgStat_BgWriterStats
PgStat_CheckpointerStats
PgStat_Counter
PgStat_EntryRef
PgStat_EntryRefHashEntry
PgStat_FetchConsistency
PgStat_FunctionCallUsage
PgStat_FunctionCounts
PgStat_FunctionEntry
PgStat_GlobalStats
PgStat_HashKey
PgStat_Kind
PgStat_Msg
PgStat_MsgAnalyze
PgStat_MsgAnlAncestors
PgStat_MsgArchiver
PgStat_MsgAutovacStart
PgStat_MsgBgWriter
PgStat_MsgCheckpointer
PgStat_MsgChecksumFailure
PgStat_MsgConnect
PgStat_MsgDeadlock
PgStat_MsgDisconnect
PgStat_MsgDropdb
PgStat_MsgDummy
PgStat_MsgFuncpurge
PgStat_MsgFuncstat
PgStat_MsgHdr
PgStat_MsgInquiry
PgStat_MsgRecoveryConflict
PgStat_MsgReplSlot
PgStat_MsgResetcounter
PgStat_MsgResetreplslotcounter
PgStat_MsgResetsharedcounter
PgStat_MsgResetsinglecounter
PgStat_MsgResetslrucounter
PgStat_MsgResetsubcounter
PgStat_MsgSLRU
PgStat_MsgSubscriptionDrop
PgStat_MsgSubscriptionError
PgStat_MsgTabpurge
PgStat_MsgTabstat
PgStat_MsgTempFile
PgStat_MsgVacuum
PgStat_MsgWal
PgStat_KindInfo
PgStat_LocalState
PgStat_PendingDroppedStatsItem
PgStat_ReplSlotStats
PgStat_SLRUStats
PgStat_ShmemControl
PgStat_Snapshot
PgStat_SnapshotEntry
PgStat_StatDBEntry
PgStat_StatFuncEntry
PgStat_StatReplSlotEntry
@ -1985,7 +1973,6 @@ PgStat_StatSubEntry
PgStat_StatTabEntry
PgStat_SubXactStatus
PgStat_TableCounts
PgStat_TableEntry
PgStat_TableStatus
PgStat_TableXactStatus
PgStat_WalStats
@ -2533,7 +2520,6 @@ StartReplicationCmd
StartupStatusEnum
StatEntry
StatExtEntry
StatMsgType
StateFileChunk
StatisticExtInfo
Stats
@ -2647,8 +2633,6 @@ TXNEntryFile
TYPCATEGORY
T_Action
T_WorkerStatus
TabStatHashEntry
TabStatusArray
TableAmRoutine
TableAttachInfo
TableDataInfo
@ -3433,6 +3417,7 @@ pgssHashKey
pgssSharedState
pgssStoreKind
pgssVersion
pgstat_entry_ref_hash_hash
pgstat_page
pgstattuple_type
pgthreadlock_t

View File

@ -14,24 +14,6 @@
# These may contain uninitialized padding bytes. Since recipients also ignore
# those bytes as padding, this is harmless.
{
padding_pgstat_send
Memcheck:Param
socketcall.send(msg)
fun:*send*
fun:pgstat_send
}
{
padding_pgstat_sendto
Memcheck:Param
socketcall.sendto(msg)
fun:*send*
fun:pgstat_send
}
{
padding_pgstat_write
Memcheck:Param