/* ----------
 * pgstat.h
 *
 * Definitions for the PostgreSQL cumulative statistics system.
 *
 * Copyright (c) 2001-2022, PostgreSQL Global Development Group
 *
 * src/include/pgstat.h
 * ----------
 */
|
|
|
|
#ifndef PGSTAT_H
|
|
|
|
#define PGSTAT_H
|
|
|
|
|
2011-09-09 19:23:41 +02:00
|
|
|
#include "datatype/timestamp.h"
|
2008-05-15 02:17:41 +02:00
|
|
|
#include "portability/instr_time.h"
|
2021-04-03 20:42:52 +02:00
|
|
|
#include "postmaster/pgarch.h" /* for MAX_XFN_CHARS */
|
|
|
|
#include "utils/backend_progress.h" /* for backward compatibility */
|
|
|
|
#include "utils/backend_status.h" /* for backward compatibility */
|
2008-06-19 02:46:06 +02:00
|
|
|
#include "utils/relcache.h"
|
2021-04-03 04:45:24 +02:00
|
|
|
#include "utils/wait_event.h" /* for backward compatibility */
|
2003-04-26 04:57:14 +02:00
|
|
|
|
2006-06-19 03:51:22 +02:00
|
|
|
|
2014-06-04 05:09:45 +02:00
|
|
|
/* ----------
 * Paths for the statistics files (relative to installation's $PGDATA).
 * ----------
 */
#define PGSTAT_STAT_PERMANENT_DIRECTORY	"pg_stat"
#define PGSTAT_STAT_PERMANENT_FILENAME	"pg_stat/pgstat.stat"
#define PGSTAT_STAT_PERMANENT_TMPFILE	"pg_stat/pgstat.tmp"

/* Default directory to store temporary statistics data in */
#define PG_STAT_TMP_DIR		"pg_stat_tmp"
|
|
|
|
|
/* The types of statistics entries */
typedef enum PgStat_Kind
{
	/* use 0 for INVALID, to catch zero-initialized data */
	PGSTAT_KIND_INVALID = 0,

	/* stats for variable-numbered objects */
	PGSTAT_KIND_DATABASE,		/* database-wide statistics */
	PGSTAT_KIND_RELATION,		/* per-table statistics */
	PGSTAT_KIND_FUNCTION,		/* per-function statistics */
	PGSTAT_KIND_REPLSLOT,		/* per-slot statistics */
	PGSTAT_KIND_SUBSCRIPTION,	/* per-subscription statistics */

	/* stats for fixed-numbered objects */
	PGSTAT_KIND_ARCHIVER,
	PGSTAT_KIND_BGWRITER,
	PGSTAT_KIND_CHECKPOINTER,
	PGSTAT_KIND_SLRU,
	PGSTAT_KIND_WAL,
} PgStat_Kind;

/* Range of valid kinds; PGSTAT_KIND_INVALID is deliberately excluded. */
#define PGSTAT_KIND_FIRST_VALID PGSTAT_KIND_DATABASE
#define PGSTAT_KIND_LAST PGSTAT_KIND_WAL
/* array size needed to index by kind, including the INVALID slot */
#define PGSTAT_NUM_KINDS (PGSTAT_KIND_LAST + 1)
|
2022-03-22 00:16:42 +01:00
|
|
|
|
/* Values for track_functions GUC variable --- order is significant! */
typedef enum TrackFunctionsLevel
{
	TRACK_FUNC_OFF,				/* no per-function stats */
	TRACK_FUNC_PL,				/* track procedural-language functions */
	TRACK_FUNC_ALL				/* track all functions */
} TrackFunctionsLevel;
|
|
|
|
|
/*
 * Consistency modes for reading statistics within a transaction
 * (values for the stats_fetch_consistency GUC).
 * NOTE(review): per-value semantics below inferred from the identifier
 * names; confirm against pgstat.c / GUC documentation.
 */
typedef enum PgStat_FetchConsistency
{
	PGSTAT_FETCH_CONSISTENCY_NONE,	/* no consistency guarantee; re-fetch each access */
	PGSTAT_FETCH_CONSISTENCY_CACHE, /* cache each entry on first access */
	PGSTAT_FETCH_CONSISTENCY_SNAPSHOT,	/* snapshot all stats on first access */
} PgStat_FetchConsistency;
|
|
|
|
|
/* Values to track the cause of session termination */
typedef enum SessionEndType
{
	DISCONNECT_NOT_YET,			/* still active */
	DISCONNECT_NORMAL,			/* client disconnected normally */
	DISCONNECT_CLIENT_EOF,		/* connection lost (unexpected EOF) */
	DISCONNECT_FATAL,			/* session ended by FATAL error */
	DISCONNECT_KILLED			/* session terminated (e.g. by admin) */
} SessionEndType;
|
|
|
|
|
2001-06-22 21:18:36 +02:00
|
|
|
/* ----------
|
2022-03-22 00:16:42 +01:00
|
|
|
* The data type used for counters.
|
2001-06-22 21:18:36 +02:00
|
|
|
* ----------
|
|
|
|
*/
|
2022-03-22 00:16:42 +01:00
|
|
|
typedef int64 PgStat_Counter;
|
|
|
|
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------
 * Structures kept in backend local memory while accumulating counts
 * ------------------------------------------------------------
 */

/* ----------
 * PgStat_FunctionCounts	The actual per-function counts kept by a backend
 *
 * This struct should contain only actual event counters, because we memcmp
 * it against zeroes to detect whether there are any pending stats.
 *
 * Note that the time counters are in instr_time format here.  We convert to
 * microseconds in PgStat_Counter format when flushing out pending statistics.
 * ----------
 */
typedef struct PgStat_FunctionCounts
{
	PgStat_Counter f_numcalls;	/* number of calls to the function */
	instr_time	f_total_time;	/* total time, including nested callees */
	instr_time	f_self_time;	/* time spent in the function itself */
} PgStat_FunctionCounts;
|
|
|
|
|
|
|
|
/* ----------
 * PgStat_BackendFunctionEntry	Non-flushed function stats.
 * ----------
 */
typedef struct PgStat_BackendFunctionEntry
{
	PgStat_FunctionCounts f_counts; /* pending counts, flushed later */
} PgStat_BackendFunctionEntry;
|
|
|
|
|
|
|
|
/*
 * Working state needed to accumulate per-function-call timing statistics.
 */
typedef struct PgStat_FunctionCallUsage
{
	/* Link to function's hashtable entry (must still be there at exit!) */
	/* NULL means we are not tracking the current function call */
	PgStat_FunctionCounts *fs;
	/* Total time previously charged to function, as of function start */
	instr_time	save_f_total_time;
	/* Backend-wide total time as of function start */
	instr_time	save_total;
	/* system clock as of function start */
	instr_time	f_start;
} PgStat_FunctionCallUsage;
|
2001-06-22 21:18:36 +02:00
|
|
|
|
/* ----------
 * PgStat_BackendSubEntry	Non-flushed subscription stats.
 * ----------
 */
typedef struct PgStat_BackendSubEntry
{
	PgStat_Counter apply_error_count;	/* errors during logical apply */
	PgStat_Counter sync_error_count;	/* errors during table sync */
} PgStat_BackendSubEntry;
|
|
|
|
|
/* ----------
 * PgStat_TableCounts			The actual per-table counts kept by a backend
 *
 * This struct should contain only actual event counters, because we memcmp
 * it against zeroes to detect whether there are any stats updates to apply.
 * It is a component of PgStat_TableStatus (within-backend state).
 *
 * Note: for a table, tuples_returned is the number of tuples successfully
 * fetched by heap_getnext, while tuples_fetched is the number of tuples
 * successfully fetched by heap_fetch under the control of bitmap indexscans.
 * For an index, tuples_returned is the number of index entries returned by
 * the index AM, while tuples_fetched is the number of tuples successfully
 * fetched by heap_fetch under the control of simple indexscans for this index.
 *
 * tuples_inserted/updated/deleted/hot_updated count attempted actions,
 * regardless of whether the transaction committed.  delta_live_tuples,
 * delta_dead_tuples, and changed_tuples are set depending on commit or abort.
 * Note that delta_live_tuples and delta_dead_tuples can be negative!
 * ----------
 */
typedef struct PgStat_TableCounts
{
	PgStat_Counter t_numscans;	/* number of scans started on the rel */

	PgStat_Counter t_tuples_returned;
	PgStat_Counter t_tuples_fetched;

	PgStat_Counter t_tuples_inserted;
	PgStat_Counter t_tuples_updated;
	PgStat_Counter t_tuples_deleted;
	PgStat_Counter t_tuples_hot_updated;
	bool		t_truncdropped;	/* was the rel truncated/dropped? */

	/* commit/abort-aware deltas; can be negative */
	PgStat_Counter t_delta_live_tuples;
	PgStat_Counter t_delta_dead_tuples;
	PgStat_Counter t_changed_tuples;

	PgStat_Counter t_blocks_fetched;	/* block read requests */
	PgStat_Counter t_blocks_hit;	/* requests satisfied from buffers */
} PgStat_TableCounts;
|
|
|
|
|
|
|
|
/* ----------
 * PgStat_TableStatus			Per-table status within a backend
 *
 * Many of the event counters are nontransactional, ie, we count events
 * in committed and aborted transactions alike.  For these, we just count
 * directly in the PgStat_TableStatus.  However, delta_live_tuples,
 * delta_dead_tuples, and changed_tuples must be derived from event counts
 * with awareness of whether the transaction or subtransaction committed or
 * aborted.  Hence, we also keep a stack of per-(sub)transaction status
 * records for every table modified in the current transaction.  At commit
 * or abort, we propagate tuples_inserted/updated/deleted up to the
 * parent subtransaction level, or out to the parent PgStat_TableStatus,
 * as appropriate.
 * ----------
 */
typedef struct PgStat_TableStatus
{
	Oid			t_id;			/* table's OID */
	bool		t_shared;		/* is it a shared catalog? */
	struct PgStat_TableXactStatus *trans;	/* lowest subxact's counts */
	PgStat_TableCounts t_counts;	/* event counts to be sent */
	Relation	relation;		/* rel that is using this entry */
} PgStat_TableStatus;
|
|
|
|
|
|
|
|
/* ----------
 * PgStat_TableXactStatus		Per-table, per-subtransaction status
 * ----------
 */
typedef struct PgStat_TableXactStatus
{
	PgStat_Counter tuples_inserted; /* tuples inserted in (sub)xact */
	PgStat_Counter tuples_updated;	/* tuples updated in (sub)xact */
	PgStat_Counter tuples_deleted;	/* tuples deleted in (sub)xact */
	bool		truncdropped;	/* relation truncated/dropped in this
								 * (sub)xact */
	/* tuples i/u/d prior to truncate/drop */
	PgStat_Counter inserted_pre_truncdrop;
	PgStat_Counter updated_pre_truncdrop;
	PgStat_Counter deleted_pre_truncdrop;
	int			nest_level;		/* subtransaction nest level */
	/* links to other structs for same relation: */
	struct PgStat_TableXactStatus *upper;	/* next higher subxact if any */
	PgStat_TableStatus *parent; /* per-table status */
	/* structs of same subxact level are linked here: */
	struct PgStat_TableXactStatus *next;	/* next of same subxact */
} PgStat_TableXactStatus;
|
|
|
|
|
2001-06-22 21:18:36 +02:00
|
|
|
|
|
|
|
/* ------------------------------------------------------------
 * Data structures on disk and in shared memory follow
 *
 * PGSTAT_FILE_FORMAT_ID should be changed whenever any of these
 * data structures change.
 * ------------------------------------------------------------
 */

#define PGSTAT_FILE_FORMAT_ID	0x01A5BCA7
|
2005-07-14 07:13:45 +02:00
|
|
|
|
/* Cumulative stats for the WAL archiver process */
typedef struct PgStat_ArchiverStats
{
	PgStat_Counter archived_count;	/* archival successes */
	char		last_archived_wal[MAX_XFN_CHARS + 1];	/* last WAL file
														 * archived */
	TimestampTz last_archived_timestamp;	/* last archival success time */
	PgStat_Counter failed_count;	/* failed archival attempts */
	char		last_failed_wal[MAX_XFN_CHARS + 1]; /* WAL file involved in
													 * last failure */
	TimestampTz last_failed_timestamp;	/* last archival failure time */
	TimestampTz stat_reset_timestamp;	/* time of last stats reset */
} PgStat_ArchiverStats;
|
|
|
|
|
|
|
|
/* Cumulative stats for the background writer process */
typedef struct PgStat_BgWriterStats
{
	PgStat_Counter buf_written_clean;	/* buffers written by bgwriter scan */
	PgStat_Counter maxwritten_clean;	/* times bgwriter stopped at limit */
	PgStat_Counter buf_alloc;	/* buffers allocated */
	TimestampTz stat_reset_timestamp;	/* time of last stats reset */
} PgStat_BgWriterStats;
|
|
|
|
|
|
|
|
/* Cumulative stats for the checkpointer process */
typedef struct PgStat_CheckpointerStats
{
	PgStat_Counter timed_checkpoints;	/* checkpoints started by timeout */
	PgStat_Counter requested_checkpoints;	/* explicitly requested ones */
	PgStat_Counter checkpoint_write_time;	/* times in milliseconds */
	PgStat_Counter checkpoint_sync_time;
	PgStat_Counter buf_written_checkpoints; /* buffers written at checkpoint */
	PgStat_Counter buf_written_backend; /* buffers written by backends */
	PgStat_Counter buf_fsync_backend;	/* fsyncs done by backends */
} PgStat_CheckpointerStats;
|
|
|
|
|
/* Per-database cumulative statistics (backing pg_stat_database) */
typedef struct PgStat_StatDBEntry
{
	PgStat_Counter n_xact_commit;
	PgStat_Counter n_xact_rollback;
	PgStat_Counter n_blocks_fetched;
	PgStat_Counter n_blocks_hit;
	PgStat_Counter n_tuples_returned;
	PgStat_Counter n_tuples_fetched;
	PgStat_Counter n_tuples_inserted;
	PgStat_Counter n_tuples_updated;
	PgStat_Counter n_tuples_deleted;
	TimestampTz last_autovac_time;	/* last autovacuum activity on this DB */
	/* recovery-conflict cancellations, by cause */
	PgStat_Counter n_conflict_tablespace;
	PgStat_Counter n_conflict_lock;
	PgStat_Counter n_conflict_snapshot;
	PgStat_Counter n_conflict_bufferpin;
	PgStat_Counter n_conflict_startup_deadlock;
	PgStat_Counter n_temp_files;	/* temp files created */
	PgStat_Counter n_temp_bytes;	/* bytes written to temp files */
	PgStat_Counter n_deadlocks;
	PgStat_Counter n_checksum_failures;
	TimestampTz last_checksum_failure;
	PgStat_Counter n_block_read_time;	/* times in microseconds */
	PgStat_Counter n_block_write_time;
	/* session accounting */
	PgStat_Counter n_sessions;
	PgStat_Counter total_session_time;
	PgStat_Counter total_active_time;
	PgStat_Counter total_idle_in_xact_time;
	PgStat_Counter n_sessions_abandoned;
	PgStat_Counter n_sessions_fatal;
	PgStat_Counter n_sessions_killed;

	TimestampTz stat_reset_timestamp;	/* time of last stats reset */
} PgStat_StatDBEntry;
|
|
|
|
|
/* Per-function cumulative statistics (backing pg_stat_user_functions) */
typedef struct PgStat_StatFuncEntry
{
	PgStat_Counter f_numcalls;	/* number of calls */

	PgStat_Counter f_total_time;	/* times in microseconds */
	PgStat_Counter f_self_time;
} PgStat_StatFuncEntry;
|
2021-08-05 04:16:04 +02:00
|
|
|
|
/* Per-replication-slot cumulative statistics */
typedef struct PgStat_StatReplSlotEntry
{
	NameData	slotname;		/* name of the replication slot */
	/* transactions spilled to disk during logical decoding */
	PgStat_Counter spill_txns;
	PgStat_Counter spill_count;
	PgStat_Counter spill_bytes;
	/* transactions streamed to the subscriber before commit */
	PgStat_Counter stream_txns;
	PgStat_Counter stream_count;
	PgStat_Counter stream_bytes;
	PgStat_Counter total_txns;
	PgStat_Counter total_bytes;
	TimestampTz stat_reset_timestamp;	/* time of last stats reset */
} PgStat_StatReplSlotEntry;
|
2020-10-02 03:17:11 +02:00
|
|
|
|
/* Per-SLRU-cache cumulative statistics (backing pg_stat_slru) */
typedef struct PgStat_SLRUStats
{
	PgStat_Counter blocks_zeroed;	/* blocks initialized to zeroes */
	PgStat_Counter blocks_hit;	/* reads satisfied from the SLRU buffers */
	PgStat_Counter blocks_read;
	PgStat_Counter blocks_written;
	PgStat_Counter blocks_exists;	/* existence checks */
	PgStat_Counter flush;		/* flushes of dirty data */
	PgStat_Counter truncate;	/* truncations */
	TimestampTz stat_reset_timestamp;	/* time of last stats reset */
} PgStat_SLRUStats;
|
|
|
|
|
/* Per-subscription cumulative statistics */
typedef struct PgStat_StatSubEntry
{
	PgStat_Counter apply_error_count;	/* errors during logical apply */
	PgStat_Counter sync_error_count;	/* errors during table sync */
	TimestampTz stat_reset_timestamp;	/* time of last stats reset */
} PgStat_StatSubEntry;
|
2014-02-25 18:34:04 +01:00
|
|
|
|
/* Per-table cumulative statistics (backing pg_stat_all_tables etc.) */
typedef struct PgStat_StatTabEntry
{
	PgStat_Counter numscans;

	PgStat_Counter tuples_returned;
	PgStat_Counter tuples_fetched;

	PgStat_Counter tuples_inserted;
	PgStat_Counter tuples_updated;
	PgStat_Counter tuples_deleted;
	PgStat_Counter tuples_hot_updated;

	PgStat_Counter n_live_tuples;
	PgStat_Counter n_dead_tuples;
	PgStat_Counter changes_since_analyze;	/* drives autoanalyze */
	PgStat_Counter inserts_since_vacuum;	/* drives insert-only autovacuum */

	PgStat_Counter blocks_fetched;
	PgStat_Counter blocks_hit;

	TimestampTz vacuum_timestamp;	/* user initiated vacuum */
	PgStat_Counter vacuum_count;
	TimestampTz autovac_vacuum_timestamp;	/* autovacuum initiated */
	PgStat_Counter autovac_vacuum_count;
	TimestampTz analyze_timestamp;	/* user initiated */
	PgStat_Counter analyze_count;
	TimestampTz autovac_analyze_timestamp;	/* autovacuum initiated */
	PgStat_Counter autovac_analyze_count;
} PgStat_StatTabEntry;
|
2012-04-05 17:37:31 +02:00
|
|
|
|
/* Cluster-wide WAL activity statistics (backing pg_stat_wal) */
typedef struct PgStat_WalStats
{
	PgStat_Counter wal_records; /* WAL records generated */
	PgStat_Counter wal_fpi;		/* full page images written */
	uint64		wal_bytes;		/* total WAL bytes generated */
	PgStat_Counter wal_buffers_full;	/* WAL buffer full events */
	PgStat_Counter wal_write;	/* WAL write calls */
	PgStat_Counter wal_sync;	/* WAL sync calls */
	PgStat_Counter wal_write_time;
	PgStat_Counter wal_sync_time;
	TimestampTz stat_reset_timestamp;	/* time of last stats reset */
} PgStat_WalStats;
|
2021-04-03 20:42:52 +02:00
|
|
|
|
|
|
|
|
2021-01-17 13:34:09 +01:00
|
|
|
/*
 * Functions in pgstat.c
 */

/* functions called from postmaster */
extern Size StatsShmemSize(void);
extern void StatsShmemInit(void);

/* Functions called during server startup / shutdown */
extern void pgstat_restore_stats(void);
extern void pgstat_discard_stats(void);
extern void pgstat_before_server_shutdown(int code, Datum arg);

/* Functions for backend initialization */
extern void pgstat_initialize(void);

/* Functions called from backends */
extern long pgstat_report_stat(bool force);
extern void pgstat_force_next_flush(void);

extern void pgstat_reset_counters(void);
extern void pgstat_reset(PgStat_Kind kind, Oid dboid, Oid objectid);
extern void pgstat_reset_of_kind(PgStat_Kind kind);

/* stats accessors */
extern void pgstat_clear_snapshot(void);
extern TimestampTz pgstat_get_stat_snapshot_timestamp(bool *have_snapshot);

/* helpers */
extern PgStat_Kind pgstat_get_kind_from_str(char *kind_str);
extern bool pgstat_have_entry(PgStat_Kind kind, Oid dboid, Oid objoid);
|
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
/*
 * Functions in pgstat_archiver.c
 */

extern void pgstat_report_archiver(const char *xlog, bool failed);
extern PgStat_ArchiverStats *pgstat_fetch_stat_archiver(void);
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * Functions in pgstat_bgwriter.c
 */

extern void pgstat_report_bgwriter(void);
extern PgStat_BgWriterStats *pgstat_fetch_stat_bgwriter(void);
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * Functions in pgstat_checkpointer.c
 */

extern void pgstat_report_checkpointer(void);
extern PgStat_CheckpointerStats *pgstat_fetch_stat_checkpointer(void);
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Functions in pgstat_database.c
|
|
|
|
*/
|
2001-06-22 21:18:36 +02:00
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
extern void pgstat_drop_database(Oid databaseid);
|
2022-04-06 21:41:29 +02:00
|
|
|
extern void pgstat_report_autovac(Oid dboid);
|
2011-01-03 12:46:03 +01:00
|
|
|
extern void pgstat_report_recovery_conflict(int reason);
|
2012-01-26 15:58:19 +01:00
|
|
|
extern void pgstat_report_deadlock(void);
|
2019-03-09 19:45:17 +01:00
|
|
|
extern void pgstat_report_checksum_failures_in_db(Oid dboid, int failurecount);
|
|
|
|
extern void pgstat_report_checksum_failure(void);
|
2022-03-22 00:16:42 +01:00
|
|
|
extern void pgstat_report_connect(Oid dboid);
|
2011-01-03 12:46:03 +01:00
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
/* Add (elapsed) to the pending block-read-time counter. */
#define pgstat_count_buffer_read_time(elapsed) \
	(pgStatBlockReadTime += (elapsed))
|
|
|
|
/* Add (elapsed) to the pending block-write-time counter. */
#define pgstat_count_buffer_write_time(elapsed) \
	(pgStatBlockWriteTime += (elapsed))
|
|
|
|
/* Add (elapsed) to the pending connection-active-time counter. */
#define pgstat_count_conn_active_time(elapsed) \
	(pgStatActiveTime += (elapsed))
|
|
|
|
/* Add (elapsed) to the pending idle-in-transaction-time counter. */
#define pgstat_count_conn_txn_idle_time(elapsed) \
	(pgStatTransactionIdleTime += (elapsed))
|
2021-04-03 20:42:52 +02:00
|
|
|
|
2022-04-07 06:29:46 +02:00
|
|
|
extern PgStat_StatDBEntry *pgstat_fetch_stat_dbentry(Oid dbid);
|
Add a generic command progress reporting facility.
Using this facility, any utility command can report the target relation
upon which it is operating, if there is one, and up to 10 64-bit
counters; the intent of this is that users should be able to figure out
what a utility command is doing without having to resort to ugly hacks
like attaching strace to a backend.
As a demonstration, this adds very crude reporting to lazy vacuum; we
just report the target relation and nothing else. A forthcoming patch
will make VACUUM report a bunch of additional data that will make this
much more interesting. But this gets the basic framework in place.
Vinayak Pokale, Rahila Syed, Amit Langote, Robert Haas, reviewed by
Kyotaro Horiguchi, Jim Nasby, Thom Brown, Masahiko Sawada, Fujii Masao,
and Masanori Oyama.
2016-03-09 18:08:58 +01:00
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
/*
 * Functions in pgstat_function.c
 */
|
|
|
|
|
pgstat: scaffolding for transactional stats creation / drop.
One problematic part of the current statistics collector design is that there
is no reliable way of getting rid of statistics entries. Because of that
pgstat_vacuum_stat() (called by [auto-]vacuum) matches all stats for the
current database with the catalog contents and tries to drop now-superfluous
entries. That's quite expensive. What's worse, it doesn't work on physical
replicas, despite physical replicas collection statistics entries.
This commit introduces infrastructure to create / drop statistics entries
transactionally, together with the underlying catalog objects (functions,
relations, subscriptions). pgstat_xact.c maintains a list of stats entries
created / dropped transactionally in the current transaction. To ensure the
removal of statistics entries is durable dropped statistics entries are
included in commit / abort (and prepare) records, which also ensures that
stats entries are dropped on standbys.
Statistics entries created separately from creating the underlying catalog
object (e.g. when stats were previously lost due to an immediate restart)
are *not* WAL logged. However that can only happen outside of the transaction
creating the catalog object, so it does not lead to "leaked" statistics
entries.
For this to work, functions creating / dropping functions / relations /
subscriptions need to call into pgstat. For subscriptions this was already
done when dropping subscriptions, via pgstat_report_subscription_drop() (now
renamed to pgstat_drop_subscription()).
This commit does not actually drop stats yet, it just provides the
infrastructure. It is however a largely independent piece of infrastructure,
so committing it separately makes sense.
Bumps XLOG_PAGE_MAGIC.
Author: Andres Freund <andres@anarazel.de>
Reviewed-By: Thomas Munro <thomas.munro@gmail.com>
Reviewed-By: Kyotaro Horiguchi <horikyota.ntt@gmail.com>
Discussion: https://postgr.es/m/20220303021600.hs34ghqcw6zcokdh@alap3.anarazel.de
2022-04-07 03:22:22 +02:00
|
|
|
extern void pgstat_create_function(Oid proid);
|
|
|
|
extern void pgstat_drop_function(Oid proid);
|
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
struct FunctionCallInfoBaseData;
|
|
|
|
extern void pgstat_init_function_usage(struct FunctionCallInfoBaseData *fcinfo,
|
|
|
|
PgStat_FunctionCallUsage *fcu);
|
|
|
|
extern void pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu,
|
|
|
|
bool finalize);
|
|
|
|
|
2022-04-07 06:29:46 +02:00
|
|
|
extern PgStat_StatFuncEntry *pgstat_fetch_stat_funcentry(Oid funcid);
|
2010-08-08 18:27:06 +02:00
|
|
|
extern PgStat_BackendFunctionEntry *find_funcstat_entry(Oid func_id);
|
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
/*
 * Functions in pgstat_relation.c
 */
|
|
|
|
|
pgstat: scaffolding for transactional stats creation / drop.
One problematic part of the current statistics collector design is that there
is no reliable way of getting rid of statistics entries. Because of that
pgstat_vacuum_stat() (called by [auto-]vacuum) matches all stats for the
current database with the catalog contents and tries to drop now-superfluous
entries. That's quite expensive. What's worse, it doesn't work on physical
replicas, despite physical replicas collection statistics entries.
This commit introduces infrastructure to create / drop statistics entries
transactionally, together with the underlying catalog objects (functions,
relations, subscriptions). pgstat_xact.c maintains a list of stats entries
created / dropped transactionally in the current transaction. To ensure the
removal of statistics entries is durable dropped statistics entries are
included in commit / abort (and prepare) records, which also ensures that
stats entries are dropped on standbys.
Statistics entries created separately from creating the underlying catalog
object (e.g. when stats were previously lost due to an immediate restart)
are *not* WAL logged. However that can only happen outside of the transaction
creating the catalog object, so it does not lead to "leaked" statistics
entries.
For this to work, functions creating / dropping functions / relations /
subscriptions need to call into pgstat. For subscriptions this was already
done when dropping subscriptions, via pgstat_report_subscription_drop() (now
renamed to pgstat_drop_subscription()).
This commit does not actually drop stats yet, it just provides the
infrastructure. It is however a largely independent piece of infrastructure,
so committing it separately makes sense.
Bumps XLOG_PAGE_MAGIC.
Author: Andres Freund <andres@anarazel.de>
Reviewed-By: Thomas Munro <thomas.munro@gmail.com>
Reviewed-By: Kyotaro Horiguchi <horikyota.ntt@gmail.com>
Discussion: https://postgr.es/m/20220303021600.hs34ghqcw6zcokdh@alap3.anarazel.de
2022-04-07 03:22:22 +02:00
|
|
|
extern void pgstat_create_relation(Relation rel);
|
|
|
|
extern void pgstat_drop_relation(Relation rel);
|
2022-04-06 23:09:18 +02:00
|
|
|
extern void pgstat_copy_relation_stats(Relation dstrel, Relation srcrel);
|
|
|
|
|
2022-04-07 06:29:46 +02:00
|
|
|
extern void pgstat_init_relation(Relation rel);
|
2022-04-07 06:29:46 +02:00
|
|
|
extern void pgstat_assoc_relation(Relation rel);
|
|
|
|
extern void pgstat_unlink_relation(Relation rel);
|
2001-06-22 21:18:36 +02:00
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
extern void pgstat_report_vacuum(Oid tableoid, bool shared,
|
|
|
|
PgStat_Counter livetuples, PgStat_Counter deadtuples);
|
|
|
|
extern void pgstat_report_analyze(Relation rel,
|
|
|
|
PgStat_Counter livetuples, PgStat_Counter deadtuples,
|
|
|
|
bool resetcounter);
|
|
|
|
|
2022-04-07 06:29:46 +02:00
|
|
|
/*
 * If stats are enabled, but pending data hasn't been prepared yet, call
 * pgstat_assoc_relation() to do so. See its comment for why this is done
 * separately from pgstat_init_relation().
 *
 * Evaluates to true iff counters may be accumulated for the relation; the
 * comma expression performs the association as a side effect on first use.
 */
#define pgstat_should_count_relation(relation) \
	(likely((relation)->pgstat_info != NULL) ? true : \
	 ((relation)->pgstat_enabled ? pgstat_assoc_relation(relation), true : false))
|
2022-03-21 03:12:09 +01:00
|
|
|
|
2007-05-27 05:50:39 +02:00
|
|
|
/* nontransactional event counts are simple enough to inline */
|
2001-06-29 18:29:37 +02:00
|
|
|
|
2007-09-24 05:12:23 +02:00
|
|
|
/* Count one sequential scan of 'relation' (no-op when stats are disabled). */
#define pgstat_count_heap_scan(relation) \
	do { \
		if (pgstat_should_count_relation(relation)) \
			(relation)->pgstat_info->t_counts.t_numscans++; \
	} while (0)
|
2007-09-24 05:12:23 +02:00
|
|
|
/* Count one tuple returned by a scan of 'relation' (no-op when stats are disabled). */
#define pgstat_count_heap_getnext(relation) \
	do { \
		if (pgstat_should_count_relation(relation)) \
			(relation)->pgstat_info->t_counts.t_tuples_returned++; \
	} while (0)
|
2007-09-24 05:12:23 +02:00
|
|
|
/* Count one tuple fetched from 'relation' (no-op when stats are disabled). */
#define pgstat_count_heap_fetch(relation) \
	do { \
		if (pgstat_should_count_relation(relation)) \
			(relation)->pgstat_info->t_counts.t_tuples_fetched++; \
	} while (0)
|
2007-09-24 05:12:23 +02:00
|
|
|
/* Count one scan of the index 'relation' (no-op when stats are disabled). */
#define pgstat_count_index_scan(relation) \
	do { \
		if (pgstat_should_count_relation(relation)) \
			(relation)->pgstat_info->t_counts.t_numscans++; \
	} while (0)
|
2007-09-24 05:12:23 +02:00
|
|
|
/* Count (ntuples) entries returned by the index 'relation' (no-op when stats are disabled). */
#define pgstat_count_index_tuples(relation, ntuples) \
	do { \
		if (pgstat_should_count_relation(relation)) \
			(relation)->pgstat_info->t_counts.t_tuples_returned += (ntuples); \
	} while (0)
|
2007-09-24 05:12:23 +02:00
|
|
|
/* Count one block fetched for 'relation' (no-op when stats are disabled). */
#define pgstat_count_buffer_read(relation) \
	do { \
		if (pgstat_should_count_relation(relation)) \
			(relation)->pgstat_info->t_counts.t_blocks_fetched++; \
	} while (0)
|
2007-09-24 05:12:23 +02:00
|
|
|
/* Count one buffer hit for 'relation' (no-op when stats are disabled). */
#define pgstat_count_buffer_hit(relation) \
	do { \
		if (pgstat_should_count_relation(relation)) \
			(relation)->pgstat_info->t_counts.t_blocks_hit++; \
	} while (0)
|
2001-06-29 18:29:37 +02:00
|
|
|
|
2017-03-18 22:49:06 +01:00
|
|
|
extern void pgstat_count_heap_insert(Relation rel, PgStat_Counter n);
|
2007-09-20 19:56:33 +02:00
|
|
|
extern void pgstat_count_heap_update(Relation rel, bool hot);
|
2007-05-27 05:50:39 +02:00
|
|
|
extern void pgstat_count_heap_delete(Relation rel);
|
2015-02-20 16:10:01 +01:00
|
|
|
extern void pgstat_count_truncate(Relation rel);
|
2007-09-20 19:56:33 +02:00
|
|
|
extern void pgstat_update_heap_dead_tuples(Relation rel, int delta);
|
2007-05-27 05:50:39 +02:00
|
|
|
|
|
|
|
extern void pgstat_twophase_postcommit(TransactionId xid, uint16 info,
|
|
|
|
void *recdata, uint32 len);
|
|
|
|
extern void pgstat_twophase_postabort(TransactionId xid, uint16 info,
|
|
|
|
void *recdata, uint32 len);
|
2001-06-29 18:29:37 +02:00
|
|
|
|
2022-04-07 06:29:46 +02:00
|
|
|
extern PgStat_StatTabEntry *pgstat_fetch_stat_tabentry(Oid relid);
|
|
|
|
extern PgStat_StatTabEntry *pgstat_fetch_stat_tabentry_ext(bool shared,
|
|
|
|
Oid relid);
|
2022-03-22 00:16:42 +01:00
|
|
|
extern PgStat_TableStatus *find_tabstat_entry(Oid rel_id);
|
2001-06-22 21:18:36 +02:00
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
/*
 * Functions in pgstat_replslot.c
 */
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
|
2022-04-07 02:56:19 +02:00
|
|
|
extern void pgstat_reset_replslot(const char *name);
|
2022-04-07 03:26:17 +02:00
|
|
|
struct ReplicationSlot;
|
|
|
|
extern void pgstat_report_replslot(struct ReplicationSlot *slot, const PgStat_StatReplSlotEntry *repSlotStat);
|
|
|
|
extern void pgstat_create_replslot(struct ReplicationSlot *slot);
|
2022-04-07 06:29:46 +02:00
|
|
|
extern void pgstat_acquire_replslot(struct ReplicationSlot *slot);
|
2022-04-07 03:26:17 +02:00
|
|
|
extern void pgstat_drop_replslot(struct ReplicationSlot *slot);
|
2022-04-07 06:29:46 +02:00
|
|
|
extern PgStat_StatReplSlotEntry *pgstat_fetch_replslot(NameData slotname);
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * Functions in pgstat_slru.c
 */
|
|
|
|
|
2022-04-07 02:56:19 +02:00
|
|
|
extern void pgstat_reset_slru(const char *);
|
Improve management of SLRU statistics collection.
Instead of re-identifying which statistics bucket to use for a given
SLRU on every counter increment, do it once during shmem initialization.
This saves a fair number of cycles, and there's no real cost because
we could not have a bucket assignment that varies over time or across
backends anyway.
Also, get rid of the ill-considered decision to let pgstat.c pry
directly into SLRU's shared state; it's cleaner just to have slru.c
pass the stats bucket number.
In consequence of these changes, there's no longer any need to store
an SLRU's LWLock tranche info in shared memory, so get rid of that,
making this a net reduction in shmem consumption. (That partly
reverts fe702a7b3.)
This is basically code review for 28cac71bd, so I also cleaned up
some comments, removed a dangling extern declaration, fixed some
things that should be static and/or const, etc.
Discussion: https://postgr.es/m/3618.1589313035@sss.pgh.pa.us
2020-05-13 19:08:12 +02:00
|
|
|
extern void pgstat_count_slru_page_zeroed(int slru_idx);
|
|
|
|
extern void pgstat_count_slru_page_hit(int slru_idx);
|
|
|
|
extern void pgstat_count_slru_page_read(int slru_idx);
|
|
|
|
extern void pgstat_count_slru_page_written(int slru_idx);
|
|
|
|
extern void pgstat_count_slru_page_exists(int slru_idx);
|
|
|
|
extern void pgstat_count_slru_flush(int slru_idx);
|
|
|
|
extern void pgstat_count_slru_truncate(int slru_idx);
|
2022-04-07 06:29:46 +02:00
|
|
|
extern const char *pgstat_get_slru_name(int slru_idx);
|
|
|
|
extern int pgstat_get_slru_index(const char *name);
|
2022-04-07 06:29:46 +02:00
|
|
|
extern PgStat_SLRUStats *pgstat_fetch_slru(void);
|
2001-06-22 21:18:36 +02:00
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
/*
 * Functions in pgstat_subscription.c
 */
|
|
|
|
|
|
|
|
extern void pgstat_report_subscription_error(Oid subid, bool is_apply_error);
|
pgstat: scaffolding for transactional stats creation / drop.
One problematic part of the current statistics collector design is that there
is no reliable way of getting rid of statistics entries. Because of that
pgstat_vacuum_stat() (called by [auto-]vacuum) matches all stats for the
current database with the catalog contents and tries to drop now-superfluous
entries. That's quite expensive. What's worse, it doesn't work on physical
replicas, despite physical replicas collection statistics entries.
This commit introduces infrastructure to create / drop statistics entries
transactionally, together with the underlying catalog objects (functions,
relations, subscriptions). pgstat_xact.c maintains a list of stats entries
created / dropped transactionally in the current transaction. To ensure the
removal of statistics entries is durable dropped statistics entries are
included in commit / abort (and prepare) records, which also ensures that
stats entries are dropped on standbys.
Statistics entries created separately from creating the underlying catalog
object (e.g. when stats were previously lost due to an immediate restart)
are *not* WAL logged. However that can only happen outside of the transaction
creating the catalog object, so it does not lead to "leaked" statistics
entries.
For this to work, functions creating / dropping functions / relations /
subscriptions need to call into pgstat. For subscriptions this was already
done when dropping subscriptions, via pgstat_report_subscription_drop() (now
renamed to pgstat_drop_subscription()).
This commit does not actually drop stats yet, it just provides the
infrastructure. It is however a largely independent piece of infrastructure,
so committing it separately makes sense.
Bumps XLOG_PAGE_MAGIC.
Author: Andres Freund <andres@anarazel.de>
Reviewed-By: Thomas Munro <thomas.munro@gmail.com>
Reviewed-By: Kyotaro Horiguchi <horikyota.ntt@gmail.com>
Discussion: https://postgr.es/m/20220303021600.hs34ghqcw6zcokdh@alap3.anarazel.de
2022-04-07 03:22:22 +02:00
|
|
|
extern void pgstat_create_subscription(Oid subid);
|
|
|
|
extern void pgstat_drop_subscription(Oid subid);
|
2022-04-07 06:29:46 +02:00
|
|
|
extern PgStat_StatSubEntry *pgstat_fetch_stat_subscription(Oid subid);
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
|
2022-04-06 22:23:47 +02:00
|
|
|
/*
 * Functions in pgstat_xact.c
 */
|
|
|
|
|
|
|
|
extern void AtEOXact_PgStat(bool isCommit, bool parallel);
|
|
|
|
extern void AtEOSubXact_PgStat(bool isCommit, int nestDepth);
|
|
|
|
extern void AtPrepare_PgStat(void);
|
|
|
|
extern void PostPrepare_PgStat(void);
|
pgstat: scaffolding for transactional stats creation / drop.
One problematic part of the current statistics collector design is that there
is no reliable way of getting rid of statistics entries. Because of that
pgstat_vacuum_stat() (called by [auto-]vacuum) matches all stats for the
current database with the catalog contents and tries to drop now-superfluous
entries. That's quite expensive. What's worse, it doesn't work on physical
replicas, despite physical replicas collection statistics entries.
This commit introduces infrastructure to create / drop statistics entries
transactionally, together with the underlying catalog objects (functions,
relations, subscriptions). pgstat_xact.c maintains a list of stats entries
created / dropped transactionally in the current transaction. To ensure the
removal of statistics entries is durable dropped statistics entries are
included in commit / abort (and prepare) records, which also ensures that
stats entries are dropped on standbys.
Statistics entries created separately from creating the underlying catalog
object (e.g. when stats were previously lost due to an immediate restart)
are *not* WAL logged. However that can only happen outside of the transaction
creating the catalog object, so it does not lead to "leaked" statistics
entries.
For this to work, functions creating / dropping functions / relations /
subscriptions need to call into pgstat. For subscriptions this was already
done when dropping subscriptions, via pgstat_report_subscription_drop() (now
renamed to pgstat_drop_subscription()).
This commit does not actually drop stats yet, it just provides the
infrastructure. It is however a largely independent piece of infrastructure,
so committing it separately makes sense.
Bumps XLOG_PAGE_MAGIC.
Author: Andres Freund <andres@anarazel.de>
Reviewed-By: Thomas Munro <thomas.munro@gmail.com>
Reviewed-By: Kyotaro Horiguchi <horikyota.ntt@gmail.com>
Discussion: https://postgr.es/m/20220303021600.hs34ghqcw6zcokdh@alap3.anarazel.de
2022-04-07 03:22:22 +02:00
|
|
|
struct xl_xact_stats_item;
|
|
|
|
extern int pgstat_get_transactional_drops(bool isCommit, struct xl_xact_stats_item **items);
|
|
|
|
extern void pgstat_execute_transactional_drops(int ndrops, struct xl_xact_stats_item *items, bool is_redo);
|
2022-04-06 22:23:47 +02:00
|
|
|
|
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
/*
 * Functions in pgstat_wal.c
 */
|
|
|
|
|
2022-04-06 23:08:57 +02:00
|
|
|
extern void pgstat_report_wal(bool force);
|
2022-04-07 06:29:46 +02:00
|
|
|
extern PgStat_WalStats *pgstat_fetch_stat_wal(void);
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * Variables in pgstat.c
 */
|
|
|
|
|
|
|
|
/* GUC parameters */
|
|
|
|
extern PGDLLIMPORT bool pgstat_track_counts;
|
|
|
|
extern PGDLLIMPORT int pgstat_track_functions;
|
2022-04-07 06:29:46 +02:00
|
|
|
extern PGDLLIMPORT int pgstat_fetch_consistency;
|
|
|
|
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
/*
 * Variables in pgstat_bgwriter.c
 */
|
|
|
|
|
|
|
|
/* updated directly by bgwriter and bufmgr */
|
2022-04-08 14:16:38 +02:00
|
|
|
extern PGDLLIMPORT PgStat_BgWriterStats PendingBgWriterStats;
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * Variables in pgstat_checkpointer.c
 */
|
|
|
|
|
|
|
|
/*
 * Checkpointer statistics counters are updated directly by checkpointer and
 * bufmgr.
 */
|
2022-04-08 14:16:38 +02:00
|
|
|
extern PGDLLIMPORT PgStat_CheckpointerStats PendingCheckpointerStats;
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * Variables in pgstat_database.c
 */
|
|
|
|
|
|
|
|
/* Updated by pgstat_count_buffer_*_time macros */
|
2022-04-08 14:16:38 +02:00
|
|
|
extern PGDLLIMPORT PgStat_Counter pgStatBlockReadTime;
|
|
|
|
extern PGDLLIMPORT PgStat_Counter pgStatBlockWriteTime;
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
/*
 * Updated by pgstat_count_conn_*_time macros, called by
 * pgstat_report_activity().
 */
|
2022-04-08 14:16:38 +02:00
|
|
|
extern PGDLLIMPORT PgStat_Counter pgStatActiveTime;
|
|
|
|
extern PGDLLIMPORT PgStat_Counter pgStatTransactionIdleTime;
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
/* updated by the traffic cop and in errfinish() */
|
2022-04-08 14:16:38 +02:00
|
|
|
extern PGDLLIMPORT SessionEndType pgStatSessionEndCause;
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * Variables in pgstat_wal.c
 */
|
|
|
|
|
|
|
|
/* updated directly by backends and background processes */
|
2022-04-08 14:16:38 +02:00
|
|
|
extern PGDLLIMPORT PgStat_WalStats PendingWalStats;
|
2022-03-22 00:16:42 +01:00
|
|
|
|
|
|
|
|
2001-06-22 21:18:36 +02:00
|
|
|
#endif /* PGSTAT_H */
|