2004-10-01 23:03:42 +02:00
|
|
|
/*-------------------------------------------------------------------------
 *
 * pgstatfuncs.c
 *	  Functions for accessing various forms of statistics data
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/utils/adt/pgstatfuncs.c
 *
 *-------------------------------------------------------------------------
 */
|
2001-06-22 21:18:36 +02:00
|
|
|
#include "postgres.h"
|
|
|
|
|
2012-08-30 22:15:44 +02:00
|
|
|
#include "access/htup_details.h"
|
2019-04-17 13:51:48 +02:00
|
|
|
#include "access/xlog.h"
|
2022-04-07 09:28:40 +02:00
|
|
|
#include "access/xlogprefetcher.h"
|
2017-03-30 20:18:53 +02:00
|
|
|
#include "catalog/pg_authid.h"
|
2011-09-09 19:23:41 +02:00
|
|
|
#include "catalog/pg_type.h"
|
2016-09-02 12:49:59 +02:00
|
|
|
#include "common/ip.h"
|
2004-10-01 23:03:42 +02:00
|
|
|
#include "funcapi.h"
|
|
|
|
#include "miscadmin.h"
|
2001-06-22 21:18:36 +02:00
|
|
|
#include "pgstat.h"
|
2017-08-31 18:24:47 +02:00
|
|
|
#include "postmaster/bgworker_internals.h"
|
2017-03-27 04:02:22 +02:00
|
|
|
#include "postmaster/postmaster.h"
|
2016-03-10 18:44:09 +01:00
|
|
|
#include "storage/proc.h"
|
|
|
|
#include "storage/procarray.h"
|
2015-03-19 20:02:33 +01:00
|
|
|
#include "utils/acl.h"
|
2006-07-11 19:26:59 +02:00
|
|
|
#include "utils/builtins.h"
|
2005-05-09 13:31:34 +02:00
|
|
|
#include "utils/inet.h"
|
2011-09-09 19:23:41 +02:00
|
|
|
#include "utils/timestamp.h"
|
2001-06-22 21:18:36 +02:00
|
|
|
|
2016-04-21 20:02:15 +02:00
|
|
|
/*
 * Read a uint32 field exactly once through a volatile lvalue, so the
 * compiler cannot cache or re-read a value that another process may be
 * updating concurrently.
 */
#define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32 *)&(var))))
|
|
|
|
|
2022-03-28 21:10:04 +02:00
|
|
|
/*
 * True if the current user may view statistics belonging to 'role':
 * either via membership in pg_read_all_stats, or by having the
 * privileges of that role itself.
 */
#define HAS_PGSTAT_PERMISSIONS(role) (has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS) || has_privs_of_role(GetUserId(), role))
|
2020-04-20 12:53:40 +02:00
|
|
|
|
2022-12-06 02:46:35 +01:00
|
|
|
/*
 * Template for int64-returning accessors over per-table statistics.
 *
 * PG_STAT_GET_RELENTRY_INT64(stat) expands to a SQL-callable function
 * pg_stat_get_<stat>(relid oid) that looks up the relation's stats entry
 * and returns its 'stat' field as int64, or 0 when no statistics exist
 * for that relation.
 */
#define PG_STAT_GET_RELENTRY_INT64(stat) \
Datum \
CppConcat(pg_stat_get_,stat)(PG_FUNCTION_ARGS) \
{ \
	Oid			relid = PG_GETARG_OID(0); \
	int64		result; \
	PgStat_StatTabEntry *tabentry; \
 \
	if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL) \
		result = 0; \
	else \
		result = (int64) (tabentry->stat); \
 \
	PG_RETURN_INT64(result); \
}
|
|
|
|
|
|
|
|
/*
 * Per-table int64 counter accessors, each generated from
 * PG_STAT_GET_RELENTRY_INT64.  The macro argument names both the
 * PgStat_StatTabEntry field read and the pg_stat_get_* SQL function
 * produced.
 */

/* pg_stat_get_analyze_count */
PG_STAT_GET_RELENTRY_INT64(analyze_count);

/* pg_stat_get_autoanalyze_count */
PG_STAT_GET_RELENTRY_INT64(autoanalyze_count);

/* pg_stat_get_autovacuum_count */
PG_STAT_GET_RELENTRY_INT64(autovacuum_count);

/* pg_stat_get_blocks_fetched */
PG_STAT_GET_RELENTRY_INT64(blocks_fetched);

/* pg_stat_get_blocks_hit */
PG_STAT_GET_RELENTRY_INT64(blocks_hit);

/* pg_stat_get_dead_tuples */
PG_STAT_GET_RELENTRY_INT64(dead_tuples);

/* pg_stat_get_ins_since_vacuum */
PG_STAT_GET_RELENTRY_INT64(ins_since_vacuum);

/* pg_stat_get_live_tuples */
PG_STAT_GET_RELENTRY_INT64(live_tuples);

/* pg_stat_get_mods_since_analyze */
PG_STAT_GET_RELENTRY_INT64(mod_since_analyze);

/* pg_stat_get_numscans */
PG_STAT_GET_RELENTRY_INT64(numscans);

/* pg_stat_get_tuples_deleted */
PG_STAT_GET_RELENTRY_INT64(tuples_deleted);

/* pg_stat_get_tuples_fetched */
PG_STAT_GET_RELENTRY_INT64(tuples_fetched);

/* pg_stat_get_tuples_hot_updated */
PG_STAT_GET_RELENTRY_INT64(tuples_hot_updated);

/* pg_stat_get_tuples_inserted */
PG_STAT_GET_RELENTRY_INT64(tuples_inserted);

/* pg_stat_get_tuples_returned */
PG_STAT_GET_RELENTRY_INT64(tuples_returned);

/* pg_stat_get_tuples_updated */
PG_STAT_GET_RELENTRY_INT64(tuples_updated);

/* pg_stat_get_vacuum_count */
PG_STAT_GET_RELENTRY_INT64(vacuum_count);
|
|
|
|
|
|
|
|
/*
 * Template for timestamptz-returning accessors over per-table statistics.
 *
 * PG_STAT_GET_RELENTRY_TIMESTAMPTZ(stat) expands to a SQL-callable
 * function pg_stat_get_<stat>(relid oid) that looks up the relation's
 * stats entry and returns its 'stat' timestamp field.  It returns SQL
 * NULL both when no statistics exist for the relation and when the
 * stored timestamp is 0 (meaning the event has never been recorded).
 */
#define PG_STAT_GET_RELENTRY_TIMESTAMPTZ(stat) \
Datum \
CppConcat(pg_stat_get_,stat)(PG_FUNCTION_ARGS) \
{ \
	Oid			relid = PG_GETARG_OID(0); \
	TimestampTz result; \
	PgStat_StatTabEntry *tabentry; \
 \
	if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL) \
		result = 0; \
	else \
		result = tabentry->stat; \
 \
	if (result == 0) \
		PG_RETURN_NULL(); \
	else \
		PG_RETURN_TIMESTAMPTZ(result); \
}
|
|
|
|
|
|
|
|
/*
 * Per-table timestamp accessors, each generated from
 * PG_STAT_GET_RELENTRY_TIMESTAMPTZ.
 */

/* pg_stat_get_last_analyze_time */
PG_STAT_GET_RELENTRY_TIMESTAMPTZ(last_analyze_time);

/* pg_stat_get_last_autoanalyze_time */
PG_STAT_GET_RELENTRY_TIMESTAMPTZ(last_autoanalyze_time);

/* pg_stat_get_last_autovacuum_time */
PG_STAT_GET_RELENTRY_TIMESTAMPTZ(last_autovacuum_time);

/* pg_stat_get_last_vacuum_time */
PG_STAT_GET_RELENTRY_TIMESTAMPTZ(last_vacuum_time);

/* pg_stat_get_lastscan */
PG_STAT_GET_RELENTRY_TIMESTAMPTZ(lastscan);
|
2010-08-21 12:59:17 +02:00
|
|
|
|
2008-05-15 02:17:41 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_function_calls(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid funcid = PG_GETARG_OID(0);
|
|
|
|
PgStat_StatFuncEntry *funcentry;
|
|
|
|
|
|
|
|
if ((funcentry = pgstat_fetch_stat_funcentry(funcid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
PG_RETURN_INT64(funcentry->f_numcalls);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
2012-04-30 20:02:47 +02:00
|
|
|
pg_stat_get_function_total_time(PG_FUNCTION_ARGS)
|
2008-05-15 02:17:41 +02:00
|
|
|
{
|
|
|
|
Oid funcid = PG_GETARG_OID(0);
|
|
|
|
PgStat_StatFuncEntry *funcentry;
|
|
|
|
|
|
|
|
if ((funcentry = pgstat_fetch_stat_funcentry(funcid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
2012-04-30 20:02:47 +02:00
|
|
|
/* convert counter from microsec to millisec for display */
|
|
|
|
PG_RETURN_FLOAT8(((double) funcentry->f_total_time) / 1000.0);
|
2008-05-15 02:17:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_function_self_time(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid funcid = PG_GETARG_OID(0);
|
|
|
|
PgStat_StatFuncEntry *funcentry;
|
|
|
|
|
|
|
|
if ((funcentry = pgstat_fetch_stat_funcentry(funcid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
2012-04-30 20:02:47 +02:00
|
|
|
/* convert counter from microsec to millisec for display */
|
|
|
|
PG_RETURN_FLOAT8(((double) funcentry->f_self_time) / 1000.0);
|
2008-05-15 02:17:41 +02:00
|
|
|
}
|
|
|
|
|
2001-06-22 21:18:36 +02:00
|
|
|
/*
 * pg_stat_get_backend_idset
 *		Set-returning function yielding one row per known backend, each
 *		containing that backend's ID as reported by the local backend-status
 *		snapshot.  Uses the standard SRF per-call protocol: cross-call state
 *		is a single int counter kept in the multi-call memory context.
 */
Datum
pg_stat_get_backend_idset(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;
	int		   *fctx;			/* cross-call cursor: last 1-based index emitted */

	/* stuff done only on the first call of the function */
	if (SRF_IS_FIRSTCALL())
	{
		/* create a function context for cross-call persistence */
		funcctx = SRF_FIRSTCALL_INIT();

		/* allocate the cursor in the per-SRF context so it survives calls */
		fctx = MemoryContextAlloc(funcctx->multi_call_memory_ctx,
								  sizeof(int));
		funcctx->user_fctx = fctx;

		fctx[0] = 0;
	}

	/* stuff done on every call of the function */
	funcctx = SRF_PERCALL_SETUP();
	fctx = funcctx->user_fctx;

	/* advance to the next 1-based backend index */
	fctx[0] += 1;

	/*
	 * We recheck pgstat_fetch_stat_numbackends() each time through, just in
	 * case the local status data has been refreshed since we started.  It's
	 * plenty cheap enough if not.  If a refresh does happen, we'll likely
	 * miss or duplicate some backend IDs, but we're content not to crash.
	 * (Refreshing midway through such a query would be problematic usage
	 * anyway, since the backend IDs we've already returned might no longer
	 * refer to extant sessions.)
	 */
	if (fctx[0] <= pgstat_fetch_stat_numbackends())
	{
		/* do when there is more left to send */
		LocalPgBackendStatus *local_beentry = pgstat_fetch_stat_local_beentry(fctx[0]);

		SRF_RETURN_NEXT(funcctx, Int32GetDatum(local_beentry->backend_id));
	}
	else
	{
		/* do when there is no more left */
		SRF_RETURN_DONE(funcctx);
	}
}
|
|
|
|
|
Add a generic command progress reporting facility.
Using this facility, any utility command can report the target relation
upon which it is operating, if there is one, and up to 10 64-bit
counters; the intent of this is that users should be able to figure out
what a utility command is doing without having to resort to ugly hacks
like attaching strace to a backend.
As a demonstration, this adds very crude reporting to lazy vacuum; we
just report the target relation and nothing else. A forthcoming patch
will make VACUUM report a bunch of additional data that will make this
much more interesting. But this gets the basic framework in place.
Vinayak Pokale, Rahila Syed, Amit Langote, Robert Haas, reviewed by
Kyotaro Horiguchi, Jim Nasby, Thom Brown, Masahiko Sawada, Fujii Masao,
and Masanori Oyama.
2016-03-09 18:08:58 +01:00
|
|
|
/*
 * pg_stat_get_progress_info
 *		Returns command progress information for the named command.
 *
 * Takes a command name (text, case-insensitive) and materializes one row
 * per backend currently running that command type: pid, database OID, the
 * command's target relation OID, and PGSTAT_NUM_PROGRESS_PARAM int64
 * progress counters.  The relation OID and counters are nulled out for
 * backends whose stats the caller is not entitled to see.
 *
 * Raises ERRCODE_INVALID_PARAMETER_VALUE for an unrecognized command name.
 */
Datum
pg_stat_get_progress_info(PG_FUNCTION_ARGS)
{
	/* output width: pid + datid + target relid, then the progress params */
#define PG_STAT_GET_PROGRESS_COLS PGSTAT_NUM_PROGRESS_PARAM + 3
	int			num_backends = pgstat_fetch_stat_numbackends();
	int			curr_backend;
	char	   *cmd = text_to_cstring(PG_GETARG_TEXT_PP(0));
	ProgressCommandType cmdtype;
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;

	/* Translate command name into command type code. */
	if (pg_strcasecmp(cmd, "VACUUM") == 0)
		cmdtype = PROGRESS_COMMAND_VACUUM;
	else if (pg_strcasecmp(cmd, "ANALYZE") == 0)
		cmdtype = PROGRESS_COMMAND_ANALYZE;
	else if (pg_strcasecmp(cmd, "CLUSTER") == 0)
		cmdtype = PROGRESS_COMMAND_CLUSTER;
	else if (pg_strcasecmp(cmd, "CREATE INDEX") == 0)
		cmdtype = PROGRESS_COMMAND_CREATE_INDEX;
	else if (pg_strcasecmp(cmd, "BASEBACKUP") == 0)
		cmdtype = PROGRESS_COMMAND_BASEBACKUP;
	else if (pg_strcasecmp(cmd, "COPY") == 0)
		cmdtype = PROGRESS_COMMAND_COPY;
	else
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid command name: \"%s\"", cmd)));

	/* set up rsinfo->setResult/setDesc for materialize mode */
	InitMaterializedSRF(fcinfo, 0);

	/* 1-based index */
	for (curr_backend = 1; curr_backend <= num_backends; curr_backend++)
	{
		LocalPgBackendStatus *local_beentry;
		PgBackendStatus *beentry;
		Datum		values[PG_STAT_GET_PROGRESS_COLS] = {0};
		bool		nulls[PG_STAT_GET_PROGRESS_COLS] = {0};
		int			i;

		local_beentry = pgstat_fetch_stat_local_beentry(curr_backend);
		beentry = &local_beentry->backendStatus;

		/*
		 * Report values for only those backends which are running the given
		 * command.
		 */
		if (beentry->st_progress_command != cmdtype)
			continue;

		/* Value available to all callers */
		values[0] = Int32GetDatum(beentry->st_procpid);
		values[1] = ObjectIdGetDatum(beentry->st_databaseid);

		/* show rest of the values including relid only to role members */
		if (HAS_PGSTAT_PERMISSIONS(beentry->st_userid))
		{
			values[2] = ObjectIdGetDatum(beentry->st_progress_command_target);
			for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++)
				values[i + 3] = Int64GetDatum(beentry->st_progress_param[i]);
		}
		else
		{
			nulls[2] = true;
			for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++)
				nulls[i + 3] = true;
		}

		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
	}

	/* materialized SRFs return (Datum) 0; rows are in the tuplestore */
	return (Datum) 0;
}
|
|
|
|
|
2015-05-09 01:25:30 +02:00
|
|
|
/*
|
|
|
|
* Returns activity of PG backends.
|
|
|
|
*/
|
2008-05-07 16:41:56 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_activity(PG_FUNCTION_ARGS)
|
|
|
|
{
|
Make use of in-core query id added by commit 5fd9dfa5f5
Use the in-core query id computation for pg_stat_activity,
log_line_prefix, and EXPLAIN VERBOSE.
Similar to other fields in pg_stat_activity, only the queryid from the
top level statements are exposed, and if the backends status isn't
active then the queryid from the last executed statements is displayed.
Add a %Q placeholder to include the queryid in log_line_prefix, which
will also only expose top level statements.
For EXPLAIN VERBOSE, if a query identifier has been computed, either by
enabling compute_query_id or using a third-party module, display it.
Bump catalog version.
Discussion: https://postgr.es/m/20210407125726.tkvjdbw76hxnpwfi@nol
Author: Julien Rouhaud
Reviewed-by: Alvaro Herrera, Nitin Jadhav, Zhihong Yu
2021-04-07 20:03:56 +02:00
|
|
|
#define PG_STAT_GET_ACTIVITY_COLS 30
|
2015-05-09 01:25:30 +02:00
|
|
|
int num_backends = pgstat_fetch_stat_numbackends();
|
|
|
|
int curr_backend;
|
|
|
|
int pid = PG_ARGISNULL(0) ? -1 : PG_GETARG_INT32(0);
|
|
|
|
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
|
|
|
|
|
2022-10-18 03:22:35 +02:00
|
|
|
InitMaterializedSRF(fcinfo, 0);
|
2015-05-09 01:25:30 +02:00
|
|
|
|
|
|
|
/* 1-based index */
|
|
|
|
for (curr_backend = 1; curr_backend <= num_backends; curr_backend++)
|
2008-05-07 16:41:56 +02:00
|
|
|
{
|
|
|
|
/* for each row */
|
2022-07-16 08:42:15 +02:00
|
|
|
Datum values[PG_STAT_GET_ACTIVITY_COLS] = {0};
|
|
|
|
bool nulls[PG_STAT_GET_ACTIVITY_COLS] = {0};
|
2014-02-25 18:34:04 +01:00
|
|
|
LocalPgBackendStatus *local_beentry;
|
2008-05-07 16:41:56 +02:00
|
|
|
PgBackendStatus *beentry;
|
2016-03-10 18:44:09 +01:00
|
|
|
PGPROC *proc;
|
2017-03-27 04:02:22 +02:00
|
|
|
const char *wait_event_type = NULL;
|
|
|
|
const char *wait_event = NULL;
|
2008-05-07 16:41:56 +02:00
|
|
|
|
2015-05-09 01:25:30 +02:00
|
|
|
/* Get the next one in the list */
|
|
|
|
local_beentry = pgstat_fetch_stat_local_beentry(curr_backend);
|
2016-09-10 19:49:04 +02:00
|
|
|
beentry = &local_beentry->backendStatus;
|
|
|
|
|
|
|
|
/* If looking for specific PID, ignore all the others */
|
|
|
|
if (pid != -1 && beentry->st_procpid != pid)
|
|
|
|
continue;
|
|
|
|
|
2008-05-07 16:41:56 +02:00
|
|
|
/* Values available to all callers */
|
2017-03-27 04:02:22 +02:00
|
|
|
if (beentry->st_databaseid != InvalidOid)
|
|
|
|
values[0] = ObjectIdGetDatum(beentry->st_databaseid);
|
|
|
|
else
|
|
|
|
nulls[0] = true;
|
|
|
|
|
2008-05-07 16:41:56 +02:00
|
|
|
values[1] = Int32GetDatum(beentry->st_procpid);
|
2017-03-27 04:02:22 +02:00
|
|
|
|
|
|
|
if (beentry->st_userid != InvalidOid)
|
|
|
|
values[2] = ObjectIdGetDatum(beentry->st_userid);
|
|
|
|
else
|
|
|
|
nulls[2] = true;
|
|
|
|
|
2009-11-29 19:14:32 +01:00
|
|
|
if (beentry->st_appname)
|
|
|
|
values[3] = CStringGetTextDatum(beentry->st_appname);
|
|
|
|
else
|
|
|
|
nulls[3] = true;
|
2008-05-07 16:41:56 +02:00
|
|
|
|
2014-02-25 18:34:04 +01:00
|
|
|
if (TransactionIdIsValid(local_beentry->backend_xid))
|
2016-03-10 18:44:09 +01:00
|
|
|
values[15] = TransactionIdGetDatum(local_beentry->backend_xid);
|
2014-02-25 18:34:04 +01:00
|
|
|
else
|
2016-03-10 18:44:09 +01:00
|
|
|
nulls[15] = true;
|
2014-02-25 18:34:04 +01:00
|
|
|
|
|
|
|
if (TransactionIdIsValid(local_beentry->backend_xmin))
|
2016-03-10 18:44:09 +01:00
|
|
|
values[16] = TransactionIdGetDatum(local_beentry->backend_xmin);
|
2014-02-25 18:34:04 +01:00
|
|
|
else
|
2016-03-10 18:44:09 +01:00
|
|
|
nulls[16] = true;
|
2014-02-25 18:34:04 +01:00
|
|
|
|
2017-03-30 20:18:53 +02:00
|
|
|
/* Values only available to role member or pg_read_all_stats */
|
2020-04-20 12:53:40 +02:00
|
|
|
if (HAS_PGSTAT_PERMISSIONS(beentry->st_userid))
|
2008-05-07 16:41:56 +02:00
|
|
|
{
|
2013-12-27 22:26:24 +01:00
|
|
|
SockAddr zero_clientaddr;
|
2017-09-19 20:46:07 +02:00
|
|
|
char *clipped_activity;
|
2013-12-27 22:26:24 +01:00
|
|
|
|
2012-01-19 14:19:20 +01:00
|
|
|
switch (beentry->st_state)
|
|
|
|
{
|
|
|
|
case STATE_IDLE:
|
|
|
|
values[4] = CStringGetTextDatum("idle");
|
|
|
|
break;
|
|
|
|
case STATE_RUNNING:
|
|
|
|
values[4] = CStringGetTextDatum("active");
|
|
|
|
break;
|
|
|
|
case STATE_IDLEINTRANSACTION:
|
|
|
|
values[4] = CStringGetTextDatum("idle in transaction");
|
|
|
|
break;
|
|
|
|
case STATE_FASTPATH:
|
|
|
|
values[4] = CStringGetTextDatum("fastpath function call");
|
|
|
|
break;
|
|
|
|
case STATE_IDLEINTRANSACTION_ABORTED:
|
|
|
|
values[4] = CStringGetTextDatum("idle in transaction (aborted)");
|
|
|
|
break;
|
|
|
|
case STATE_DISABLED:
|
|
|
|
values[4] = CStringGetTextDatum("disabled");
|
|
|
|
break;
|
|
|
|
case STATE_UNDEFINED:
|
|
|
|
nulls[4] = true;
|
|
|
|
break;
|
|
|
|
}
|
2013-04-03 20:13:28 +02:00
|
|
|
|
2017-09-19 20:46:07 +02:00
|
|
|
clipped_activity = pgstat_clip_activity(beentry->st_activity_raw);
|
|
|
|
values[5] = CStringGetTextDatum(clipped_activity);
|
|
|
|
pfree(clipped_activity);
|
2008-05-07 16:41:56 +02:00
|
|
|
|
2020-02-06 01:18:06 +01:00
|
|
|
/* leader_pid */
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
nulls[28] = true;
|
2016-04-21 20:02:15 +02:00
|
|
|
|
2020-02-06 01:18:06 +01:00
|
|
|
proc = BackendPidGetProc(beentry->st_procpid);
|
2016-04-21 20:02:15 +02:00
|
|
|
|
2020-02-06 01:18:06 +01:00
|
|
|
if (proc == NULL && (beentry->st_backendType != B_BACKEND))
|
2016-04-21 20:02:15 +02:00
|
|
|
{
|
2017-03-27 04:02:22 +02:00
|
|
|
/*
|
|
|
|
* For an auxiliary process, retrieve process info from
|
|
|
|
* AuxiliaryProcs stored in shared-memory.
|
|
|
|
*/
|
|
|
|
proc = AuxiliaryPidGetProc(beentry->st_procpid);
|
2020-02-06 01:18:06 +01:00
|
|
|
}
|
2017-03-27 04:02:22 +02:00
|
|
|
|
2020-02-06 01:18:06 +01:00
|
|
|
/*
|
|
|
|
* If a PGPROC entry was retrieved, display wait events and lock
|
|
|
|
* group leader information if any. To avoid extra overhead, no
|
|
|
|
* extra lock is being held, so there is no guarantee of
|
|
|
|
* consistency across multiple rows.
|
|
|
|
*/
|
|
|
|
if (proc != NULL)
|
|
|
|
{
|
|
|
|
uint32 raw_wait_event;
|
|
|
|
PGPROC *leader;
|
2017-03-27 04:02:22 +02:00
|
|
|
|
2020-02-06 01:18:06 +01:00
|
|
|
raw_wait_event = UINT32_ACCESS_ONCE(proc->wait_event_info);
|
|
|
|
wait_event_type = pgstat_get_wait_event_type(raw_wait_event);
|
|
|
|
wait_event = pgstat_get_wait_event(raw_wait_event);
|
|
|
|
|
|
|
|
leader = proc->lockGroupLeader;
|
2020-07-26 09:32:11 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Show the leader only for active parallel workers. This
|
|
|
|
* leaves the field as NULL for the leader of a parallel
|
|
|
|
* group.
|
|
|
|
*/
|
|
|
|
if (leader && leader->pid != beentry->st_procpid)
|
2020-02-06 01:18:06 +01:00
|
|
|
{
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
values[28] = Int32GetDatum(leader->pid);
|
|
|
|
nulls[28] = false;
|
2017-03-27 04:02:22 +02:00
|
|
|
}
|
2016-04-21 20:02:15 +02:00
|
|
|
}
|
|
|
|
|
2016-03-10 18:44:09 +01:00
|
|
|
if (wait_event_type)
|
|
|
|
values[6] = CStringGetTextDatum(wait_event_type);
|
|
|
|
else
|
|
|
|
nulls[6] = true;
|
|
|
|
|
|
|
|
if (wait_event)
|
|
|
|
values[7] = CStringGetTextDatum(wait_event);
|
2008-05-07 16:41:56 +02:00
|
|
|
else
|
2012-01-19 14:19:20 +01:00
|
|
|
nulls[7] = true;
|
2008-05-07 16:41:56 +02:00
|
|
|
|
2020-01-08 18:33:49 +01:00
|
|
|
/*
|
|
|
|
* Don't expose transaction time for walsenders; it confuses
|
|
|
|
* monitoring, particularly because we don't keep the time up-to-
|
|
|
|
* date.
|
|
|
|
*/
|
|
|
|
if (beentry->st_xact_start_timestamp != 0 &&
|
|
|
|
beentry->st_backendType != B_WAL_SENDER)
|
2016-03-10 18:44:09 +01:00
|
|
|
values[8] = TimestampTzGetDatum(beentry->st_xact_start_timestamp);
|
2008-05-07 16:41:56 +02:00
|
|
|
else
|
2012-01-19 14:19:20 +01:00
|
|
|
nulls[8] = true;
|
2008-05-07 16:41:56 +02:00
|
|
|
|
2016-03-10 18:44:09 +01:00
|
|
|
if (beentry->st_activity_start_timestamp != 0)
|
|
|
|
values[9] = TimestampTzGetDatum(beentry->st_activity_start_timestamp);
|
2008-05-07 16:41:56 +02:00
|
|
|
else
|
2012-01-19 14:19:20 +01:00
|
|
|
nulls[9] = true;
|
|
|
|
|
2016-03-10 18:44:09 +01:00
|
|
|
if (beentry->st_proc_start_timestamp != 0)
|
|
|
|
values[10] = TimestampTzGetDatum(beentry->st_proc_start_timestamp);
|
2012-01-19 14:19:20 +01:00
|
|
|
else
|
|
|
|
nulls[10] = true;
|
2008-05-07 16:41:56 +02:00
|
|
|
|
2016-03-10 18:44:09 +01:00
|
|
|
if (beentry->st_state_start_timestamp != 0)
|
|
|
|
values[11] = TimestampTzGetDatum(beentry->st_state_start_timestamp);
|
|
|
|
else
|
|
|
|
nulls[11] = true;
|
|
|
|
|
2008-05-07 16:41:56 +02:00
|
|
|
/* A zeroed client addr means we don't know */
|
|
|
|
memset(&zero_clientaddr, 0, sizeof(zero_clientaddr));
|
|
|
|
if (memcmp(&(beentry->st_clientaddr), &zero_clientaddr,
|
2013-12-27 22:26:24 +01:00
|
|
|
sizeof(zero_clientaddr)) == 0)
|
2008-05-07 16:41:56 +02:00
|
|
|
{
|
2012-01-19 14:19:20 +01:00
|
|
|
nulls[12] = true;
|
|
|
|
nulls[13] = true;
|
2016-03-10 18:44:09 +01:00
|
|
|
nulls[14] = true;
|
2008-05-07 16:41:56 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2022-08-26 00:13:22 +02:00
|
|
|
if (beentry->st_clientaddr.addr.ss_family == AF_INET ||
|
|
|
|
beentry->st_clientaddr.addr.ss_family == AF_INET6)
|
2008-05-07 16:41:56 +02:00
|
|
|
{
|
|
|
|
char remote_host[NI_MAXHOST];
|
|
|
|
char remote_port[NI_MAXSERV];
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
remote_host[0] = '\0';
|
|
|
|
remote_port[0] = '\0';
|
|
|
|
ret = pg_getnameinfo_all(&beentry->st_clientaddr.addr,
|
|
|
|
beentry->st_clientaddr.salen,
|
|
|
|
remote_host, sizeof(remote_host),
|
|
|
|
remote_port, sizeof(remote_port),
|
|
|
|
NI_NUMERICHOST | NI_NUMERICSERV);
|
2011-08-09 17:28:35 +02:00
|
|
|
if (ret == 0)
|
2008-05-07 16:41:56 +02:00
|
|
|
{
|
|
|
|
clean_ipv6_addr(beentry->st_clientaddr.addr.ss_family, remote_host);
|
2016-03-10 18:44:09 +01:00
|
|
|
values[12] = DirectFunctionCall1(inet_in,
|
2008-05-07 16:41:56 +02:00
|
|
|
CStringGetDatum(remote_host));
|
2014-04-02 03:30:08 +02:00
|
|
|
if (beentry->st_clienthostname &&
|
|
|
|
beentry->st_clienthostname[0])
|
2016-03-10 18:44:09 +01:00
|
|
|
values[13] = CStringGetTextDatum(beentry->st_clienthostname);
|
2011-02-17 22:03:28 +01:00
|
|
|
else
|
2016-03-10 18:44:09 +01:00
|
|
|
nulls[13] = true;
|
|
|
|
values[14] = Int32GetDatum(atoi(remote_port));
|
2008-05-07 16:41:56 +02:00
|
|
|
}
|
2011-08-09 17:28:35 +02:00
|
|
|
else
|
|
|
|
{
|
2012-01-19 14:19:20 +01:00
|
|
|
nulls[12] = true;
|
|
|
|
nulls[13] = true;
|
2016-03-10 18:44:09 +01:00
|
|
|
nulls[14] = true;
|
2011-08-09 17:28:35 +02:00
|
|
|
}
|
2008-05-07 16:41:56 +02:00
|
|
|
}
|
|
|
|
else if (beentry->st_clientaddr.addr.ss_family == AF_UNIX)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Unix sockets always reports NULL for host and -1 for
|
|
|
|
* port, so it's possible to tell the difference to
|
|
|
|
* connections we have no permissions to view, or with
|
|
|
|
* errors.
|
|
|
|
*/
|
2012-01-19 14:19:20 +01:00
|
|
|
nulls[12] = true;
|
2016-03-10 18:44:09 +01:00
|
|
|
nulls[13] = true;
|
2018-05-02 21:52:54 +02:00
|
|
|
values[14] = Int32GetDatum(-1);
|
2008-05-07 16:41:56 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Unknown address type, should never happen */
|
2012-01-19 14:19:20 +01:00
|
|
|
nulls[12] = true;
|
|
|
|
nulls[13] = true;
|
2016-03-10 18:44:09 +01:00
|
|
|
nulls[14] = true;
|
2008-05-07 16:41:56 +02:00
|
|
|
}
|
|
|
|
}
|
2017-03-27 04:02:22 +02:00
|
|
|
/* Add backend type */
|
2017-08-31 18:24:47 +02:00
|
|
|
if (beentry->st_backendType == B_BG_WORKER)
|
|
|
|
{
|
|
|
|
const char *bgw_type;
|
|
|
|
|
|
|
|
bgw_type = GetBackgroundWorkerTypeByPid(beentry->st_procpid);
|
|
|
|
if (bgw_type)
|
|
|
|
values[17] = CStringGetTextDatum(bgw_type);
|
|
|
|
else
|
|
|
|
nulls[17] = true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
values[17] =
|
2020-03-11 16:36:40 +01:00
|
|
|
CStringGetTextDatum(GetBackendTypeDesc(beentry->st_backendType));
|
2019-02-20 11:38:44 +01:00
|
|
|
|
|
|
|
/* SSL information */
|
|
|
|
if (beentry->st_ssl)
|
|
|
|
{
|
|
|
|
values[18] = BoolGetDatum(true); /* ssl */
|
|
|
|
values[19] = CStringGetTextDatum(beentry->st_sslstatus->ssl_version);
|
|
|
|
values[20] = CStringGetTextDatum(beentry->st_sslstatus->ssl_cipher);
|
|
|
|
values[21] = Int32GetDatum(beentry->st_sslstatus->ssl_bits);
|
|
|
|
|
|
|
|
if (beentry->st_sslstatus->ssl_client_dn[0])
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
values[22] = CStringGetTextDatum(beentry->st_sslstatus->ssl_client_dn);
|
2019-02-20 11:38:44 +01:00
|
|
|
else
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
nulls[22] = true;
|
2019-02-20 11:38:44 +01:00
|
|
|
|
|
|
|
if (beentry->st_sslstatus->ssl_client_serial[0])
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
values[23] = DirectFunctionCall3(numeric_in,
|
2019-02-20 11:38:44 +01:00
|
|
|
CStringGetDatum(beentry->st_sslstatus->ssl_client_serial),
|
|
|
|
ObjectIdGetDatum(InvalidOid),
|
|
|
|
Int32GetDatum(-1));
|
|
|
|
else
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
nulls[23] = true;
|
2019-02-20 11:38:44 +01:00
|
|
|
|
|
|
|
if (beentry->st_sslstatus->ssl_issuer_dn[0])
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
values[24] = CStringGetTextDatum(beentry->st_sslstatus->ssl_issuer_dn);
|
2019-02-20 11:38:44 +01:00
|
|
|
else
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
nulls[24] = true;
|
2019-02-20 11:38:44 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
values[18] = BoolGetDatum(false); /* ssl */
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
nulls[19] = nulls[20] = nulls[21] = nulls[22] = nulls[23] = nulls[24] = true;
|
2019-02-20 11:38:44 +01:00
|
|
|
}
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
|
|
|
|
/* GSSAPI information */
|
|
|
|
if (beentry->st_gss)
|
|
|
|
{
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
values[25] = BoolGetDatum(beentry->st_gssstatus->gss_auth); /* gss_auth */
|
|
|
|
values[26] = CStringGetTextDatum(beentry->st_gssstatus->gss_princ);
|
|
|
|
values[27] = BoolGetDatum(beentry->st_gssstatus->gss_enc); /* GSS Encryption in use */
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
Remove support for SSL compression
PostgreSQL disabled compression as of e3bdb2d and the documentation
recommends against using it since. Additionally, SSL compression has
been disabled in OpenSSL since version 1.1.0, and was disabled in many
distributions long before that. The most recent TLS version, TLSv1.3,
disallows compression at the protocol level.
This commit removes the feature itself, removing support for the libpq
parameter sslcompression (parameter still listed for compatibility
reasons with existing connection strings, just ignored), and removes
the equivalent field in pg_stat_ssl and de facto PgBackendSSLStatus.
Note that, on top of removing the ability to activate compression by
configuration, compression is actively disabled in both frontend and
backend to avoid overrides from local configurations.
A TAP test is added for deprecated SSL parameters to check after
backwards compatibility.
Bump catalog version.
Author: Daniel Gustafsson
Reviewed-by: Peter Eisentraut, Magnus Hagander, Michael Paquier
Discussion: https://postgr.es/m/7E384D48-11C5-441B-9EC3-F7DB1F8518F6@yesql.se
2021-03-09 03:16:47 +01:00
|
|
|
values[25] = BoolGetDatum(false); /* gss_auth */
|
|
|
|
nulls[26] = true; /* No GSS principal */
|
|
|
|
values[27] = BoolGetDatum(false); /* GSS Encryption not in
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
* use */
|
|
|
|
}
|
2021-04-20 18:22:26 +02:00
|
|
|
if (beentry->st_query_id == 0)
|
Make use of in-core query id added by commit 5fd9dfa5f5
Use the in-core query id computation for pg_stat_activity,
log_line_prefix, and EXPLAIN VERBOSE.
Similar to other fields in pg_stat_activity, only the queryid from the
top level statements are exposed, and if the backends status isn't
active then the queryid from the last executed statements is displayed.
Add a %Q placeholder to include the queryid in log_line_prefix, which
will also only expose top level statements.
For EXPLAIN VERBOSE, if a query identifier has been computed, either by
enabling compute_query_id or using a third-party module, display it.
Bump catalog version.
Discussion: https://postgr.es/m/20210407125726.tkvjdbw76hxnpwfi@nol
Author: Julien Rouhaud
Reviewed-by: Alvaro Herrera, Nitin Jadhav, Zhihong Yu
2021-04-07 20:03:56 +02:00
|
|
|
nulls[29] = true;
|
|
|
|
else
|
2021-04-20 18:22:26 +02:00
|
|
|
values[29] = UInt64GetDatum(beentry->st_query_id);
|
2008-05-07 16:41:56 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* No permissions to view data about this session */
|
2012-01-19 14:19:20 +01:00
|
|
|
values[5] = CStringGetTextDatum("<insufficient privilege>");
|
|
|
|
nulls[4] = true;
|
2008-05-07 16:41:56 +02:00
|
|
|
nulls[6] = true;
|
|
|
|
nulls[7] = true;
|
|
|
|
nulls[8] = true;
|
|
|
|
nulls[9] = true;
|
2009-11-29 00:38:08 +01:00
|
|
|
nulls[10] = true;
|
2011-02-17 22:03:28 +01:00
|
|
|
nulls[11] = true;
|
2012-01-19 14:19:20 +01:00
|
|
|
nulls[12] = true;
|
|
|
|
nulls[13] = true;
|
2016-03-10 18:44:09 +01:00
|
|
|
nulls[14] = true;
|
2017-03-27 04:02:22 +02:00
|
|
|
nulls[17] = true;
|
2019-02-20 11:38:44 +01:00
|
|
|
nulls[18] = true;
|
|
|
|
nulls[19] = true;
|
|
|
|
nulls[20] = true;
|
|
|
|
nulls[21] = true;
|
|
|
|
nulls[22] = true;
|
|
|
|
nulls[23] = true;
|
|
|
|
nulls[24] = true;
|
|
|
|
nulls[25] = true;
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
nulls[26] = true;
|
|
|
|
nulls[27] = true;
|
|
|
|
nulls[28] = true;
|
Make use of in-core query id added by commit 5fd9dfa5f5
Use the in-core query id computation for pg_stat_activity,
log_line_prefix, and EXPLAIN VERBOSE.
Similar to other fields in pg_stat_activity, only the queryid from the
top level statements are exposed, and if the backends status isn't
active then the queryid from the last executed statements is displayed.
Add a %Q placeholder to include the queryid in log_line_prefix, which
will also only expose top level statements.
For EXPLAIN VERBOSE, if a query identifier has been computed, either by
enabling compute_query_id or using a third-party module, display it.
Bump catalog version.
Discussion: https://postgr.es/m/20210407125726.tkvjdbw76hxnpwfi@nol
Author: Julien Rouhaud
Reviewed-by: Alvaro Herrera, Nitin Jadhav, Zhihong Yu
2021-04-07 20:03:56 +02:00
|
|
|
nulls[29] = true;
|
2008-05-07 16:41:56 +02:00
|
|
|
}
|
|
|
|
|
Create routine able to set single-call SRFs for Materialize mode
Set-returning functions that use the Materialize mode, creating a
tuplestore to include all the tuples returned in a set rather than doing
so in multiple calls, use roughly the same set of steps to prepare
ReturnSetInfo for this job:
- Check if ReturnSetInfo supports returning a tuplestore and if the
materialize mode is enabled.
- Create a tuplestore for all the tuples part of the returned set in the
per-query memory context, stored in ReturnSetInfo->setResult.
- Build a tuple descriptor mostly from get_call_result_type(), then
stored in ReturnSetInfo->setDesc. Note that there are some cases where
the SRF's tuple descriptor has to be the one specified by the function
caller.
This refactoring is done so as there are (well, should be) no behavior
changes in any of the in-core functions refactored, and the centralized
function that checks and sets up the function's ReturnSetInfo can be
controlled with a set of bits32 options. Two of them prove to be
necessary now:
- SRF_SINGLE_USE_EXPECTED to use expectedDesc as tuple descriptor, as
expected by the function's caller.
- SRF_SINGLE_BLESS to validate the tuple descriptor for the SRF.
The same initialization pattern is simplified in 28 places per my
count as of src/backend/, shaving up to ~900 lines of code. These
mostly come from the removal of the per-query initializations and the
sanity checks now grouped in a single location. There are more
locations that could be simplified in contrib/, that are left for a
follow-up cleanup.
fcc2817, 07daca5 and d61a361 have prepared the areas of the code related
to this change, to ease this refactoring.
Author: Melanie Plageman, Michael Paquier
Reviewed-by: Álvaro Herrera, Justin Pryzby
Discussion: https://postgr.es/m/CAAKRu_azyd1Z3W_r7Ou4sorTjRCs+PxeHw1CWJeXKofkE6TuZg@mail.gmail.com
2022-03-07 02:26:29 +01:00
|
|
|
tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
|
2008-05-07 16:41:56 +02:00
|
|
|
|
2015-05-09 01:25:30 +02:00
|
|
|
/* If only a single backend was requested, and we found it, break. */
|
|
|
|
if (pid != -1)
|
|
|
|
break;
|
2008-05-07 16:41:56 +02:00
|
|
|
}
|
2015-05-09 01:25:30 +02:00
|
|
|
|
|
|
|
return (Datum) 0;
|
2008-05-07 16:41:56 +02:00
|
|
|
}
|
|
|
|
|
2001-06-22 21:18:36 +02:00
|
|
|
|
2002-07-31 02:40:40 +02:00
|
|
|
Datum
|
2002-08-04 21:51:30 +02:00
|
|
|
pg_backend_pid(PG_FUNCTION_ARGS)
|
2002-07-31 02:40:40 +02:00
|
|
|
{
|
|
|
|
PG_RETURN_INT32(MyProcPid);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2001-06-22 21:18:36 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_backend_pid(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2006-06-19 03:51:22 +02:00
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
|
|
|
PgBackendStatus *beentry;
|
2001-06-22 21:18:36 +02:00
|
|
|
|
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2006-06-19 03:51:22 +02:00
|
|
|
PG_RETURN_INT32(beentry->st_procpid);
|
2001-06-22 21:18:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_backend_dbid(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2006-06-19 03:51:22 +02:00
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
|
|
|
PgBackendStatus *beentry;
|
2001-06-22 21:18:36 +02:00
|
|
|
|
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2006-06-19 03:51:22 +02:00
|
|
|
PG_RETURN_OID(beentry->st_databaseid);
|
2001-06-22 21:18:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_backend_userid(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2006-06-19 03:51:22 +02:00
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
|
|
|
PgBackendStatus *beentry;
|
2001-06-22 21:18:36 +02:00
|
|
|
|
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2006-06-19 03:51:22 +02:00
|
|
|
PG_RETURN_OID(beentry->st_userid);
|
2001-06-22 21:18:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_backend_activity(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2006-06-19 03:51:22 +02:00
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
|
|
|
PgBackendStatus *beentry;
|
|
|
|
const char *activity;
|
2017-09-19 20:46:07 +02:00
|
|
|
char *clipped_activity;
|
|
|
|
text *ret;
|
2001-06-22 21:18:36 +02:00
|
|
|
|
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
2004-02-12 02:44:22 +01:00
|
|
|
activity = "<backend information not available>";
|
2020-04-20 12:53:40 +02:00
|
|
|
else if (!HAS_PGSTAT_PERMISSIONS(beentry->st_userid))
|
2004-02-12 02:44:22 +01:00
|
|
|
activity = "<insufficient privilege>";
|
2017-09-19 20:46:07 +02:00
|
|
|
else if (*(beentry->st_activity_raw) == '\0')
|
2004-02-12 02:44:22 +01:00
|
|
|
activity = "<command string not enabled>";
|
|
|
|
else
|
2017-09-19 20:46:07 +02:00
|
|
|
activity = beentry->st_activity_raw;
|
2003-03-20 19:51:16 +01:00
|
|
|
|
2017-09-19 20:46:07 +02:00
|
|
|
clipped_activity = pgstat_clip_activity(activity);
|
|
|
|
ret = cstring_to_text(activity);
|
|
|
|
pfree(clipped_activity);
|
|
|
|
|
|
|
|
PG_RETURN_TEXT_P(ret);
|
2001-06-22 21:18:36 +02:00
|
|
|
}
|
|
|
|
|
2006-08-19 03:36:34 +02:00
|
|
|
Datum
|
2016-03-10 18:44:09 +01:00
|
|
|
pg_stat_get_backend_wait_event_type(PG_FUNCTION_ARGS)
|
2006-08-19 03:36:34 +02:00
|
|
|
{
|
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
|
|
|
PgBackendStatus *beentry;
|
2016-03-10 18:44:09 +01:00
|
|
|
PGPROC *proc;
|
2016-04-21 20:02:15 +02:00
|
|
|
const char *wait_event_type = NULL;
|
2006-08-19 03:36:34 +02:00
|
|
|
|
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
2016-03-10 18:44:09 +01:00
|
|
|
wait_event_type = "<backend information not available>";
|
2020-04-20 12:53:40 +02:00
|
|
|
else if (!HAS_PGSTAT_PERMISSIONS(beentry->st_userid))
|
2016-03-10 18:44:09 +01:00
|
|
|
wait_event_type = "<insufficient privilege>";
|
2016-04-21 20:02:15 +02:00
|
|
|
else if ((proc = BackendPidGetProc(beentry->st_procpid)) != NULL)
|
2016-03-10 18:44:09 +01:00
|
|
|
wait_event_type = pgstat_get_wait_event_type(proc->wait_event_info);
|
2006-08-19 03:36:34 +02:00
|
|
|
|
2016-03-10 18:44:09 +01:00
|
|
|
if (!wait_event_type)
|
2006-08-19 03:36:34 +02:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2016-03-10 18:44:09 +01:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text(wait_event_type));
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_backend_wait_event(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
|
|
|
PgBackendStatus *beentry;
|
|
|
|
PGPROC *proc;
|
2016-04-21 20:02:15 +02:00
|
|
|
const char *wait_event = NULL;
|
2016-03-10 18:44:09 +01:00
|
|
|
|
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
|
|
|
wait_event = "<backend information not available>";
|
2020-04-20 12:53:40 +02:00
|
|
|
else if (!HAS_PGSTAT_PERMISSIONS(beentry->st_userid))
|
2016-03-10 18:44:09 +01:00
|
|
|
wait_event = "<insufficient privilege>";
|
2016-04-21 20:02:15 +02:00
|
|
|
else if ((proc = BackendPidGetProc(beentry->st_procpid)) != NULL)
|
2016-03-10 18:44:09 +01:00
|
|
|
wait_event = pgstat_get_wait_event(proc->wait_event_info);
|
|
|
|
|
|
|
|
if (!wait_event)
|
|
|
|
PG_RETURN_NULL();
|
2006-08-19 03:36:34 +02:00
|
|
|
|
2016-03-10 18:44:09 +01:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text(wait_event));
|
2006-08-19 03:36:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2003-03-20 04:34:57 +01:00
|
|
|
Datum
|
|
|
|
pg_stat_get_backend_activity_start(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2005-06-30 00:51:57 +02:00
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
2003-04-04 05:03:54 +02:00
|
|
|
TimestampTz result;
|
2006-06-19 03:51:22 +02:00
|
|
|
PgBackendStatus *beentry;
|
2003-03-20 04:34:57 +01:00
|
|
|
|
2003-04-04 05:03:54 +02:00
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
2003-03-20 04:34:57 +01:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2020-04-20 12:53:40 +02:00
|
|
|
else if (!HAS_PGSTAT_PERMISSIONS(beentry->st_userid))
|
2003-03-20 04:34:57 +01:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2006-06-19 03:51:22 +02:00
|
|
|
result = beentry->st_activity_start_timestamp;
|
2003-03-20 04:34:57 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* No time recorded for start of current query -- this is the case if the
|
|
|
|
* user hasn't enabled query-level stats collection.
|
|
|
|
*/
|
2005-06-30 00:51:57 +02:00
|
|
|
if (result == 0)
|
2003-03-20 04:34:57 +01:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2003-04-04 05:03:54 +02:00
|
|
|
PG_RETURN_TIMESTAMPTZ(result);
|
2003-03-20 04:34:57 +01:00
|
|
|
}
|
|
|
|
|
2006-12-06 19:06:48 +01:00
|
|
|
|
|
|
|
Datum
|
2007-09-11 05:28:05 +02:00
|
|
|
pg_stat_get_backend_xact_start(PG_FUNCTION_ARGS)
|
2006-12-06 19:06:48 +01:00
|
|
|
{
|
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
|
|
|
TimestampTz result;
|
|
|
|
PgBackendStatus *beentry;
|
|
|
|
|
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2020-04-20 12:53:40 +02:00
|
|
|
else if (!HAS_PGSTAT_PERMISSIONS(beentry->st_userid))
|
2006-12-06 19:06:48 +01:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2007-09-11 05:28:05 +02:00
|
|
|
result = beentry->st_xact_start_timestamp;
|
2006-12-06 19:06:48 +01:00
|
|
|
|
|
|
|
if (result == 0) /* not in a transaction */
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
|
|
|
PG_RETURN_TIMESTAMPTZ(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-05-09 13:31:34 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_backend_start(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2005-06-30 00:51:57 +02:00
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
2005-05-09 13:31:34 +02:00
|
|
|
TimestampTz result;
|
2006-06-19 03:51:22 +02:00
|
|
|
PgBackendStatus *beentry;
|
2005-05-09 13:31:34 +02:00
|
|
|
|
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2020-04-20 12:53:40 +02:00
|
|
|
else if (!HAS_PGSTAT_PERMISSIONS(beentry->st_userid))
|
2005-05-09 13:31:34 +02:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2006-06-19 03:51:22 +02:00
|
|
|
result = beentry->st_proc_start_timestamp;
|
2005-05-09 13:31:34 +02:00
|
|
|
|
2005-06-30 00:51:57 +02:00
|
|
|
if (result == 0) /* probably can't happen? */
|
2005-05-09 13:31:34 +02:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
|
|
|
PG_RETURN_TIMESTAMPTZ(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_backend_client_addr(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2006-06-19 03:51:22 +02:00
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
|
|
|
PgBackendStatus *beentry;
|
2006-05-19 17:15:37 +02:00
|
|
|
SockAddr zero_clientaddr;
|
2005-05-09 13:31:34 +02:00
|
|
|
char remote_host[NI_MAXHOST];
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2020-04-20 12:53:40 +02:00
|
|
|
else if (!HAS_PGSTAT_PERMISSIONS(beentry->st_userid))
|
2005-05-09 13:31:34 +02:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2006-05-19 17:15:37 +02:00
|
|
|
/* A zeroed client addr means we don't know */
|
|
|
|
memset(&zero_clientaddr, 0, sizeof(zero_clientaddr));
|
2006-06-19 03:51:22 +02:00
|
|
|
if (memcmp(&(beentry->st_clientaddr), &zero_clientaddr,
|
2013-12-27 22:26:24 +01:00
|
|
|
sizeof(zero_clientaddr)) == 0)
|
2006-05-19 17:15:37 +02:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2006-06-19 03:51:22 +02:00
|
|
|
switch (beentry->st_clientaddr.addr.ss_family)
|
2005-05-09 13:31:34 +02:00
|
|
|
{
|
|
|
|
case AF_INET:
|
|
|
|
case AF_INET6:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
}
|
|
|
|
|
|
|
|
remote_host[0] = '\0';
|
2006-06-19 03:51:22 +02:00
|
|
|
ret = pg_getnameinfo_all(&beentry->st_clientaddr.addr,
|
|
|
|
beentry->st_clientaddr.salen,
|
2005-10-17 18:24:20 +02:00
|
|
|
remote_host, sizeof(remote_host),
|
|
|
|
NULL, 0,
|
|
|
|
NI_NUMERICHOST | NI_NUMERICSERV);
|
2011-08-09 17:28:35 +02:00
|
|
|
if (ret != 0)
|
2005-05-09 13:31:34 +02:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2007-05-18 01:31:49 +02:00
|
|
|
clean_ipv6_addr(beentry->st_clientaddr.addr.ss_family, remote_host);
|
|
|
|
|
2022-08-28 10:47:10 +02:00
|
|
|
PG_RETURN_DATUM(DirectFunctionCall1(inet_in,
|
2005-05-09 13:31:34 +02:00
|
|
|
CStringGetDatum(remote_host)));
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_backend_client_port(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2006-06-19 03:51:22 +02:00
|
|
|
int32 beid = PG_GETARG_INT32(0);
|
|
|
|
PgBackendStatus *beentry;
|
2006-05-19 17:15:37 +02:00
|
|
|
SockAddr zero_clientaddr;
|
2005-05-09 13:31:34 +02:00
|
|
|
char remote_port[NI_MAXSERV];
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if ((beentry = pgstat_fetch_stat_beentry(beid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2020-04-20 12:53:40 +02:00
|
|
|
else if (!HAS_PGSTAT_PERMISSIONS(beentry->st_userid))
|
2005-05-09 13:31:34 +02:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2006-05-19 17:15:37 +02:00
|
|
|
/* A zeroed client addr means we don't know */
|
|
|
|
memset(&zero_clientaddr, 0, sizeof(zero_clientaddr));
|
2006-06-19 03:51:22 +02:00
|
|
|
if (memcmp(&(beentry->st_clientaddr), &zero_clientaddr,
|
2013-12-27 22:26:24 +01:00
|
|
|
sizeof(zero_clientaddr)) == 0)
|
2006-05-19 17:15:37 +02:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2006-06-19 03:51:22 +02:00
|
|
|
switch (beentry->st_clientaddr.addr.ss_family)
|
2005-05-09 13:31:34 +02:00
|
|
|
{
|
|
|
|
case AF_INET:
|
|
|
|
case AF_INET6:
|
|
|
|
break;
|
|
|
|
case AF_UNIX:
|
|
|
|
PG_RETURN_INT32(-1);
|
|
|
|
default:
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
}
|
|
|
|
|
|
|
|
remote_port[0] = '\0';
|
2006-06-19 03:51:22 +02:00
|
|
|
ret = pg_getnameinfo_all(&beentry->st_clientaddr.addr,
|
|
|
|
beentry->st_clientaddr.salen,
|
2005-10-17 18:24:20 +02:00
|
|
|
NULL, 0,
|
|
|
|
remote_port, sizeof(remote_port),
|
|
|
|
NI_NUMERICHOST | NI_NUMERICSERV);
|
2011-08-09 17:28:35 +02:00
|
|
|
if (ret != 0)
|
2005-05-09 13:31:34 +02:00
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2006-06-19 03:51:22 +02:00
|
|
|
PG_RETURN_DATUM(DirectFunctionCall1(int4in,
|
|
|
|
CStringGetDatum(remote_port)));
|
2005-05-09 13:31:34 +02:00
|
|
|
}
|
|
|
|
|
2003-03-20 04:34:57 +01:00
|
|
|
|
2001-06-22 21:18:36 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_db_numbackends(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2006-06-19 03:51:22 +02:00
|
|
|
Oid dbid = PG_GETARG_OID(0);
|
2001-06-22 21:18:36 +02:00
|
|
|
int32 result;
|
2006-06-19 03:51:22 +02:00
|
|
|
int tot_backends = pgstat_fetch_stat_numbackends();
|
|
|
|
int beid;
|
2001-06-22 21:18:36 +02:00
|
|
|
|
2006-06-19 03:51:22 +02:00
|
|
|
result = 0;
|
|
|
|
for (beid = 1; beid <= tot_backends; beid++)
|
|
|
|
{
|
2022-09-29 18:14:39 +02:00
|
|
|
LocalPgBackendStatus *local_beentry = pgstat_fetch_stat_local_beentry(beid);
|
2001-06-22 21:18:36 +02:00
|
|
|
|
2022-09-29 18:14:39 +02:00
|
|
|
if (local_beentry->backendStatus.st_databaseid == dbid)
|
2006-06-19 03:51:22 +02:00
|
|
|
result++;
|
|
|
|
}
|
2001-06-22 21:18:36 +02:00
|
|
|
|
|
|
|
PG_RETURN_INT32(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2022-12-07 01:11:48 +01:00
|
|
|
/*
 * Template for SQL-callable accessors of per-database int64 counters.
 *
 * PG_STAT_GET_DBENTRY_INT64(foo) expands to pg_stat_get_db_foo(dbid),
 * which returns the dbentry field "foo" for the given database OID, or 0
 * when no stats entry exists for that database.
 *
 * Note: the closing brace deliberately carries no trailing backslash; a
 * stray line-continuation there would silently splice the following
 * source line into the macro body.
 */
#define PG_STAT_GET_DBENTRY_INT64(stat)							\
Datum															\
CppConcat(pg_stat_get_db_,stat)(PG_FUNCTION_ARGS)				\
{																\
	Oid			dbid = PG_GETARG_OID(0);						\
	int64		result;											\
	PgStat_StatDBEntry *dbentry;								\
																\
	if ((dbentry = pgstat_fetch_stat_dbentry(dbid)) == NULL)	\
		result = 0;												\
	else														\
		result = (int64) (dbentry->stat);						\
																\
	PG_RETURN_INT64(result);									\
}
|
2001-06-22 21:18:36 +02:00
|
|
|
|
2022-12-07 01:11:48 +01:00
|
|
|
/*
 * Generate the per-database int64 counter accessors.  Each invocation
 * below produces one pg_stat_get_db_<name>() function via the
 * PG_STAT_GET_DBENTRY_INT64 template.
 */

/* pg_stat_get_db_blocks_fetched */
PG_STAT_GET_DBENTRY_INT64(blocks_fetched);

/* pg_stat_get_db_blocks_hit */
PG_STAT_GET_DBENTRY_INT64(blocks_hit);

/* pg_stat_get_db_conflict_bufferpin */
PG_STAT_GET_DBENTRY_INT64(conflict_bufferpin);

/* pg_stat_get_db_conflict_lock */
PG_STAT_GET_DBENTRY_INT64(conflict_lock);

/* pg_stat_get_db_conflict_snapshot */
PG_STAT_GET_DBENTRY_INT64(conflict_snapshot);

/* pg_stat_get_db_conflict_startup_deadlock */
PG_STAT_GET_DBENTRY_INT64(conflict_startup_deadlock);

/* pg_stat_get_db_conflict_tablespace */
PG_STAT_GET_DBENTRY_INT64(conflict_tablespace);

/* pg_stat_get_db_deadlocks */
PG_STAT_GET_DBENTRY_INT64(deadlocks);

/* pg_stat_get_db_sessions */
PG_STAT_GET_DBENTRY_INT64(sessions);

/* pg_stat_get_db_sessions_abandoned */
PG_STAT_GET_DBENTRY_INT64(sessions_abandoned);

/* pg_stat_get_db_sessions_fatal */
PG_STAT_GET_DBENTRY_INT64(sessions_fatal);

/* pg_stat_get_db_sessions_killed */
PG_STAT_GET_DBENTRY_INT64(sessions_killed);

/* pg_stat_get_db_temp_bytes */
PG_STAT_GET_DBENTRY_INT64(temp_bytes);

/* pg_stat_get_db_temp_files */
PG_STAT_GET_DBENTRY_INT64(temp_files);

/* pg_stat_get_db_tuples_deleted */
PG_STAT_GET_DBENTRY_INT64(tuples_deleted);

/* pg_stat_get_db_tuples_fetched */
PG_STAT_GET_DBENTRY_INT64(tuples_fetched);

/* pg_stat_get_db_tuples_inserted */
PG_STAT_GET_DBENTRY_INT64(tuples_inserted);

/* pg_stat_get_db_tuples_returned */
PG_STAT_GET_DBENTRY_INT64(tuples_returned);

/* pg_stat_get_db_tuples_updated */
PG_STAT_GET_DBENTRY_INT64(tuples_updated);

/* pg_stat_get_db_xact_commit */
PG_STAT_GET_DBENTRY_INT64(xact_commit);

/* pg_stat_get_db_xact_rollback */
PG_STAT_GET_DBENTRY_INT64(xact_rollback);
|
2007-03-16 18:57:36 +01:00
|
|
|
|
|
|
|
|
2011-02-10 15:09:35 +01:00
|
|
|
Datum
|
|
|
|
pg_stat_get_db_stat_reset_time(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid dbid = PG_GETARG_OID(0);
|
|
|
|
TimestampTz result;
|
|
|
|
PgStat_StatDBEntry *dbentry;
|
|
|
|
|
|
|
|
if ((dbentry = pgstat_fetch_stat_dbentry(dbid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
result = dbentry->stat_reset_timestamp;
|
|
|
|
|
|
|
|
if (result == 0)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
else
|
|
|
|
PG_RETURN_TIMESTAMPTZ(result);
|
|
|
|
}
|
|
|
|
|
2011-01-03 12:46:03 +01:00
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_db_conflict_all(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid dbid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_StatDBEntry *dbentry;
|
|
|
|
|
|
|
|
if ((dbentry = pgstat_fetch_stat_dbentry(dbid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
2022-12-07 01:11:48 +01:00
|
|
|
result = (int64) (dbentry->conflict_tablespace +
|
|
|
|
dbentry->conflict_lock +
|
|
|
|
dbentry->conflict_snapshot +
|
|
|
|
dbentry->conflict_bufferpin +
|
|
|
|
dbentry->conflict_startup_deadlock);
|
2012-01-26 15:58:19 +01:00
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
2019-03-09 19:45:17 +01:00
|
|
|
Datum
|
|
|
|
pg_stat_get_db_checksum_failures(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid dbid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_StatDBEntry *dbentry;
|
|
|
|
|
2019-04-17 13:51:48 +02:00
|
|
|
if (!DataChecksumsEnabled())
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2019-03-09 19:45:17 +01:00
|
|
|
if ((dbentry = pgstat_fetch_stat_dbentry(dbid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
2022-12-07 01:11:48 +01:00
|
|
|
result = (int64) (dbentry->checksum_failures);
|
2019-03-09 19:45:17 +01:00
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
2019-04-12 14:04:50 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_db_checksum_last_failure(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid dbid = PG_GETARG_OID(0);
|
|
|
|
TimestampTz result;
|
|
|
|
PgStat_StatDBEntry *dbentry;
|
|
|
|
|
2019-04-17 13:51:48 +02:00
|
|
|
if (!DataChecksumsEnabled())
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
2019-04-12 14:04:50 +02:00
|
|
|
if ((dbentry = pgstat_fetch_stat_dbentry(dbid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
result = dbentry->last_checksum_failure;
|
|
|
|
|
|
|
|
if (result == 0)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
else
|
|
|
|
PG_RETURN_TIMESTAMPTZ(result);
|
|
|
|
}
|
|
|
|
|
2022-12-07 01:11:48 +01:00
|
|
|
/*
 * Template for SQL-callable accessors of per-database timing counters.
 *
 * PG_STAT_GET_DBENTRY_FLOAT8(foo) expands to pg_stat_get_db_foo(dbid),
 * returning the dbentry field "foo" divided by 1000 as float8 (0 when no
 * stats entry exists).  The /1000.0 converts the stored counter to
 * milliseconds for display -- assumes the field is stored in
 * microseconds; confirm against the PgStat_StatDBEntry definition.
 *
 * Note: the closing brace deliberately carries no trailing backslash; a
 * stray line-continuation there would silently splice the following
 * source line into the macro body.
 */
#define PG_STAT_GET_DBENTRY_FLOAT8(stat)						\
Datum															\
CppConcat(pg_stat_get_db_,stat)(PG_FUNCTION_ARGS)				\
{																\
	Oid			dbid = PG_GETARG_OID(0);						\
	double		result;											\
	PgStat_StatDBEntry *dbentry;								\
																\
	if ((dbentry = pgstat_fetch_stat_dbentry(dbid)) == NULL)	\
		result = 0;												\
	else														\
		result = ((double) dbentry->stat) / 1000.0;				\
																\
	PG_RETURN_FLOAT8(result);									\
}
|
2021-01-17 13:34:09 +01:00
|
|
|
|
2022-12-07 01:11:48 +01:00
|
|
|
/*
 * Generate the per-database timing accessors.  Each invocation below
 * produces one pg_stat_get_db_<name>() function via the
 * PG_STAT_GET_DBENTRY_FLOAT8 template.
 */

/* pg_stat_get_db_active_time */
PG_STAT_GET_DBENTRY_FLOAT8(active_time);

/* pg_stat_get_db_blk_read_time */
PG_STAT_GET_DBENTRY_FLOAT8(blk_read_time);

/* pg_stat_get_db_blk_write_time */
PG_STAT_GET_DBENTRY_FLOAT8(blk_write_time);

/* pg_stat_get_db_idle_in_transaction_time */
PG_STAT_GET_DBENTRY_FLOAT8(idle_in_transaction_time);

/* pg_stat_get_db_session_time */
PG_STAT_GET_DBENTRY_FLOAT8(session_time);
|
2021-01-17 13:34:09 +01:00
|
|
|
|
2007-03-30 20:34:56 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_bgwriter_timed_checkpoints(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_INT64(pgstat_fetch_stat_checkpointer()->timed_checkpoints);
|
2007-03-30 20:34:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_bgwriter_requested_checkpoints(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_INT64(pgstat_fetch_stat_checkpointer()->requested_checkpoints);
|
2007-03-30 20:34:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_bgwriter_buf_written_checkpoints(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_INT64(pgstat_fetch_stat_checkpointer()->buf_written_checkpoints);
|
2007-03-30 20:34:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
2007-06-28 02:02:40 +02:00
|
|
|
pg_stat_get_bgwriter_buf_written_clean(PG_FUNCTION_ARGS)
|
2007-03-30 20:34:56 +02:00
|
|
|
{
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_INT64(pgstat_fetch_stat_bgwriter()->buf_written_clean);
|
2007-03-30 20:34:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
2007-06-28 02:02:40 +02:00
|
|
|
pg_stat_get_bgwriter_maxwritten_clean(PG_FUNCTION_ARGS)
|
2007-03-30 20:34:56 +02:00
|
|
|
{
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_INT64(pgstat_fetch_stat_bgwriter()->maxwritten_clean);
|
2007-03-30 20:34:56 +02:00
|
|
|
}
|
|
|
|
|
2012-04-05 20:03:21 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_checkpoint_write_time(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2012-04-30 20:02:47 +02:00
|
|
|
/* time is already in msec, just convert to double for presentation */
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_FLOAT8((double)
|
|
|
|
pgstat_fetch_stat_checkpointer()->checkpoint_write_time);
|
2012-04-05 20:03:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_checkpoint_sync_time(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2012-04-30 20:02:47 +02:00
|
|
|
/* time is already in msec, just convert to double for presentation */
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_FLOAT8((double)
|
|
|
|
pgstat_fetch_stat_checkpointer()->checkpoint_sync_time);
|
2012-04-05 20:03:21 +02:00
|
|
|
}
|
|
|
|
|
2011-02-10 15:09:35 +01:00
|
|
|
Datum
|
|
|
|
pg_stat_get_bgwriter_stat_reset_time(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_TIMESTAMPTZ(pgstat_fetch_stat_bgwriter()->stat_reset_timestamp);
|
2011-02-10 15:09:35 +01:00
|
|
|
}
|
|
|
|
|
2007-09-25 22:03:38 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_buf_written_backend(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_INT64(pgstat_fetch_stat_checkpointer()->buf_written_backend);
|
2007-09-25 22:03:38 +02:00
|
|
|
}
|
|
|
|
|
2010-11-15 18:42:59 +01:00
|
|
|
Datum
|
|
|
|
pg_stat_get_buf_fsync_backend(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_INT64(pgstat_fetch_stat_checkpointer()->buf_fsync_backend);
|
2010-11-15 18:42:59 +01:00
|
|
|
}
|
|
|
|
|
2007-09-25 22:03:38 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_buf_alloc(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2021-08-05 04:16:04 +02:00
|
|
|
PG_RETURN_INT64(pgstat_fetch_stat_bgwriter()->buf_alloc);
|
2007-09-25 22:03:38 +02:00
|
|
|
}
|
|
|
|
|
2020-10-02 03:17:11 +02:00
|
|
|
/*
|
|
|
|
* Returns statistics of WAL activity
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_stat_get_wal(PG_FUNCTION_ARGS)
|
|
|
|
{
|
Track total amounts of times spent writing and syncing WAL data to disk.
This commit adds new GUC track_wal_io_timing. When this is enabled,
the total amounts of time XLogWrite writes and issue_xlog_fsync syncs
WAL data to disk are counted in pg_stat_wal. This information would be
useful to check how much WAL write and sync affect the performance.
Enabling track_wal_io_timing will make the server query the operating
system for the current time every time WAL is written or synced,
which may cause significant overhead on some platforms. To avoid such
additional overhead in the server with track_io_timing enabled,
this commit introduces track_wal_io_timing as a separate parameter from
track_io_timing.
Note that WAL write and sync activity by walreceiver has not been tracked yet.
This commit makes the server also track the numbers of times XLogWrite
writes and issue_xlog_fsync syncs WAL data to disk, in pg_stat_wal,
regardless of the setting of track_wal_io_timing. This counters can be
used to calculate the WAL write and sync time per request, for example.
Bump PGSTAT_FILE_FORMAT_ID.
Bump catalog version.
Author: Masahiro Ikeda
Reviewed-By: Japin Li, Hayato Kuroda, Masahiko Sawada, David Johnston, Fujii Masao
Discussion: https://postgr.es/m/0509ad67b585a5b86a83d445dfa75392@oss.nttdata.com
2021-03-09 08:52:06 +01:00
|
|
|
#define PG_STAT_GET_WAL_COLS 9
|
2020-10-02 03:17:11 +02:00
|
|
|
TupleDesc tupdesc;
|
2022-07-16 08:42:15 +02:00
|
|
|
Datum values[PG_STAT_GET_WAL_COLS] = {0};
|
|
|
|
bool nulls[PG_STAT_GET_WAL_COLS] = {0};
|
2020-12-02 05:00:15 +01:00
|
|
|
char buf[256];
|
2020-10-02 03:17:11 +02:00
|
|
|
PgStat_WalStats *wal_stats;
|
|
|
|
|
|
|
|
/* Initialise attributes information in the tuple descriptor */
|
|
|
|
tupdesc = CreateTemplateTupleDesc(PG_STAT_GET_WAL_COLS);
|
2020-12-02 05:00:15 +01:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "wal_records",
|
2020-10-02 03:17:11 +02:00
|
|
|
INT8OID, -1, 0);
|
2020-12-02 05:00:15 +01:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "wal_fpi",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "wal_bytes",
|
|
|
|
NUMERICOID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 4, "wal_buffers_full",
|
|
|
|
INT8OID, -1, 0);
|
Track total amounts of times spent writing and syncing WAL data to disk.
This commit adds new GUC track_wal_io_timing. When this is enabled,
the total amounts of time XLogWrite writes and issue_xlog_fsync syncs
WAL data to disk are counted in pg_stat_wal. This information would be
useful to check how much WAL write and sync affect the performance.
Enabling track_wal_io_timing will make the server query the operating
system for the current time every time WAL is written or synced,
which may cause significant overhead on some platforms. To avoid such
additional overhead in the server with track_io_timing enabled,
this commit introduces track_wal_io_timing as a separate parameter from
track_io_timing.
Note that WAL write and sync activity by walreceiver has not been tracked yet.
This commit makes the server also track the numbers of times XLogWrite
writes and issue_xlog_fsync syncs WAL data to disk, in pg_stat_wal,
regardless of the setting of track_wal_io_timing. This counters can be
used to calculate the WAL write and sync time per request, for example.
Bump PGSTAT_FILE_FORMAT_ID.
Bump catalog version.
Author: Masahiro Ikeda
Reviewed-By: Japin Li, Hayato Kuroda, Masahiko Sawada, David Johnston, Fujii Masao
Discussion: https://postgr.es/m/0509ad67b585a5b86a83d445dfa75392@oss.nttdata.com
2021-03-09 08:52:06 +01:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 5, "wal_write",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 6, "wal_sync",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 7, "wal_write_time",
|
|
|
|
FLOAT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 8, "wal_sync_time",
|
|
|
|
FLOAT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 9, "stats_reset",
|
2020-10-02 03:17:11 +02:00
|
|
|
TIMESTAMPTZOID, -1, 0);
|
|
|
|
|
|
|
|
BlessTupleDesc(tupdesc);
|
|
|
|
|
|
|
|
/* Get statistics about WAL activity */
|
|
|
|
wal_stats = pgstat_fetch_stat_wal();
|
|
|
|
|
|
|
|
/* Fill values and NULLs */
|
2020-12-02 05:00:15 +01:00
|
|
|
values[0] = Int64GetDatum(wal_stats->wal_records);
|
|
|
|
values[1] = Int64GetDatum(wal_stats->wal_fpi);
|
|
|
|
|
|
|
|
/* Convert to numeric. */
|
|
|
|
snprintf(buf, sizeof buf, UINT64_FORMAT, wal_stats->wal_bytes);
|
|
|
|
values[2] = DirectFunctionCall3(numeric_in,
|
|
|
|
CStringGetDatum(buf),
|
|
|
|
ObjectIdGetDatum(0),
|
|
|
|
Int32GetDatum(-1));
|
|
|
|
|
|
|
|
values[3] = Int64GetDatum(wal_stats->wal_buffers_full);
|
Track total amounts of times spent writing and syncing WAL data to disk.
This commit adds new GUC track_wal_io_timing. When this is enabled,
the total amounts of time XLogWrite writes and issue_xlog_fsync syncs
WAL data to disk are counted in pg_stat_wal. This information would be
useful to check how much WAL write and sync affect the performance.
Enabling track_wal_io_timing will make the server query the operating
system for the current time every time WAL is written or synced,
which may cause significant overhead on some platforms. To avoid such
additional overhead in the server with track_io_timing enabled,
this commit introduces track_wal_io_timing as a separate parameter from
track_io_timing.
Note that WAL write and sync activity by walreceiver has not been tracked yet.
This commit makes the server also track the numbers of times XLogWrite
writes and issue_xlog_fsync syncs WAL data to disk, in pg_stat_wal,
regardless of the setting of track_wal_io_timing. This counters can be
used to calculate the WAL write and sync time per request, for example.
Bump PGSTAT_FILE_FORMAT_ID.
Bump catalog version.
Author: Masahiro Ikeda
Reviewed-By: Japin Li, Hayato Kuroda, Masahiko Sawada, David Johnston, Fujii Masao
Discussion: https://postgr.es/m/0509ad67b585a5b86a83d445dfa75392@oss.nttdata.com
2021-03-09 08:52:06 +01:00
|
|
|
values[4] = Int64GetDatum(wal_stats->wal_write);
|
|
|
|
values[5] = Int64GetDatum(wal_stats->wal_sync);
|
|
|
|
|
|
|
|
/* Convert counters from microsec to millisec for display */
|
|
|
|
values[6] = Float8GetDatum(((double) wal_stats->wal_write_time) / 1000.0);
|
|
|
|
values[7] = Float8GetDatum(((double) wal_stats->wal_sync_time) / 1000.0);
|
|
|
|
|
|
|
|
values[8] = TimestampTzGetDatum(wal_stats->stat_reset_timestamp);
|
2020-10-02 03:17:11 +02:00
|
|
|
|
|
|
|
/* Returns the record as Datum */
|
|
|
|
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
|
|
|
|
}
|
|
|
|
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
/*
|
|
|
|
* Returns statistics of SLRU caches.
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_stat_get_slru(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
#define PG_STAT_GET_SLRU_COLS 9
|
|
|
|
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
|
|
|
|
int i;
|
|
|
|
PgStat_SLRUStats *stats;
|
|
|
|
|
2022-10-18 03:22:35 +02:00
|
|
|
InitMaterializedSRF(fcinfo, 0);
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
|
2022-04-06 22:56:06 +02:00
|
|
|
/* request SLRU stats from the cumulative stats system */
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
stats = pgstat_fetch_slru();
|
|
|
|
|
|
|
|
for (i = 0;; i++)
|
|
|
|
{
|
|
|
|
/* for each row */
|
2022-07-16 08:42:15 +02:00
|
|
|
Datum values[PG_STAT_GET_SLRU_COLS] = {0};
|
|
|
|
bool nulls[PG_STAT_GET_SLRU_COLS] = {0};
|
2021-11-12 13:49:21 +01:00
|
|
|
PgStat_SLRUStats stat;
|
Improve management of SLRU statistics collection.
Instead of re-identifying which statistics bucket to use for a given
SLRU on every counter increment, do it once during shmem initialization.
This saves a fair number of cycles, and there's no real cost because
we could not have a bucket assignment that varies over time or across
backends anyway.
Also, get rid of the ill-considered decision to let pgstat.c pry
directly into SLRU's shared state; it's cleaner just to have slru.c
pass the stats bucket number.
In consequence of these changes, there's no longer any need to store
an SLRU's LWLock tranche info in shared memory, so get rid of that,
making this a net reduction in shmem consumption. (That partly
reverts fe702a7b3.)
This is basically code review for 28cac71bd, so I also cleaned up
some comments, removed a dangling extern declaration, fixed some
things that should be static and/or const, etc.
Discussion: https://postgr.es/m/3618.1589313035@sss.pgh.pa.us
2020-05-13 19:08:12 +02:00
|
|
|
const char *name;
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
|
2022-04-07 06:29:46 +02:00
|
|
|
name = pgstat_get_slru_name(i);
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
|
|
|
|
if (!name)
|
|
|
|
break;
|
|
|
|
|
2021-11-12 13:49:21 +01:00
|
|
|
stat = stats[i];
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
|
|
|
|
values[0] = PointerGetDatum(cstring_to_text(name));
|
|
|
|
values[1] = Int64GetDatum(stat.blocks_zeroed);
|
|
|
|
values[2] = Int64GetDatum(stat.blocks_hit);
|
|
|
|
values[3] = Int64GetDatum(stat.blocks_read);
|
|
|
|
values[4] = Int64GetDatum(stat.blocks_written);
|
|
|
|
values[5] = Int64GetDatum(stat.blocks_exists);
|
|
|
|
values[6] = Int64GetDatum(stat.flush);
|
|
|
|
values[7] = Int64GetDatum(stat.truncate);
|
2020-05-13 15:20:37 +02:00
|
|
|
values[8] = TimestampTzGetDatum(stat.stat_reset_timestamp);
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
|
Create routine able to set single-call SRFs for Materialize mode
Set-returning functions that use the Materialize mode, creating a
tuplestore to include all the tuples returned in a set rather than doing
so in multiple calls, use roughly the same set of steps to prepare
ReturnSetInfo for this job:
- Check if ReturnSetInfo supports returning a tuplestore and if the
materialize mode is enabled.
- Create a tuplestore for all the tuples part of the returned set in the
per-query memory context, stored in ReturnSetInfo->setResult.
- Build a tuple descriptor mostly from get_call_result_type(), then
stored in ReturnSetInfo->setDesc. Note that there are some cases where
the SRF's tuple descriptor has to be the one specified by the function
caller.
This refactoring is done so as there are (well, should be) no behavior
changes in any of the in-core functions refactored, and the centralized
function that checks and sets up the function's ReturnSetInfo can be
controlled with a set of bits32 options. Two of them prove to be
necessary now:
- SRF_SINGLE_USE_EXPECTED to use expectedDesc as tuple descriptor, as
expected by the function's caller.
- SRF_SINGLE_BLESS to validate the tuple descriptor for the SRF.
The same initialization pattern is simplified in 28 places per my
count as of src/backend/, shaving up to ~900 lines of code. These
mostly come from the removal of the per-query initializations and the
sanity checks now grouped in a single location. There are more
locations that could be simplified in contrib/, that are left for a
follow-up cleanup.
fcc2817, 07daca5 and d61a361 have prepared the areas of the code related
to this change, to ease this refactoring.
Author: Melanie Plageman, Michael Paquier
Reviewed-by: Álvaro Herrera, Justin Pryzby
Discussion: https://postgr.es/m/CAAKRu_azyd1Z3W_r7Ou4sorTjRCs+PxeHw1CWJeXKofkE6TuZg@mail.gmail.com
2022-03-07 02:26:29 +01:00
|
|
|
tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return (Datum) 0;
|
|
|
|
}
|
|
|
|
|
2010-08-08 18:27:06 +02:00
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_numscans(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_TableStatus *tabentry;
|
|
|
|
|
|
|
|
if ((tabentry = find_tabstat_entry(relid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
result = (int64) (tabentry->t_counts.t_numscans);
|
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_tuples_returned(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_TableStatus *tabentry;
|
|
|
|
|
|
|
|
if ((tabentry = find_tabstat_entry(relid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
result = (int64) (tabentry->t_counts.t_tuples_returned);
|
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_tuples_fetched(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_TableStatus *tabentry;
|
|
|
|
|
|
|
|
if ((tabentry = find_tabstat_entry(relid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
result = (int64) (tabentry->t_counts.t_tuples_fetched);
|
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_tuples_inserted(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_TableStatus *tabentry;
|
|
|
|
PgStat_TableXactStatus *trans;
|
|
|
|
|
|
|
|
if ((tabentry = find_tabstat_entry(relid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
result = tabentry->t_counts.t_tuples_inserted;
|
|
|
|
/* live subtransactions' counts aren't in t_tuples_inserted yet */
|
|
|
|
for (trans = tabentry->trans; trans != NULL; trans = trans->upper)
|
|
|
|
result += trans->tuples_inserted;
|
|
|
|
}
|
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_tuples_updated(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_TableStatus *tabentry;
|
|
|
|
PgStat_TableXactStatus *trans;
|
|
|
|
|
|
|
|
if ((tabentry = find_tabstat_entry(relid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
result = tabentry->t_counts.t_tuples_updated;
|
|
|
|
/* live subtransactions' counts aren't in t_tuples_updated yet */
|
|
|
|
for (trans = tabentry->trans; trans != NULL; trans = trans->upper)
|
|
|
|
result += trans->tuples_updated;
|
|
|
|
}
|
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_tuples_deleted(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_TableStatus *tabentry;
|
|
|
|
PgStat_TableXactStatus *trans;
|
|
|
|
|
|
|
|
if ((tabentry = find_tabstat_entry(relid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
result = tabentry->t_counts.t_tuples_deleted;
|
|
|
|
/* live subtransactions' counts aren't in t_tuples_deleted yet */
|
|
|
|
for (trans = tabentry->trans; trans != NULL; trans = trans->upper)
|
|
|
|
result += trans->tuples_deleted;
|
|
|
|
}
|
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_tuples_hot_updated(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_TableStatus *tabentry;
|
|
|
|
|
|
|
|
if ((tabentry = find_tabstat_entry(relid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
result = (int64) (tabentry->t_counts.t_tuples_hot_updated);
|
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_blocks_fetched(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_TableStatus *tabentry;
|
|
|
|
|
|
|
|
if ((tabentry = find_tabstat_entry(relid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
result = (int64) (tabentry->t_counts.t_blocks_fetched);
|
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_blocks_hit(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 result;
|
|
|
|
PgStat_TableStatus *tabentry;
|
|
|
|
|
|
|
|
if ((tabentry = find_tabstat_entry(relid)) == NULL)
|
|
|
|
result = 0;
|
|
|
|
else
|
|
|
|
result = (int64) (tabentry->t_counts.t_blocks_hit);
|
|
|
|
|
|
|
|
PG_RETURN_INT64(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_function_calls(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid funcid = PG_GETARG_OID(0);
|
|
|
|
PgStat_BackendFunctionEntry *funcentry;
|
|
|
|
|
|
|
|
if ((funcentry = find_funcstat_entry(funcid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
PG_RETURN_INT64(funcentry->f_counts.f_numcalls);
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
2012-04-30 20:02:47 +02:00
|
|
|
pg_stat_get_xact_function_total_time(PG_FUNCTION_ARGS)
|
2010-08-08 18:27:06 +02:00
|
|
|
{
|
|
|
|
Oid funcid = PG_GETARG_OID(0);
|
|
|
|
PgStat_BackendFunctionEntry *funcentry;
|
|
|
|
|
|
|
|
if ((funcentry = find_funcstat_entry(funcid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
2012-04-30 20:02:47 +02:00
|
|
|
PG_RETURN_FLOAT8(INSTR_TIME_GET_MILLISEC(funcentry->f_counts.f_total_time));
|
2010-08-08 18:27:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_get_xact_function_self_time(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid funcid = PG_GETARG_OID(0);
|
|
|
|
PgStat_BackendFunctionEntry *funcentry;
|
|
|
|
|
|
|
|
if ((funcentry = find_funcstat_entry(funcid)) == NULL)
|
|
|
|
PG_RETURN_NULL();
|
2012-04-30 20:02:47 +02:00
|
|
|
PG_RETURN_FLOAT8(INSTR_TIME_GET_MILLISEC(funcentry->f_counts.f_self_time));
|
2010-08-08 18:27:06 +02:00
|
|
|
}
|
|
|
|
|
2007-03-16 18:57:36 +01:00
|
|
|
|
2015-02-20 03:36:50 +01:00
|
|
|
/* Get the timestamp of the current statistics snapshot */
|
|
|
|
Datum
|
|
|
|
pg_stat_get_snapshot_timestamp(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2022-04-07 06:29:46 +02:00
|
|
|
bool have_snapshot;
|
|
|
|
TimestampTz ts;
|
|
|
|
|
|
|
|
ts = pgstat_get_stat_snapshot_timestamp(&have_snapshot);
|
|
|
|
|
|
|
|
if (!have_snapshot)
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
|
|
|
|
PG_RETURN_TIMESTAMPTZ(ts);
|
2015-02-20 03:36:50 +01:00
|
|
|
}
|
|
|
|
|
2007-02-08 00:11:30 +01:00
|
|
|
/* Discard the active statistics snapshot */
|
|
|
|
Datum
|
|
|
|
pg_stat_clear_snapshot(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
pgstat_clear_snapshot();
|
|
|
|
|
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2022-04-07 08:35:56 +02:00
|
|
|
/* Force statistics to be reported at the next occasion */
|
|
|
|
Datum
|
|
|
|
pg_stat_force_next_flush(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
pgstat_force_next_flush();
|
|
|
|
|
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-02-08 00:11:30 +01:00
|
|
|
/* Reset all counters for the current database */
|
|
|
|
Datum
|
|
|
|
pg_stat_reset(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
pgstat_reset_counters();
|
|
|
|
|
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
2010-01-19 15:11:32 +01:00
|
|
|
|
|
|
|
/* Reset some shared cluster-wide counters */
|
|
|
|
Datum
|
|
|
|
pg_stat_reset_shared(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2011-07-03 13:15:58 +02:00
|
|
|
char *target = text_to_cstring(PG_GETARG_TEXT_PP(0));
|
|
|
|
|
2022-04-07 02:56:19 +02:00
|
|
|
if (strcmp(target, "archiver") == 0)
|
|
|
|
pgstat_reset_of_kind(PGSTAT_KIND_ARCHIVER);
|
|
|
|
else if (strcmp(target, "bgwriter") == 0)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Historically checkpointer was part of bgwriter, continue to reset
|
|
|
|
* both for now.
|
|
|
|
*/
|
|
|
|
pgstat_reset_of_kind(PGSTAT_KIND_BGWRITER);
|
|
|
|
pgstat_reset_of_kind(PGSTAT_KIND_CHECKPOINTER);
|
|
|
|
}
|
2022-04-07 09:28:40 +02:00
|
|
|
else if (strcmp(target, "recovery_prefetch") == 0)
|
|
|
|
XLogPrefetchResetStats();
|
2022-04-07 02:56:19 +02:00
|
|
|
else if (strcmp(target, "wal") == 0)
|
|
|
|
pgstat_reset_of_kind(PGSTAT_KIND_WAL);
|
|
|
|
else
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
errmsg("unrecognized reset target: \"%s\"", target),
|
2022-04-07 09:28:40 +02:00
|
|
|
errhint("Target must be \"archiver\", \"bgwriter\", \"recovery_prefetch\", or \"wal\".")));
|
2010-01-19 15:11:32 +01:00
|
|
|
|
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
2010-01-28 15:25:41 +01:00
|
|
|
|
2014-10-20 16:23:40 +02:00
|
|
|
/* Reset a single counter in the current database */
|
2010-01-28 15:25:41 +01:00
|
|
|
Datum
|
|
|
|
pg_stat_reset_single_table_counters(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid taboid = PG_GETARG_OID(0);
|
|
|
|
|
2022-04-07 02:56:19 +02:00
|
|
|
pgstat_reset(PGSTAT_KIND_RELATION, MyDatabaseId, taboid);
|
2010-01-28 15:25:41 +01:00
|
|
|
|
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
|
|
|
|
|
|
|
Datum
|
|
|
|
pg_stat_reset_single_function_counters(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid funcoid = PG_GETARG_OID(0);
|
|
|
|
|
2022-04-07 02:56:19 +02:00
|
|
|
pgstat_reset(PGSTAT_KIND_FUNCTION, MyDatabaseId, funcoid);
|
2010-01-28 15:25:41 +01:00
|
|
|
|
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
2014-01-28 18:58:22 +01:00
|
|
|
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
/* Reset SLRU counters (a specific one or all of them). */
|
|
|
|
Datum
|
|
|
|
pg_stat_reset_slru(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
char *target = NULL;
|
|
|
|
|
2022-04-07 02:56:19 +02:00
|
|
|
if (PG_ARGISNULL(0))
|
|
|
|
pgstat_reset_of_kind(PGSTAT_KIND_SLRU);
|
|
|
|
else
|
|
|
|
{
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
target = text_to_cstring(PG_GETARG_TEXT_PP(0));
|
2022-04-07 02:56:19 +02:00
|
|
|
pgstat_reset_slru(target);
|
|
|
|
}
|
Collect statistics about SLRU caches
There's a number of SLRU caches used to access important data like clog,
commit timestamps, multixact, asynchronous notifications, etc. Until now
we had no easy way to monitor these shared caches, compute hit ratios,
number of reads/writes etc.
This commit extends the statistics collector to track this information
for a predefined list of SLRUs, and also introduces a new system view
pg_stat_slru displaying the data.
The list of built-in SLRUs is fixed, but additional SLRUs may be defined
in extensions. Unfortunately, there's no suitable registry of SLRUs, so
this patch simply defines a fixed list of SLRUs with entries for the
built-in ones and one entry for all additional SLRUs. Extensions adding
their own SLRU are fairly rare, so this seems acceptable.
This patch only allows monitoring of SLRUs, not tuning. The SLRU sizes
are still fixed (hard-coded in the code) and it's not entirely clear
which of the SLRUs might need a GUC to tune size. In a way, allowing us
to determine that is one of the goals of this patch.
Bump catversion as the patch introduces new functions and system view.
Author: Tomas Vondra
Reviewed-by: Alvaro Herrera
Discussion: https://www.postgresql.org/message-id/flat/20200119143707.gyinppnigokesjok@development
2020-04-02 02:11:38 +02:00
|
|
|
|
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
|
|
|
|
2020-10-08 05:39:08 +02:00
|
|
|
/* Reset replication slots stats (a specific one or all of them). */
|
|
|
|
Datum
|
|
|
|
pg_stat_reset_replication_slot(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
char *target = NULL;
|
|
|
|
|
2022-04-07 02:56:19 +02:00
|
|
|
if (PG_ARGISNULL(0))
|
|
|
|
pgstat_reset_of_kind(PGSTAT_KIND_REPLSLOT);
|
|
|
|
else
|
2021-04-27 05:39:11 +02:00
|
|
|
{
|
2020-10-08 05:39:08 +02:00
|
|
|
target = text_to_cstring(PG_GETARG_TEXT_PP(0));
|
2022-04-07 02:56:19 +02:00
|
|
|
pgstat_reset_replslot(target);
|
2021-04-27 05:39:11 +02:00
|
|
|
}
|
|
|
|
|
2020-10-08 05:39:08 +02:00
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
|
|
|
|
2022-03-01 01:47:52 +01:00
|
|
|
/* Reset subscription stats (a specific one or all of them) */
|
|
|
|
Datum
|
|
|
|
pg_stat_reset_subscription_stats(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid subid;
|
|
|
|
|
|
|
|
if (PG_ARGISNULL(0))
|
|
|
|
{
|
|
|
|
/* Clear all subscription stats */
|
2022-04-07 02:56:19 +02:00
|
|
|
pgstat_reset_of_kind(PGSTAT_KIND_SUBSCRIPTION);
|
2022-03-01 01:47:52 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
subid = PG_GETARG_OID(0);
|
|
|
|
|
|
|
|
if (!OidIsValid(subid))
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
errmsg("invalid subscription OID %u", subid)));
|
2022-04-07 02:56:19 +02:00
|
|
|
pgstat_reset(PGSTAT_KIND_SUBSCRIPTION, InvalidOid, subid);
|
2022-03-01 01:47:52 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
|
|
|
|
2014-01-28 18:58:22 +01:00
|
|
|
Datum
|
|
|
|
pg_stat_get_archiver(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
TupleDesc tupdesc;
|
2022-07-16 08:42:15 +02:00
|
|
|
Datum values[7] = {0};
|
|
|
|
bool nulls[7] = {0};
|
2014-01-28 18:58:22 +01:00
|
|
|
PgStat_ArchiverStats *archiver_stats;
|
|
|
|
|
|
|
|
/* Initialise attributes information in the tuple descriptor */
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring an pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
|
|
|
tupdesc = CreateTemplateTupleDesc(7);
|
2014-01-28 18:58:22 +01:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "archived_count",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "last_archived_wal",
|
|
|
|
TEXTOID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "last_archived_time",
|
|
|
|
TIMESTAMPTZOID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 4, "failed_count",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 5, "last_failed_wal",
|
|
|
|
TEXTOID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 6, "last_failed_time",
|
|
|
|
TIMESTAMPTZOID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 7, "stats_reset",
|
|
|
|
TIMESTAMPTZOID, -1, 0);
|
|
|
|
|
|
|
|
BlessTupleDesc(tupdesc);
|
|
|
|
|
|
|
|
/* Get statistics about the archiver process */
|
|
|
|
archiver_stats = pgstat_fetch_stat_archiver();
|
|
|
|
|
|
|
|
/* Fill values and NULLs */
|
|
|
|
values[0] = Int64GetDatum(archiver_stats->archived_count);
|
2014-02-04 02:59:39 +01:00
|
|
|
if (*(archiver_stats->last_archived_wal) == '\0')
|
2014-01-28 18:58:22 +01:00
|
|
|
nulls[1] = true;
|
|
|
|
else
|
|
|
|
values[1] = CStringGetTextDatum(archiver_stats->last_archived_wal);
|
|
|
|
|
|
|
|
if (archiver_stats->last_archived_timestamp == 0)
|
|
|
|
nulls[2] = true;
|
|
|
|
else
|
|
|
|
values[2] = TimestampTzGetDatum(archiver_stats->last_archived_timestamp);
|
|
|
|
|
|
|
|
values[3] = Int64GetDatum(archiver_stats->failed_count);
|
2014-02-04 02:59:39 +01:00
|
|
|
if (*(archiver_stats->last_failed_wal) == '\0')
|
2014-01-28 18:58:22 +01:00
|
|
|
nulls[4] = true;
|
|
|
|
else
|
|
|
|
values[4] = CStringGetTextDatum(archiver_stats->last_failed_wal);
|
|
|
|
|
|
|
|
if (archiver_stats->last_failed_timestamp == 0)
|
|
|
|
nulls[5] = true;
|
|
|
|
else
|
|
|
|
values[5] = TimestampTzGetDatum(archiver_stats->last_failed_timestamp);
|
|
|
|
|
|
|
|
if (archiver_stats->stat_reset_timestamp == 0)
|
|
|
|
nulls[6] = true;
|
|
|
|
else
|
|
|
|
values[6] = TimestampTzGetDatum(archiver_stats->stat_reset_timestamp);
|
|
|
|
|
|
|
|
/* Returns the record as Datum */
|
|
|
|
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
|
|
|
|
}
|
2020-10-08 05:39:08 +02:00
|
|
|
|
2021-04-27 05:39:11 +02:00
|
|
|
/*
|
|
|
|
* Get the statistics for the replication slot. If the slot statistics is not
|
|
|
|
* available, return all-zeroes stats.
|
|
|
|
*/
|
2020-10-08 05:39:08 +02:00
|
|
|
Datum
|
2021-04-27 05:39:11 +02:00
|
|
|
pg_stat_get_replication_slot(PG_FUNCTION_ARGS)
|
2020-10-08 05:39:08 +02:00
|
|
|
{
|
2021-04-16 04:04:43 +02:00
|
|
|
#define PG_STAT_GET_REPLICATION_SLOT_COLS 10
|
2021-04-27 05:39:11 +02:00
|
|
|
text *slotname_text = PG_GETARG_TEXT_P(0);
|
|
|
|
NameData slotname;
|
2020-10-08 05:39:08 +02:00
|
|
|
TupleDesc tupdesc;
|
2022-07-16 08:42:15 +02:00
|
|
|
Datum values[PG_STAT_GET_REPLICATION_SLOT_COLS] = {0};
|
|
|
|
bool nulls[PG_STAT_GET_REPLICATION_SLOT_COLS] = {0};
|
2021-04-27 05:39:11 +02:00
|
|
|
PgStat_StatReplSlotEntry *slotent;
|
|
|
|
PgStat_StatReplSlotEntry allzero;
|
2020-10-08 05:39:08 +02:00
|
|
|
|
2021-04-27 05:39:11 +02:00
|
|
|
/* Initialise attributes information in the tuple descriptor */
|
|
|
|
tupdesc = CreateTemplateTupleDesc(PG_STAT_GET_REPLICATION_SLOT_COLS);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "slot_name",
|
|
|
|
TEXTOID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "spill_txns",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "spill_count",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 4, "spill_bytes",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 5, "stream_txns",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 6, "stream_count",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 7, "stream_bytes",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 8, "total_txns",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 9, "total_bytes",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 10, "stats_reset",
|
|
|
|
TIMESTAMPTZOID, -1, 0);
|
|
|
|
BlessTupleDesc(tupdesc);
|
2020-10-08 05:39:08 +02:00
|
|
|
|
2021-04-27 05:39:11 +02:00
|
|
|
namestrcpy(&slotname, text_to_cstring(slotname_text));
|
|
|
|
slotent = pgstat_fetch_replslot(slotname);
|
|
|
|
if (!slotent)
|
2020-10-08 05:39:08 +02:00
|
|
|
{
|
2021-04-27 05:39:11 +02:00
|
|
|
/*
|
|
|
|
* If the slot is not found, initialise its stats. This is possible if
|
|
|
|
* the create slot message is lost.
|
|
|
|
*/
|
|
|
|
memset(&allzero, 0, sizeof(PgStat_StatReplSlotEntry));
|
|
|
|
slotent = &allzero;
|
2020-10-08 05:39:08 +02:00
|
|
|
}
|
|
|
|
|
2021-04-27 05:39:11 +02:00
|
|
|
values[0] = CStringGetTextDatum(NameStr(slotname));
|
|
|
|
values[1] = Int64GetDatum(slotent->spill_txns);
|
|
|
|
values[2] = Int64GetDatum(slotent->spill_count);
|
|
|
|
values[3] = Int64GetDatum(slotent->spill_bytes);
|
|
|
|
values[4] = Int64GetDatum(slotent->stream_txns);
|
|
|
|
values[5] = Int64GetDatum(slotent->stream_count);
|
|
|
|
values[6] = Int64GetDatum(slotent->stream_bytes);
|
|
|
|
values[7] = Int64GetDatum(slotent->total_txns);
|
|
|
|
values[8] = Int64GetDatum(slotent->total_bytes);
|
2020-10-08 05:39:08 +02:00
|
|
|
|
2021-04-27 05:39:11 +02:00
|
|
|
if (slotent->stat_reset_timestamp == 0)
|
|
|
|
nulls[9] = true;
|
|
|
|
else
|
|
|
|
values[9] = TimestampTzGetDatum(slotent->stat_reset_timestamp);
|
|
|
|
|
|
|
|
/* Returns the record as Datum */
|
|
|
|
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
|
2020-10-08 05:39:08 +02:00
|
|
|
}
|
2021-11-30 04:24:30 +01:00
|
|
|
|
|
|
|
/*
|
2022-03-01 01:47:52 +01:00
|
|
|
* Get the subscription statistics for the given subscription. If the
|
|
|
|
* subscription statistics is not available, return all-zeros stats.
|
2021-11-30 04:24:30 +01:00
|
|
|
*/
|
|
|
|
Datum
|
2022-03-01 01:47:52 +01:00
|
|
|
pg_stat_get_subscription_stats(PG_FUNCTION_ARGS)
|
2021-11-30 04:24:30 +01:00
|
|
|
{
|
2022-03-01 01:47:52 +01:00
|
|
|
#define PG_STAT_GET_SUBSCRIPTION_STATS_COLS 4
|
2021-11-30 04:24:30 +01:00
|
|
|
Oid subid = PG_GETARG_OID(0);
|
|
|
|
TupleDesc tupdesc;
|
2022-07-16 08:42:15 +02:00
|
|
|
Datum values[PG_STAT_GET_SUBSCRIPTION_STATS_COLS] = {0};
|
|
|
|
bool nulls[PG_STAT_GET_SUBSCRIPTION_STATS_COLS] = {0};
|
2022-03-01 01:47:52 +01:00
|
|
|
PgStat_StatSubEntry *subentry;
|
|
|
|
PgStat_StatSubEntry allzero;
|
2021-11-30 04:24:30 +01:00
|
|
|
|
2022-03-01 01:47:52 +01:00
|
|
|
/* Get subscription stats */
|
|
|
|
subentry = pgstat_fetch_stat_subscription(subid);
|
2021-11-30 04:24:30 +01:00
|
|
|
|
|
|
|
/* Initialise attributes information in the tuple descriptor */
|
2022-03-01 01:47:52 +01:00
|
|
|
tupdesc = CreateTemplateTupleDesc(PG_STAT_GET_SUBSCRIPTION_STATS_COLS);
|
2021-11-30 04:24:30 +01:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "subid",
|
|
|
|
OIDOID, -1, 0);
|
2022-03-01 01:47:52 +01:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "apply_error_count",
|
2021-11-30 04:24:30 +01:00
|
|
|
INT8OID, -1, 0);
|
2022-03-01 01:47:52 +01:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "sync_error_count",
|
|
|
|
INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 4, "stats_reset",
|
2021-11-30 04:24:30 +01:00
|
|
|
TIMESTAMPTZOID, -1, 0);
|
|
|
|
BlessTupleDesc(tupdesc);
|
|
|
|
|
2022-03-01 01:47:52 +01:00
|
|
|
if (!subentry)
|
|
|
|
{
|
|
|
|
/* If the subscription is not found, initialise its stats */
|
|
|
|
memset(&allzero, 0, sizeof(PgStat_StatSubEntry));
|
|
|
|
subentry = &allzero;
|
|
|
|
}
|
2021-11-30 04:24:30 +01:00
|
|
|
|
2022-03-01 01:47:52 +01:00
|
|
|
/* subid */
|
|
|
|
values[0] = ObjectIdGetDatum(subid);
|
2021-11-30 04:24:30 +01:00
|
|
|
|
2022-03-01 01:47:52 +01:00
|
|
|
/* apply_error_count */
|
|
|
|
values[1] = Int64GetDatum(subentry->apply_error_count);
|
2021-11-30 04:24:30 +01:00
|
|
|
|
2022-03-01 01:47:52 +01:00
|
|
|
/* sync_error_count */
|
|
|
|
values[2] = Int64GetDatum(subentry->sync_error_count);
|
2021-11-30 04:24:30 +01:00
|
|
|
|
2022-03-01 01:47:52 +01:00
|
|
|
/* stats_reset */
|
|
|
|
if (subentry->stat_reset_timestamp == 0)
|
|
|
|
nulls[3] = true;
|
2021-11-30 04:24:30 +01:00
|
|
|
else
|
2022-03-01 01:47:52 +01:00
|
|
|
values[3] = TimestampTzGetDatum(subentry->stat_reset_timestamp);
|
2021-11-30 04:24:30 +01:00
|
|
|
|
|
|
|
/* Returns the record as Datum */
|
|
|
|
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
|
|
|
|
}
|
2022-04-07 09:03:58 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Checks for presence of stats for object with provided kind, database oid,
|
|
|
|
* object oid.
|
|
|
|
*
|
|
|
|
* This is useful for tests, but not really anything else. Therefore not
|
|
|
|
* documented.
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_stat_have_stats(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
char *stats_type = text_to_cstring(PG_GETARG_TEXT_P(0));
|
|
|
|
Oid dboid = PG_GETARG_OID(1);
|
|
|
|
Oid objoid = PG_GETARG_OID(2);
|
|
|
|
PgStat_Kind kind = pgstat_get_kind_from_str(stats_type);
|
|
|
|
|
|
|
|
PG_RETURN_BOOL(pgstat_have_entry(kind, dboid, objoid));
|
|
|
|
}
|