From 6cd4efa3e21170e66d5bbea318dc8843fadaaa2e Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Tue, 18 Mar 2025 12:57:31 +0700
Subject: [PATCH 01/11] Add profile_extended and history_extended views with
additional dimensions
Sometimes it is useful to have additional info collected along with wait events,
so we add new views/functions that include more information. The structure of
these views may change in future versions of the pg_wait_sampling extension.
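
A minimal usage sketch (assuming the extension has been updated to 1.2; the new
GUCs are reloadable, so ALTER SYSTEM plus pg_reload_conf() is enough):

    ALTER SYSTEM SET pg_wait_sampling.history_dimensions = 'role_id, database_id';
    SELECT pg_reload_conf();
    SELECT pid, ts, event_type, event, role_id, database_id
      FROM pg_wait_sampling_history_extended;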
---
Makefile | 2 +-
collector.c | 119 ++++-
expected/queries.out | 45 ++
meson.build | 1 +
pg_wait_sampling--1.1--1.2.sql | 79 ++++
pg_wait_sampling.c | 765 +++++++++++++++++++++++++++++++++
pg_wait_sampling.control | 2 +-
pg_wait_sampling.h | 63 ++-
sql/queries.sql | 14 +
9 files changed, 1067 insertions(+), 23 deletions(-)
create mode 100644 pg_wait_sampling--1.1--1.2.sql
diff --git a/Makefile b/Makefile
index 32711a3..f9de6d9 100644
--- a/Makefile
+++ b/Makefile
@@ -4,7 +4,7 @@ MODULE_big = pg_wait_sampling
OBJS = pg_wait_sampling.o collector.o
EXTENSION = pg_wait_sampling
-DATA = pg_wait_sampling--1.1.sql pg_wait_sampling--1.0--1.1.sql
+DATA = pg_wait_sampling--1.1.sql pg_wait_sampling--1.0--1.1.sql pg_wait_sampling--1.1--1.2.sql
REGRESS = load queries
diff --git a/collector.c b/collector.c
index 721299f..e073062 100644
--- a/collector.c
+++ b/collector.c
@@ -10,6 +10,7 @@
#include "postgres.h"
#include
+#include
#include "compat.h"
#include "miscadmin.h"
@@ -30,6 +31,13 @@
#include "utils/resowner.h"
#include "utils/timestamp.h"
+#define check_bestatus_dimensions(dimensions) \
+ (dimensions & (PGWS_DIMENSIONS_BE_TYPE |\
+ PGWS_DIMENSIONS_BE_STATE |\
+ PGWS_DIMENSIONS_BE_START_TIME |\
+ PGWS_DIMENSIONS_CLIENT_ADDR |\
+ PGWS_DIMENSIONS_CLIENT_HOSTNAME |\
+ PGWS_DIMENSIONS_APPNAME))
static volatile sig_atomic_t shutdown_requested = false;
static void handle_sigterm(SIGNAL_ARGS);
@@ -162,25 +170,103 @@ probe_waits(History *observations, HTAB *profile_hash,
LWLockAcquire(ProcArrayLock, LW_SHARED);
for (i = 0; i < ProcGlobal->allProcCount; i++)
{
- HistoryItem item,
+ HistoryItem item_history,
*observation;
+ ProfileItem item_profile;
PGPROC *proc = &ProcGlobal->allProcs[i];
+ int pid;
+ uint32 wait_event_info;
- if (!pgws_should_sample_proc(proc, &item.pid, &item.wait_event_info))
+ /* Check if we need to sample this process */
+ if (!pgws_should_sample_proc(proc, &pid, &wait_event_info))
continue;
+ /* We zero whole HistoryItem to avoid doing it field-by-field */
+ memset(&item_history, 0, sizeof(HistoryItem));
+ memset(&item_profile, 0, sizeof(ProfileItem));
+
+ item_history.pid = pid;
+ item_profile.pid = pid;
+
+ item_history.wait_event_info = wait_event_info;
+ item_profile.wait_event_info = wait_event_info;
+
if (pgws_profileQueries)
- item.queryId = pgws_proc_queryids[i];
- else
- item.queryId = 0;
+ {
+ item_history.queryId = pgws_proc_queryids[i];
+ item_profile.queryId = pgws_proc_queryids[i];
+ }
- item.ts = ts;
+ item_history.ts = ts;
+
+ /* Copy everything we need from PGPROC */
+ if (pgws_history_dimensions & PGWS_DIMENSIONS_ROLE_ID)
+ item_history.role_id = proc->roleId;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_ROLE_ID)
+ item_profile.role_id = proc->roleId;
+
+ if (pgws_history_dimensions & PGWS_DIMENSIONS_DB_ID)
+ item_history.database_id = proc->databaseId;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_DB_ID)
+ item_profile.database_id = proc->databaseId;
+
+ if (pgws_history_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
+ item_history.parallel_leader_pid = (proc->lockGroupLeader ?
+ proc->lockGroupLeader->pid :
+ 0);
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
+ item_profile.parallel_leader_pid = (proc->lockGroupLeader ?
+ proc->lockGroupLeader->pid :
+ 0);
+ /* Look into BackendStatus only if necessary */
+ if (check_bestatus_dimensions(pgws_history_dimensions) ||
+ check_bestatus_dimensions(pgws_profile_dimensions))
+ {
+#if PG_VERSION_NUM >= 170000
+ PgBackendStatus *bestatus = pgstat_get_beentry_by_proc_number(GetNumberFromPGProc(proc));
+#else
+ PgBackendStatus *bestatus = get_beentry_by_procpid(proc->pid);
+#endif
+ /* Copy everything we need from BackendStatus */
+ if (bestatus)
+ {
+ if (pgws_history_dimensions & PGWS_DIMENSIONS_BE_TYPE)
+ item_history.backend_type = bestatus->st_backendType;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE)
+ item_profile.backend_type = bestatus->st_backendType;
+
+ if (pgws_history_dimensions & PGWS_DIMENSIONS_BE_STATE)
+ item_history.backend_state = bestatus->st_state;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE)
+ item_profile.backend_state = bestatus->st_state;
+
+ if (pgws_history_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
+ item_history.proc_start = bestatus->st_proc_start_timestamp;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
+ item_profile.proc_start = bestatus->st_proc_start_timestamp;
+
+ if (pgws_history_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
+ item_history.client_addr = bestatus->st_clientaddr;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
+ item_profile.client_addr = bestatus->st_clientaddr;
+
+ if (pgws_history_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ strcpy(item_history.client_hostname, bestatus->st_clienthostname);
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ strcpy(item_profile.client_hostname, bestatus->st_clienthostname);
+
+ if (pgws_history_dimensions & PGWS_DIMENSIONS_APPNAME)
+ strcpy(item_history.appname, bestatus->st_appname);
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME)
+ strcpy(item_profile.appname, bestatus->st_appname);
+ }
+ }
/* Write to the history if needed */
if (write_history)
{
observation = get_next_observation(observations);
- *observation = item;
+ *observation = item_history;
}
/* Write to the profile if needed */
@@ -190,9 +276,9 @@ probe_waits(History *observations, HTAB *profile_hash,
bool found;
if (!profile_pid)
- item.pid = 0;
+ item_profile.pid = 0;
- profileItem = (ProfileItem *) hash_search(profile_hash, &item, HASH_ENTER, &found);
+ profileItem = (ProfileItem *) hash_search(profile_hash, &item_profile, HASH_ENTER, &found);
if (found)
profileItem->count++;
else
@@ -200,6 +286,11 @@ probe_waits(History *observations, HTAB *profile_hash,
}
}
LWLockRelease(ProcArrayLock);
+#if PG_VERSION_NUM >= 140000
+ pgstat_clear_backend_activity_snapshot();
+#else
+ pgstat_clear_snapshot();
+#endif
}
/*
@@ -287,10 +378,12 @@ make_profile_hash()
{
HASHCTL hash_ctl;
- if (pgws_profileQueries)
- hash_ctl.keysize = offsetof(ProfileItem, count);
- else
- hash_ctl.keysize = offsetof(ProfileItem, queryId);
+ /*
+	 * Since adding additional dimensions we include everything except count
+ * into hashtable key. This is fine for cases when some fields are 0 since
+ * it doesn't impede our ability to search the hash table for entries
+ */
+ hash_ctl.keysize = offsetof(ProfileItem, count);
hash_ctl.entrysize = sizeof(ProfileItem);
return hash_create("Waits profile hash", 1024, &hash_ctl,
diff --git a/expected/queries.out b/expected/queries.out
index 722df5f..6718c14 100644
--- a/expected/queries.out
+++ b/expected/queries.out
@@ -20,6 +20,27 @@ WITH t as (SELECT sum(0) FROM pg_wait_sampling_profile)
0
(1 row)
+WITH t as (SELECT sum(0) FROM pg_wait_sampling_current_extended)
+ SELECT sum(0) FROM generate_series(1, 2), t;
+ sum
+-----
+ 0
+(1 row)
+
+WITH t as (SELECT sum(0) FROM pg_wait_sampling_history_extended)
+ SELECT sum(0) FROM generate_series(1, 2), t;
+ sum
+-----
+ 0
+(1 row)
+
+WITH t as (SELECT sum(0) FROM pg_wait_sampling_profile_extended)
+ SELECT sum(0) FROM generate_series(1, 2), t;
+ sum
+-----
+ 0
+(1 row)
+
-- Some dummy checks just to be sure that all our functions work and return something.
SELECT count(*) = 1 as test FROM pg_wait_sampling_get_current(pg_backend_pid());
test
@@ -45,4 +66,28 @@ SELECT pg_wait_sampling_reset_profile();
(1 row)
+SELECT count(*) = 1 as test FROM pg_wait_sampling_get_current_extended(pg_backend_pid());
+ test
+------
+ t
+(1 row)
+
+SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_profile_extended();
+ test
+------
+ t
+(1 row)
+
+SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_history_extended();
+ test
+------
+ t
+(1 row)
+
+SELECT pg_wait_sampling_reset_profile();
+ pg_wait_sampling_reset_profile
+--------------------------------
+
+(1 row)
+
DROP EXTENSION pg_wait_sampling;
diff --git a/meson.build b/meson.build
index c3c3dc9..162bb0e 100644
--- a/meson.build
+++ b/meson.build
@@ -24,6 +24,7 @@ install_data(
'pg_wait_sampling.control',
'pg_wait_sampling--1.0--1.1.sql',
'pg_wait_sampling--1.1.sql',
+ 'pg_wait_sampling--1.1--1.2.sql',
kwargs: contrib_data_args,
)
diff --git a/pg_wait_sampling--1.1--1.2.sql b/pg_wait_sampling--1.1--1.2.sql
new file mode 100644
index 0000000..df95826
--- /dev/null
+++ b/pg_wait_sampling--1.1--1.2.sql
@@ -0,0 +1,79 @@
+/* contrib/pg_wait_sampling/pg_wait_sampling--1.1--1.2.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_wait_sampling UPDATE TO 1.2" to load this file. \quit
+
+CREATE FUNCTION pg_wait_sampling_get_current_extended (
+ pid int4,
+ OUT pid int4,
+ OUT event_type text,
+ OUT event text,
+ OUT queryid int8,
+ OUT role_id int8,
+ OUT database_id int8,
+ OUT parallel_leader_pid int4,
+ OUT backend_type text,
+ OUT backend_state text,
+ OUT proc_start timestamptz,
+ OUT client_addr text,
+ OUT client_hostname text,
+ OUT appname text
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME'
+LANGUAGE C VOLATILE CALLED ON NULL INPUT;
+
+CREATE VIEW pg_wait_sampling_current_extended AS
+ SELECT * FROM pg_wait_sampling_get_current_extended(NULL::integer);
+
+GRANT SELECT ON pg_wait_sampling_current_extended TO PUBLIC;
+
+CREATE FUNCTION pg_wait_sampling_get_history_extended (
+ OUT pid int4,
+ OUT ts timestamptz,
+ OUT event_type text,
+ OUT event text,
+ OUT queryid int8,
+ OUT role_id int8,
+ OUT database_id int8,
+ OUT parallel_leader_pid int4,
+ OUT backend_type text,
+ OUT backend_state text,
+ OUT proc_start timestamptz,
+ OUT client_addr text,
+ OUT client_hostname text,
+ OUT appname text
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME'
+LANGUAGE C VOLATILE STRICT;
+
+CREATE VIEW pg_wait_sampling_history_extended AS
+ SELECT * FROM pg_wait_sampling_get_history_extended();
+
+GRANT SELECT ON pg_wait_sampling_history_extended TO PUBLIC;
+
+CREATE FUNCTION pg_wait_sampling_get_profile_extended (
+ OUT pid int4,
+ OUT event_type text,
+ OUT event text,
+ OUT queryid int8,
+ OUT role_id int8,
+ OUT database_id int8,
+ OUT parallel_leader_pid int4,
+ OUT backend_type text,
+ OUT backend_state text,
+ OUT proc_start timestamptz,
+ OUT client_addr text,
+ OUT client_hostname text,
+ OUT appname text,
+ OUT count int8
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME'
+LANGUAGE C VOLATILE STRICT;
+
+CREATE VIEW pg_wait_sampling_profile_extended AS
+ SELECT * FROM pg_wait_sampling_get_profile_extended();
+
+GRANT SELECT ON pg_wait_sampling_profile_extended TO PUBLIC;
diff --git a/pg_wait_sampling.c b/pg_wait_sampling.c
index e165a6a..fcc4384 100644
--- a/pg_wait_sampling.c
+++ b/pg_wait_sampling.c
@@ -13,6 +13,7 @@
#include "access/htup_details.h"
#include "catalog/pg_type_d.h"
+#include "common/ip.h"
#include "executor/executor.h"
#include "funcapi.h"
#include "miscadmin.h"
@@ -32,6 +33,7 @@
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/timestamp.h"
+#include "utils/varlena.h"
#if PG_VERSION_NUM < 150000
#include "postmaster/autovacuum.h"
@@ -133,6 +135,10 @@ int pgws_profilePeriod = 10;
bool pgws_profilePid = true;
int pgws_profileQueries = PGWS_PROFILE_QUERIES_TOP;
bool pgws_sampleCpu = true;
+static char *pgws_history_dimensions_string = NULL;
+static char *pgws_profile_dimensions_string = NULL;
+int pgws_history_dimensions; /* bit mask that is derived from GUC */
+int pgws_profile_dimensions; /* bit mask that is derived from GUC */
#define pgws_enabled(level) \
((pgws_profileQueries == PGWS_PROFILE_QUERIES_ALL) || \
@@ -301,6 +307,109 @@ pgws_cleanup_callback(int code, Datum arg)
LockRelease(&queueTag, ExclusiveLock, false);
}
+/*
+ * Check tokens of string and fill bitmask accordingly
+ * Mostly copied from plpgsql_extra_checks_check_hook
+ */
+static bool
+pgws_general_dimensions_check_hook (char **newvalue, void **extra, GucSource source)
+{
+ char *rawstring;
+ List *elemlist;
+ ListCell *l;
+ int extrachecks = 0;
+ int *myextra;
+
+ /* Check special cases when we turn all or none dimensions */
+ if (pg_strcasecmp(*newvalue, "all") == 0)
+ extrachecks = PGWS_DIMENSIONS_ALL;
+ else if (pg_strcasecmp(*newvalue, "none") == 0)
+ extrachecks = PGWS_DIMENSIONS_NONE;
+ else
+ {
+ /* Need a modifiable copy of string */
+ rawstring = pstrdup(*newvalue);
+
+ /* Parse string into list of identifiers */
+ if (!SplitIdentifierString(rawstring, ',', &elemlist))
+ {
+ /* syntax error in list */
+ GUC_check_errdetail("List syntax is invalid.");
+ pfree(rawstring);
+ list_free(elemlist);
+ return false;
+ }
+
+		/* Loop over all received options */
+ foreach(l, elemlist)
+ {
+ char *tok = (char *) lfirst(l);
+
+ /* Process all allowed values */
+ if (pg_strcasecmp(tok, "role_id") == 0)
+ extrachecks |= PGWS_DIMENSIONS_ROLE_ID;
+ else if (pg_strcasecmp(tok, "database_id") == 0)
+ extrachecks |= PGWS_DIMENSIONS_DB_ID;
+ else if (pg_strcasecmp(tok, "parallel_leader_pid") == 0)
+ extrachecks |= PGWS_DIMENSIONS_PARALLEL_LEADER_PID;
+ else if (pg_strcasecmp(tok, "backend_type") == 0)
+ extrachecks |= PGWS_DIMENSIONS_BE_TYPE;
+ else if (pg_strcasecmp(tok, "backend_state") == 0)
+ extrachecks |= PGWS_DIMENSIONS_BE_STATE;
+ else if (pg_strcasecmp(tok, "backend_start_time") == 0)
+ extrachecks |= PGWS_DIMENSIONS_BE_START_TIME;
+ else if (pg_strcasecmp(tok, "client_addr") == 0)
+ extrachecks |= PGWS_DIMENSIONS_CLIENT_ADDR;
+ else if (pg_strcasecmp(tok, "client_hostname") == 0)
+ extrachecks |= PGWS_DIMENSIONS_CLIENT_HOSTNAME;
+ else if (pg_strcasecmp(tok, "appname") == 0)
+ extrachecks |= PGWS_DIMENSIONS_APPNAME;
+ else if (pg_strcasecmp(tok, "all") == 0 || pg_strcasecmp(tok, "none") == 0)
+ {
+ GUC_check_errdetail("Key word \"%s\" cannot be combined with other key words.", tok);
+ pfree(rawstring);
+ list_free(elemlist);
+ return false;
+ }
+ else
+ {
+ GUC_check_errdetail("Unrecognized key word: \"%s\".", tok);
+ pfree(rawstring);
+ list_free(elemlist);
+ return false;
+ }
+ }
+
+ pfree(rawstring);
+ list_free(elemlist);
+ }
+#if PG_VERSION_NUM >= 160000
+ myextra = (int *) guc_malloc(LOG, sizeof(int));
+#else
+ myextra = (int *) malloc(sizeof(int));
+#endif
+ if (!myextra)
+ return false;
+ *myextra = extrachecks;
+ *extra = myextra;
+
+ return true;
+}
+
+/* Assign actual value to dimension bitmask */
+static void
+pgws_history_dimensions_assign_hook (const char *newvalue, void *extra)
+{
+ pgws_history_dimensions = *((int *) extra);
+}
+
+/* Assign actual value to dimension bitmask */
+static void
+pgws_profile_dimensions_assign_hook (const char *newvalue, void *extra)
+{
+ pgws_profile_dimensions = *((int *) extra);
+}
+
/*
* Module load callback
*/
@@ -421,6 +530,28 @@ _PG_init(void)
NULL,
NULL);
+ DefineCustomStringVariable("pg_wait_sampling.history_dimensions",
+ "Sets sampling dimensions for history",
+ NULL,
+ &pgws_history_dimensions_string,
+ "none",
+ PGC_SIGHUP,
+ GUC_LIST_INPUT,
+ pgws_general_dimensions_check_hook,
+ pgws_history_dimensions_assign_hook,
+ NULL);
+
+ DefineCustomStringVariable("pg_wait_sampling.profile_dimensions",
+ "Sets sampling dimensions for profile",
+ NULL,
+ &pgws_profile_dimensions_string,
+ "none",
+ PGC_SIGHUP,
+ GUC_LIST_INPUT,
+ pgws_general_dimensions_check_hook,
+ pgws_profile_dimensions_assign_hook,
+ NULL);
+
#if PG_VERSION_NUM >= 150000
MarkGUCPrefixReserved("pg_wait_sampling");
#endif
@@ -609,6 +740,332 @@ pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
}
}
+static Datum
+GetBackendState(BackendState state, bool *is_null)
+{
+ switch (state)
+ {
+#if PG_VERSION_NUM >= 180000
+ case STATE_STARTING:
+ return CStringGetTextDatum("starting");
+#endif
+ case STATE_IDLE:
+ return CStringGetTextDatum("idle");
+ case STATE_RUNNING:
+ return CStringGetTextDatum("active");
+ case STATE_IDLEINTRANSACTION:
+ return CStringGetTextDatum("idle in transaction");
+ case STATE_FASTPATH:
+ return CStringGetTextDatum("fastpath function call");
+ case STATE_IDLEINTRANSACTION_ABORTED:
+ return CStringGetTextDatum("idle in transaction (aborted)");
+ case STATE_DISABLED:
+ return CStringGetTextDatum("disabled");
+ case STATE_UNDEFINED:
+ *is_null = true;
+ }
+ return (Datum) 0;
+}
+
+/* Copied from pg_stat_get_backend_client_addr */
+static Datum
+get_backend_client_addr(SockAddr client_addr, bool *is_null)
+{
+ char remote_host[NI_MAXHOST];
+ int ret;
+
+ /* A zeroed client addr means we don't know */
+#if PG_VERSION_NUM >= 180000
+ if (pg_memory_is_all_zeros(&client_addr,
+ sizeof(client_addr)))
+#else
+ SockAddr zero_clientaddr;
+
+ memset(&zero_clientaddr, 0, sizeof(zero_clientaddr));
+ if (memcmp(&client_addr, &zero_clientaddr,
+ sizeof(zero_clientaddr)) == 0)
+#endif
+ {
+ *is_null = true;
+ return (Datum) 0;
+ }
+
+ switch (client_addr.addr.ss_family)
+ {
+ case AF_INET:
+ case AF_INET6:
+ break;
+ default:
+ *is_null = true;
+ return (Datum) 0;
+ }
+
+ remote_host[0] = '\0';
+ ret = pg_getnameinfo_all(&client_addr.addr,
+ client_addr.salen,
+ remote_host, sizeof(remote_host),
+ NULL, 0,
+ NI_NUMERICHOST | NI_NUMERICSERV);
+ if (ret != 0)
+ {
+ *is_null = true;
+ return (Datum) 0;
+ }
+
+ clean_ipv6_addr(client_addr.addr.ss_family, remote_host);
+
+ return (DirectFunctionCall1(inet_in, CStringGetDatum(remote_host)));
+}
+
+/*
+ * Needed for PostgreSQL 16 and earlier since there is no good way to get
+ * PgBackendStatus when having only PGPROC structure.
+ *
+ * pgstat_fetch_stat_beentry (13-15) works with indices of localBackendStatusTable
+ * pgstat_get_beentry_by_backend_id (16) works with "backend_ids", but we still
+ * cannot get them without looking into LocalPgBackendStatus, so work with indices
+ *
+ * This function is very inefficient
+ *
+ * Maybe we should just iterate over localBackendStatusTable and somehow get
+ * PGPROC entries from there but it is up for discussion
+ */
+PgBackendStatus *
+get_beentry_by_procpid(int pid)
+{
+ int backend_num = pgstat_fetch_stat_numbackends(), cur_be_idx;
+
+ for (cur_be_idx = 1; cur_be_idx <= backend_num; cur_be_idx++)
+ {
+ LocalPgBackendStatus *local_beentry;
+
+#if PG_VERSION_NUM >= 160000
+ local_beentry = pgstat_get_local_beentry_by_index(cur_be_idx);
+#else
+ /* Here beid is just index in localBackendStatusTable */
+ local_beentry = pgstat_fetch_stat_local_beentry(cur_be_idx);
+#endif
+ if (local_beentry->backendStatus.st_procpid == pid)
+ return &local_beentry->backendStatus;
+ }
+ return NULL;
+}
+
+PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current_extended);
+Datum
+pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
+{
+ FuncCallContext *funcctx;
+ WaitCurrentContext *params;
+
+ check_shmem();
+
+ /* Initialization, done only on the first call */
+ if (SRF_IS_FIRSTCALL())
+ {
+ MemoryContext oldcontext;
+ TupleDesc tupdesc;
+
+ funcctx = SRF_FIRSTCALL_INIT();
+
+ oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+ params = (WaitCurrentContext *) palloc0(sizeof(WaitCurrentContext));
+ params->ts = GetCurrentTimestamp();
+
+ funcctx->user_fctx = params;
+ /* Setup tuple desc */
+ tupdesc = CreateTemplateTupleDesc(13);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 5, "role_id",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 6, "database_id",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 7, "parallel_leader_pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 8, "backend_type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_state",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 10, "proc_start",
+ TIMESTAMPTZOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 11, "client_addr",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_hostname",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 13, "appname",
+ TEXTOID, -1, 0);
+
+ funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+
+ LWLockAcquire(ProcArrayLock, LW_SHARED);
+
+ if (!PG_ARGISNULL(0))
+ {
+ /* pg_wait_sampling_get_current_extended(pid int4) function */
+ HistoryItem *item;
+ PGPROC *proc;
+ PgBackendStatus *bestatus;
+
+ proc = search_proc(PG_GETARG_UINT32(0));
+#if PG_VERSION_NUM >= 170000
+ bestatus = pgstat_get_beentry_by_proc_number(GetNumberFromPGProc(proc));
+#else
+ bestatus = get_beentry_by_procpid(proc->pid);
+#endif
+ params->items = (HistoryItem *) palloc0(sizeof(HistoryItem));
+		item = &params->items[0];
+ /* Show all fields without looking at GUC variables */
+ item->pid = proc->pid;
+ item->wait_event_info = proc->wait_event_info;
+ item->queryId = pgws_proc_queryids[proc - ProcGlobal->allProcs];
+ item->role_id = proc->roleId;
+ item->database_id = proc->databaseId;
+ item->parallel_leader_pid = (proc->lockGroupLeader ?
+ proc->lockGroupLeader->pid :
+ 0);
+ if (bestatus)
+ {
+ item->backend_type = bestatus->st_backendType;
+ item->backend_state = bestatus->st_state;
+ item->proc_start = bestatus->st_proc_start_timestamp;
+ item->client_addr = bestatus->st_clientaddr;
+ strcpy(item->client_hostname, bestatus->st_clienthostname);
+ strcpy(item->appname, bestatus->st_appname);
+ }
+ funcctx->max_calls = 1;
+ }
+ else
+ {
+ /* pg_wait_sampling_current view */
+ int procCount = ProcGlobal->allProcCount,
+ i,
+ j = 0;
+
+ params->items = (HistoryItem *) palloc0(sizeof(HistoryItem) * procCount);
+ for (i = 0; i < procCount; i++)
+ {
+ PGPROC *proc = &ProcGlobal->allProcs[i];
+#if PG_VERSION_NUM >= 170000
+ PgBackendStatus *bestatus = pgstat_get_beentry_by_proc_number(GetNumberFromPGProc(proc));
+#else
+ PgBackendStatus *bestatus = get_beentry_by_procpid(proc->pid);
+#endif
+
+ if (!pgws_should_sample_proc(proc,
+										 &params->items[j].pid,
+										 &params->items[j].wait_event_info))
+ continue;
+
+ /* Show all fields without looking at GUC variables */
+ params->items[j].pid = proc->pid;
+ params->items[j].wait_event_info = proc->wait_event_info;
+ params->items[j].queryId = pgws_proc_queryids[i];
+ params->items[j].role_id = proc->roleId;
+ params->items[j].database_id = proc->databaseId;
+ params->items[j].parallel_leader_pid = (proc->lockGroupLeader ?
+ proc->lockGroupLeader->pid :
+ 0);
+ if (bestatus)
+ {
+ params->items[j].backend_type = bestatus->st_backendType;
+ params->items[j].backend_state = bestatus->st_state;
+ params->items[j].proc_start = bestatus->st_proc_start_timestamp;
+ params->items[j].client_addr = bestatus->st_clientaddr;
+ strcpy(params->items[j].client_hostname, bestatus->st_clienthostname);
+ strcpy(params->items[j].appname, bestatus->st_appname);
+ }
+ j++;
+ }
+ funcctx->max_calls = j;
+ }
+
+ LWLockRelease(ProcArrayLock);
+#if PG_VERSION_NUM >= 140000
+ pgstat_clear_backend_activity_snapshot();
+#else
+ pgstat_clear_snapshot();
+#endif
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+
+ /* stuff done on every call of the function */
+ funcctx = SRF_PERCALL_SETUP();
+ params = (WaitCurrentContext *) funcctx->user_fctx;
+
+ if (funcctx->call_cntr < funcctx->max_calls)
+ {
+ HeapTuple tuple;
+ Datum values[13];
+ bool nulls[13];
+ const char *event_type,
+ *event,
+ *backend_type;
+ Datum backend_state, proc_start, client_addr;
+ bool is_null_be_state = false,
+ is_null_client_addr = false;
+ HistoryItem *item;
+
+		item = &params->items[funcctx->call_cntr];
+
+ /* Make and return next tuple to caller */
+ MemSet(values, 0, sizeof(values));
+ MemSet(nulls, 0, sizeof(nulls));
+
+ event_type = pgstat_get_wait_event_type(item->wait_event_info);
+ event = pgstat_get_wait_event(item->wait_event_info);
+ backend_type = GetBackendTypeDesc(item->backend_type);
+ backend_state = GetBackendState(item->backend_state, &is_null_be_state);
+ proc_start = TimestampTzGetDatum(item->proc_start);
+ client_addr = get_backend_client_addr(item->client_addr, &is_null_client_addr);
+
+ values[0] = Int32GetDatum(item->pid);
+ if (event_type)
+ values[1] = PointerGetDatum(cstring_to_text(event_type));
+ else
+ nulls[1] = true;
+ if (event)
+ values[2] = PointerGetDatum(cstring_to_text(event));
+ else
+ nulls[2] = true;
+ values[3] = UInt64GetDatum(item->queryId);
+ values[4] = ObjectIdGetDatum(item->role_id);
+ values[5] = ObjectIdGetDatum(item->database_id);
+ values[6] = Int32GetDatum(item->parallel_leader_pid);
+ if (backend_type)
+ values[7] = PointerGetDatum(cstring_to_text(backend_type));
+ else
+ nulls[7] = true;
+ if (!is_null_be_state)
+ values[8] = backend_state;
+ else
+ nulls[8] = true;
+ values[9] = proc_start;
+ if (!is_null_client_addr)
+ values[10] = client_addr;
+ else
+ nulls[10] = true;
+ values[11] = PointerGetDatum(cstring_to_text(item->client_hostname));
+ values[12] = PointerGetDatum(cstring_to_text(item->appname));
+
+ tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+
+ SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
+ }
+ else
+ {
+ /* nothing left */
+ SRF_RETURN_DONE(funcctx);
+ }
+}
+
typedef struct
{
Size count;
@@ -806,6 +1263,161 @@ pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
}
}
+PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile_extended);
+Datum
+pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
+{
+ Profile *profile;
+ FuncCallContext *funcctx;
+
+ check_shmem();
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ MemoryContext oldcontext;
+ TupleDesc tupdesc;
+
+ funcctx = SRF_FIRSTCALL_INIT();
+ oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+
+ /* Receive profile from shmq */
+ profile = (Profile *) palloc0(sizeof(Profile));
+ profile->items = (ProfileItem *) receive_array(PROFILE_REQUEST,
+ sizeof(ProfileItem), &profile->count);
+
+ funcctx->user_fctx = profile;
+ funcctx->max_calls = profile->count;
+
+ /* Make tuple descriptor */
+ tupdesc = CreateTemplateTupleDesc(14);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 5, "role_id",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 6, "database_id",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 7, "parallel_leader_pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 8, "backend_type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_state",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 10, "proc_start",
+ TIMESTAMPTZOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 11, "client_addr",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_hostname",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 13, "appname",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 14, "count",
+ INT8OID, -1, 0);
+ funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+
+ /* stuff done on every call of the function */
+ funcctx = SRF_PERCALL_SETUP();
+
+ profile = (Profile *) funcctx->user_fctx;
+
+ if (funcctx->call_cntr < funcctx->max_calls)
+ {
+ /* for each row */
+ Datum values[14];
+ bool nulls[14];
+ HeapTuple tuple;
+ ProfileItem *item;
+ const char *event_type,
+ *event,
+ *backend_type;
+ Datum backend_state, proc_start, client_addr;
+ bool is_null_be_state = false,
+ is_null_client_addr = false;
+
+ item = &profile->items[funcctx->call_cntr];
+
+ MemSet(values, 0, sizeof(values));
+ MemSet(nulls, 0, sizeof(nulls));
+
+ /* Make and return next tuple to caller */
+ event_type = pgstat_get_wait_event_type(item->wait_event_info);
+ event = pgstat_get_wait_event(item->wait_event_info);
+ backend_type = GetBackendTypeDesc(item->backend_type);
+ backend_state = GetBackendState(item->backend_state, &is_null_be_state);
+ proc_start = TimestampTzGetDatum(item->proc_start);
+ client_addr = get_backend_client_addr(item->client_addr, &is_null_client_addr);
+
+ values[0] = Int32GetDatum(item->pid);
+ if (event_type)
+ values[1] = PointerGetDatum(cstring_to_text(event_type));
+ else
+ nulls[1] = true;
+ if (event)
+ values[2] = PointerGetDatum(cstring_to_text(event));
+ else
+ nulls[2] = true;
+ if (pgws_profileQueries)
+ values[3] = UInt64GetDatum(item->queryId);
+ else
+ values[3] = (Datum) 0;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_ROLE_ID)
+ values[4] = ObjectIdGetDatum(item->role_id);
+ else
+ nulls[4] = true;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_DB_ID)
+ values[5] = ObjectIdGetDatum(item->database_id);
+ else
+ nulls[5] = true;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
+ values[6] = Int32GetDatum(item->parallel_leader_pid);
+ else
+ nulls[6] = true;
+ if (backend_type && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE))
+ values[7] = PointerGetDatum(cstring_to_text(backend_type));
+ else
+ nulls[7] = true;
+ if (!is_null_be_state && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE))
+ values[8] = backend_state;
+ else
+ nulls[8] = true;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
+ values[9] = proc_start;
+ else
+ nulls[9] = true;
+ if (!is_null_client_addr && pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
+ values[10] = client_addr;
+ else
+ nulls[10] = true;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ values[11] = PointerGetDatum(cstring_to_text(item->client_hostname));
+ else
+ nulls[11] = true;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME)
+ values[12] = PointerGetDatum(cstring_to_text(item->appname));
+ else
+ nulls[12] = true;
+
+ values[13] = UInt64GetDatum(item->count);
+
+ tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+
+ SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
+ }
+ else
+ {
+ /* nothing left */
+ SRF_RETURN_DONE(funcctx);
+ }
+}
+
PG_FUNCTION_INFO_V1(pg_wait_sampling_reset_profile);
Datum
pg_wait_sampling_reset_profile(PG_FUNCTION_ARGS)
@@ -928,6 +1540,159 @@ pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
+PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history_extended);
+Datum
+pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
+{
+ History *history;
+ FuncCallContext *funcctx;
+
+ check_shmem();
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ MemoryContext oldcontext;
+ TupleDesc tupdesc;
+
+ funcctx = SRF_FIRSTCALL_INIT();
+ oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+
+ /* Receive history from shmq */
+ history = (History *) palloc0(sizeof(History));
+ history->items = (HistoryItem *) receive_array(HISTORY_REQUEST,
+ sizeof(HistoryItem), &history->count);
+
+ funcctx->user_fctx = history;
+ funcctx->max_calls = history->count;
+
+ /* Make tuple descriptor */
+ tupdesc = CreateTemplateTupleDesc(14);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2, "sample_ts",
+ TIMESTAMPTZOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3, "type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 4, "event",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 5, "queryid",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 6, "role_id",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 7, "database_id",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 8, "parallel_leader_pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_state",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 11, "proc_start",
+ TIMESTAMPTZOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_addr",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_hostname",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 14, "appname",
+ TEXTOID, -1, 0);
+ funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+
+ /* stuff done on every call of the function */
+ funcctx = SRF_PERCALL_SETUP();
+
+ history = (History *) funcctx->user_fctx;
+
+ if (history->index < history->count)
+ {
+ HeapTuple tuple;
+ HistoryItem *item;
+ Datum values[14];
+ bool nulls[14];
+ const char *event_type,
+ *event,
+ *backend_type;
+ Datum backend_state, proc_start, client_addr;
+ bool is_null_be_state = false,
+ is_null_client_addr = false;
+
+ item = &history->items[history->index];
+
+ /* Make and return next tuple to caller */
+ MemSet(values, 0, sizeof(values));
+ MemSet(nulls, 0, sizeof(nulls));
+
+ event_type = pgstat_get_wait_event_type(item->wait_event_info);
+ event = pgstat_get_wait_event(item->wait_event_info);
+ backend_type = GetBackendTypeDesc(item->backend_type);
+ backend_state = GetBackendState(item->backend_state, &is_null_be_state);
+ proc_start = TimestampTzGetDatum(item->proc_start);
+ client_addr = get_backend_client_addr(item->client_addr, &is_null_client_addr);
+
+ values[0] = Int32GetDatum(item->pid);
+ values[1] = TimestampTzGetDatum(item->ts);
+ if (event_type)
+ values[2] = PointerGetDatum(cstring_to_text(event_type));
+ else
+ nulls[2] = true;
+ if (event)
+ values[3] = PointerGetDatum(cstring_to_text(event));
+ else
+ nulls[3] = true;
+ values[4] = UInt64GetDatum(item->queryId);
+		if (pgws_history_dimensions & PGWS_DIMENSIONS_ROLE_ID)
+ values[5] = ObjectIdGetDatum(item->role_id);
+ else
+ nulls[5] = true;
+		if (pgws_history_dimensions & PGWS_DIMENSIONS_DB_ID)
+ values[6] = ObjectIdGetDatum(item->database_id);
+ else
+ nulls[6] = true;
+		if (pgws_history_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
+ values[7] = Int32GetDatum(item->parallel_leader_pid);
+ else
+ nulls[7] = true;
+		if (backend_type && (pgws_history_dimensions & PGWS_DIMENSIONS_BE_TYPE))
+ values[8] = PointerGetDatum(cstring_to_text(backend_type));
+ else
+ nulls[8] = true;
+		if (!is_null_be_state && (pgws_history_dimensions & PGWS_DIMENSIONS_BE_STATE))
+ values[9] = backend_state;
+ else
+ nulls[9] = true;
+		if (pgws_history_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
+ values[10] = proc_start;
+ else
+ nulls[10] = true;
+		if (!is_null_client_addr && pgws_history_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
+ values[11] = client_addr;
+ else
+ nulls[11] = true;
+		if (pgws_history_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ values[12] = PointerGetDatum(cstring_to_text(item->client_hostname));
+ else
+ nulls[12] = true;
+		if (pgws_history_dimensions & PGWS_DIMENSIONS_APPNAME)
+ values[13] = PointerGetDatum(cstring_to_text(item->appname));
+ else
+ nulls[13] = true;
+
+ tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+
+ history->index++;
+ SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
+ }
+ else
+ {
+ /* nothing left */
+ SRF_RETURN_DONE(funcctx);
+ }
+
+ PG_RETURN_VOID();
+}
+
/*
* planner_hook hook, save queryId for collector
*/
diff --git a/pg_wait_sampling.control b/pg_wait_sampling.control
index 97d9a34..d2d0ffe 100644
--- a/pg_wait_sampling.control
+++ b/pg_wait_sampling.control
@@ -1,5 +1,5 @@
# pg_wait_sampling extension
comment = 'sampling based statistics of wait events'
-default_version = '1.1'
+default_version = '1.2'
module_pathname = '$libdir/pg_wait_sampling'
relocatable = true
diff --git a/pg_wait_sampling.h b/pg_wait_sampling.h
index dab773c..9009fe8 100644
--- a/pg_wait_sampling.h
+++ b/pg_wait_sampling.h
@@ -15,26 +15,70 @@
#include "storage/lock.h"
#include "storage/shm_mq.h"
+#if PG_VERSION_NUM >= 140000
+#include "utils/backend_status.h"
+#else
+#include "pgstat.h"
+#endif
+
#define PG_WAIT_SAMPLING_MAGIC 0xCA94B107
#define COLLECTOR_QUEUE_SIZE (16 * 1024)
#define HISTORY_TIME_MULTIPLIER 10
#define PGWS_QUEUE_LOCK 0
#define PGWS_COLLECTOR_LOCK 1
+/* Values for sampling dimensions */
+#define PGWS_DIMENSIONS_NONE 0
+
+#define PGWS_DIMENSIONS_ROLE_ID (1 << 1)
+#define PGWS_DIMENSIONS_DB_ID (1 << 2)
+#define PGWS_DIMENSIONS_PARALLEL_LEADER_PID (1 << 3)
+#define PGWS_DIMENSIONS_BE_TYPE (1 << 4)
+#define PGWS_DIMENSIONS_BE_STATE (1 << 5)
+#define PGWS_DIMENSIONS_BE_START_TIME (1 << 6)
+#define PGWS_DIMENSIONS_CLIENT_ADDR (1 << 7)
+#define PGWS_DIMENSIONS_CLIENT_HOSTNAME (1 << 8)
+#define PGWS_DIMENSIONS_APPNAME (1 << 9)
+
+#define PGWS_DIMENSIONS_ALL ((int) ~0)
+/* ^ all bits set */
+
+/*
+ * The next two structures must have identical fields up to count/ts so that
+ * make_profile_hash works properly
+ */
typedef struct
{
- int pid;
- uint32 wait_event_info;
- uint64 queryId;
- uint64 count;
+ int pid;
+ uint32 wait_event_info;
+ uint64 queryId;
+ Oid role_id;
+ Oid database_id;
+ int parallel_leader_pid;
+ BackendType backend_type;
+ BackendState backend_state;
+ TimestampTz proc_start;
+ SockAddr client_addr;
+ char client_hostname[NAMEDATALEN];
+ char appname[NAMEDATALEN];
+ uint64 count;
} ProfileItem;
typedef struct
{
- int pid;
- uint32 wait_event_info;
- uint64 queryId;
- TimestampTz ts;
+ int pid;
+ uint32 wait_event_info;
+ uint64 queryId;
+ Oid role_id;
+ Oid database_id;
+ int parallel_leader_pid;
+ BackendType backend_type;
+ BackendState backend_state;
+ TimestampTz proc_start;
+ SockAddr client_addr;
+ char client_hostname[NAMEDATALEN];
+ char appname[NAMEDATALEN];
+ TimestampTz ts;
} HistoryItem;
typedef struct
@@ -73,6 +117,9 @@ extern shm_mq *pgws_collector_mq;
extern uint64 *pgws_proc_queryids;
extern void pgws_init_lock_tag(LOCKTAG *tag, uint32 lock);
extern bool pgws_should_sample_proc(PGPROC *proc, int *pid_p, uint32 *wait_event_info_p);
+extern int pgws_history_dimensions; /* bit mask that is derived from GUC */
+extern int pgws_profile_dimensions; /* bit mask that is derived from GUC */
+extern PgBackendStatus* get_beentry_by_procpid(int pid);
/* collector.c */
extern void pgws_register_wait_collector(void);
diff --git a/sql/queries.sql b/sql/queries.sql
index de44c6d..6658c74 100644
--- a/sql/queries.sql
+++ b/sql/queries.sql
@@ -9,10 +9,24 @@ WITH t as (SELECT sum(0) FROM pg_wait_sampling_history)
WITH t as (SELECT sum(0) FROM pg_wait_sampling_profile)
SELECT sum(0) FROM generate_series(1, 2), t;
+WITH t as (SELECT sum(0) FROM pg_wait_sampling_current_extended)
+ SELECT sum(0) FROM generate_series(1, 2), t;
+
+WITH t as (SELECT sum(0) FROM pg_wait_sampling_history_extended)
+ SELECT sum(0) FROM generate_series(1, 2), t;
+
+WITH t as (SELECT sum(0) FROM pg_wait_sampling_profile_extended)
+ SELECT sum(0) FROM generate_series(1, 2), t;
+
-- Some dummy checks just to be sure that all our functions work and return something.
SELECT count(*) = 1 as test FROM pg_wait_sampling_get_current(pg_backend_pid());
SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_profile();
SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_history();
SELECT pg_wait_sampling_reset_profile();
+SELECT count(*) = 1 as test FROM pg_wait_sampling_get_current_extended(pg_backend_pid());
+SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_profile_extended();
+SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_history_extended();
+SELECT pg_wait_sampling_reset_profile();
+
DROP EXTENSION pg_wait_sampling;
From 9208eb2cce05a4e60503bed1e0677cf6edb1101e Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Wed, 26 Mar 2025 16:21:17 +0700
Subject: [PATCH 02/11] Update README to include information about new
*_extended views
Also fix some typos/reword some sentences
---
README.md | 135 +++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 107 insertions(+), 28 deletions(-)
diff --git a/README.md b/README.md
index bbdbd20..f5f68cd 100644
--- a/README.md
+++ b/README.md
@@ -9,13 +9,13 @@ Introduction
PostgreSQL provides information about current wait event of particular
process. However, in order to gather descriptive statistics of server
-behavior user have to sample current wait event multiple times.
+behavior users have to sample current wait events multiple times.
`pg_wait_sampling` is an extension for collecting sampling statistics of wait
events.
The module must be loaded by adding `pg_wait_sampling` to
`shared_preload_libraries` in postgresql.conf, because it requires additional
-shared memory and launches background worker. This means that a server restart
+shared memory and launches a background worker. This means that a server restart
is needed to add or remove the module.
When used with `pg_stat_statements` it is recommended to put `pg_stat_statements`
@@ -25,17 +25,16 @@ utility statements are not rewritten by the former.
When `pg_wait_sampling` is enabled, it collects two kinds of statistics.
* History of waits events. It's implemented as in-memory ring buffer where
- samples of each process wait events are written with given (configurable)
+ samples of each process' wait events are written with given (configurable)
period. Therefore, for each running process user can see some number of
- recent samples depending on history size (configurable). Assuming there is
- a client who periodically read this history and dump it somewhere, user
- can have continuous history.
- * Waits profile. It's implemented as in-memory hash table where count
- of samples are accumulated per each process and each wait event
- (and each query with `pg_stat_statements`). This hash
- table can be reset by user request. Assuming there is a client who
- periodically dumps profile and resets it, user can have statistics of
- intensivity of wait events among time.
+ recent samples depending on history size (configurable). Assuming there is
+ a client who periodically reads this history and dumps it somewhere, user
+ can have continuous history of wait events.
+ * Waits profile. It's implemented as in-memory hash table where samples
+   are accumulated for each wait event and can be broken down by process,
+ query and other dimensions. This hash table can be reset by user request.
+ Assuming there is a client who periodically dumps profile and resets it,
+ user can have statistics of wait events over time.
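+
+For instance, a monitoring job could periodically persist and then reset the
+profile (a sketch; `wait_profile_archive` is a hypothetical table created by the
+operator beforehand):
+
+    INSERT INTO wait_profile_archive   -- hypothetical archive table
+      SELECT now(), * FROM pg_wait_sampling_profile;
+    SELECT pg_wait_sampling_reset_profile();
+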
In combination with `pg_stat_statements` this extension can also provide
per query statistics.
@@ -66,10 +65,10 @@ Manual build
higher. Before build and install you should ensure following:
* PostgreSQL version is 13 or higher.
- * You have development package of PostgreSQL installed or you built
+ * You have development package of PostgreSQL installed or you have built
PostgreSQL from source.
* Your PATH variable is configured so that `pg_config` command available, or
- set PG_CONFIG variable.
+ PG_CONFIG variable is set.
Typical installation procedure may look like this:
@@ -98,9 +97,9 @@ Usage
`pg_wait_sampling` interacts with user by set of views and functions.
`pg_wait_sampling_current` view – information about current wait events for
-all processed including background workers.
+all processes including background workers.
-| Column name | Column type | Description |
+| Column name | Column type | Description |
| ----------- | ----------- | ----------------------- |
| pid | int4 | Id of process |
| event_type | text | Name of wait event type |
@@ -110,10 +109,33 @@ all processed including background workers.
`pg_wait_sampling_get_current(pid int4)` returns the same table for single given
process.
+`pg_wait_sampling_current_extended` view – information about current wait events for
+all processes including background workers. The structure of this view can change
+between versions.
+
+| Column name | Column type | Description |
+| ------------------- | ----------- | --------------------------- |
+| pid | int4 | Id of process |
+| event_type | text | Name of wait event type |
+| event | text | Name of wait event |
+| queryid | int8 | Id of query |
+| role_id | int4 | Id of role |
+| database_id | int4 | Id of database |
+| parallel_leader_pid | int4 | Id of parallel query leader |
+| backend_type | text | Name of backend type |
+| backend_state | text | Name of backend state |
+| proc_start | timestamptz | Timestamp of process start |
+| client_addr | text | Client address |
+| client_hostname | text | Client hostname |
+| appname | text | Application name |
+
+`pg_wait_sampling_get_current_extended(pid int4)` returns the same table for single given
+process.
+
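+For example, a quick look at other backends with a few of the extended columns
+(a sketch; this view always fills all columns regardless of the dimension GUCs):
+
+    SELECT pid, event_type, event, backend_type, appname
+      FROM pg_wait_sampling_current_extended
+     WHERE pid <> pg_backend_pid();
+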
`pg_wait_sampling_history` view – history of wait events obtained by sampling into
in-memory ring buffer.
-| Column name | Column type | Description |
+| Column name | Column type | Description |
| ----------- | ----------- | ----------------------- |
| pid | int4 | Id of process |
| ts | timestamptz | Sample timestamp |
@@ -121,30 +143,74 @@ in-memory ring buffer.
| event | text | Name of wait event |
| queryid | int8 | Id of query |
+`pg_wait_sampling_history_extended` view – history of wait events obtained by
+sampling into in-memory ring buffer. The structure of this view can change
+between versions.
+
+| Column name | Column type | Description |
+| ------------------- | ----------- | --------------------------- |
+| pid | int4 | Id of process |
+| ts | timestamptz | Sample timestamp |
+| event_type | text | Name of wait event type |
+| event | text | Name of wait event |
+| queryid | int8 | Id of query |
+| role_id | int4 | Id of role |
+| database_id | int4 | Id of database |
+| parallel_leader_pid | int4 | Id of parallel query leader |
+| backend_type | text | Name of backend type |
+| backend_state | text | Name of backend state |
+| proc_start | timestamptz | Timestamp of process start |
+| client_addr | text | Client address |
+| client_hostname | text | Client hostname |
+| appname | text | Application name |
+
`pg_wait_sampling_profile` view – profile of wait events obtained by sampling into
in-memory hash table.
-| Column name | Column type | Description |
+| Column name | Column type | Description |
| ----------- | ----------- | ----------------------- |
| pid | int4 | Id of process |
| event_type | text | Name of wait event type |
| event | text | Name of wait event |
| queryid | int8 | Id of query |
-| count | text | Count of samples |
+| count | int8 | Count of samples |
+
+`pg_wait_sampling_profile_extended` view – profile of wait events obtained by
+sampling into in-memory hash table. The structure of this view can change
+between versions.
+
+| Column name | Column type | Description |
+| ------------------- | ----------- | --------------------------- |
+| pid | int4 | Id of process |
+| event_type | text | Name of wait event type |
+| event | text | Name of wait event |
+| queryid | int8 | Id of query |
+| role_id | int4 | Id of role |
+| database_id | int4 | Id of database |
+| parallel_leader_pid | int4 | Id of parallel query leader |
+| backend_type | text | Name of backend type |
+| backend_state | text | Name of backend state |
+| proc_start | timestamptz | Timestamp of process start |
+| client_addr | text | Client address |
+| client_hostname | text | Client hostname |
+| appname | text | Application name |
+| count | int8 | Count of samples |
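+
+For example, a possible aggregation over the extended profile (a sketch; columns
+that are not enabled via `pg_wait_sampling.profile_dimensions` come back as NULL
+and simply collapse into one group):
+
+    SELECT database_id, backend_type, event_type, event, sum(count) AS samples
+      FROM pg_wait_sampling_profile_extended
+     GROUP BY 1, 2, 3, 4
+     ORDER BY samples DESC
+     LIMIT 10;
+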
`pg_wait_sampling_reset_profile()` function resets the profile.
The work of wait event statistics collector worker is controlled by following
GUCs.
-| Parameter name | Data type | Description | Default value |
-|----------------------------------| --------- |---------------------------------------------|--------------:|
-| pg_wait_sampling.history_size | int4 | Size of history in-memory ring buffer | 5000 |
-| pg_wait_sampling.history_period | int4 | Period for history sampling in milliseconds | 10 |
-| pg_wait_sampling.profile_period | int4 | Period for profile sampling in milliseconds | 10 |
-| pg_wait_sampling.profile_pid | bool | Whether profile should be per pid | true |
-| pg_wait_sampling.profile_queries | enum | Whether profile should be per query | top |
-| pg_wait_sampling.sample_cpu | bool | Whether on CPU backends should be sampled | true |
+| Parameter name | Data type | Description | Default value |
+|-------------------------------------| --------- |---------------------------------------------|--------------:|
+| pg_wait_sampling.history_size | int4 | Size of history in-memory ring buffer | 5000 |
+| pg_wait_sampling.history_period | int4 | Period for history sampling in milliseconds | 10 |
+| pg_wait_sampling.profile_period | int4 | Period for profile sampling in milliseconds | 10 |
+| pg_wait_sampling.profile_pid | bool | Whether profile should be per pid | true |
+| pg_wait_sampling.profile_queries | enum | Whether profile should be per query | top |
+| pg_wait_sampling.sample_cpu | bool | Whether on CPU backends should be sampled | true |
+| pg_wait_sampling.history_dimensions | text | Additional columns in extended history view | 'none' |
+| pg_wait_sampling.profile_dimensions | text | Additional columns in extended profile view | 'none' |
If `pg_wait_sampling.profile_pid` is set to false, sampling profile wouldn't be
collected in per-process manner. In this case the value of pid could would
@@ -158,6 +224,19 @@ If `pg_wait_sampling.sample_cpu` is set to true then processes that are not
waiting on anything are also sampled. The wait event columns for such processes
will be NULL.
+`pg_wait_sampling.history_dimensions` and `pg_wait_sampling.profile_dimensions`
+determine which additional columns will be sampled in the `history/profile_extended`
+views. Possible values are `none`, `all`, `role_id`, `database_id`,
+`parallel_leader_pid`, `backend_type`, `backend_state`, `backend_start_time`,
+`client_addr`, `client_hostname`, `appname` and any combination of column names.
+`none` and `all` cannot be used together with any other values and must be used alone.
+
+> [!WARNING]
+> Turning on any of the following columns: `backend_type`, `backend_state`,
+> `backend_start_time`, `client_addr`, `client_hostname`, `appname` will reduce
+> performance compared to sampling none of those due to the need to look into
+> BackendStatusTable. This is especially noticeable on PostgreSQL 13-16.
+
Values of these GUC variables can be changed only in config file or with ALTER SYSTEM.
Then you need to reload server's configuration (such as with pg_reload_conf function)
for changes to take effect.
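+
+For example, to add application name and client address to the extended profile
+(a sketch using ALTER SYSTEM; editing postgresql.conf and reloading works the
+same way):
+
+    ALTER SYSTEM SET pg_wait_sampling.profile_dimensions = 'appname, client_addr';
+    SELECT pg_reload_conf();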
@@ -170,7 +249,7 @@ Contribution
------------
Please, notice, that `pg_wait_sampling` is still under development and while
-it's stable and tested, it may contains some bugs. Don't hesitate to raise
+it's stable and tested, it may contain some bugs. Don't hesitate to raise
[issues at github](https://github.com/postgrespro/pg_wait_sampling/issues) with
your bug reports.
From f0ee939d959f441fb1e18e654dde8ecdc924c2e7 Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Mon, 2 Jun 2025 15:21:42 +0300
Subject: [PATCH 03/11] Fixes after review
---
collector.c | 206 ++++++++++++---------
pg_wait_sampling--1.1--1.2.sql | 17 ++
pg_wait_sampling.c | 314 +++++++++++++++++----------------
pg_wait_sampling.h | 39 ++--
4 files changed, 322 insertions(+), 254 deletions(-)
diff --git a/collector.c b/collector.c
index e073062..8b1f5a1 100644
--- a/collector.c
+++ b/collector.c
@@ -149,6 +149,111 @@ get_next_observation(History *observations)
return result;
}
+static void
+fill_dimensions(SamplingDimensions *dimensions, PGPROC *proc,
+ int pid, uint32 wait_event_info, uint64 queryId,
+ int dimensions_mask)
+{
+ Oid role_id = proc->roleId;
+ Oid database_id = proc->databaseId;
+ PGPROC *lockGroupLeader = proc->lockGroupLeader;
+ bool is_regular_backend = proc->isRegularBackend;
+
+ dimensions->pid = pid;
+
+ dimensions->wait_event_info = wait_event_info;
+
+ if (pgws_profileQueries)
+ dimensions->queryId = queryId;
+
+ /* Copy everything we need from PGPROC */
+ if (dimensions_mask & PGWS_DIMENSIONS_ROLE_ID)
+ dimensions->role_id = role_id;
+
+ if (dimensions_mask & PGWS_DIMENSIONS_DB_ID)
+ dimensions->database_id = database_id;
+
+ if (dimensions_mask & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
+ dimensions->parallel_leader_pid = (lockGroupLeader ?
+ lockGroupLeader->pid :
+ 0);
+
+ if (dimensions_mask & PGWS_DIMENSIONS_IS_REGULAR_BE)
+ dimensions->is_regular_backend = is_regular_backend;
+
+ /* Look into BackendStatus only if necessary */
+ if (check_bestatus_dimensions(dimensions_mask))
+ {
+#if PG_VERSION_NUM >= 170000
+ PgBackendStatus *bestatus = pgstat_get_beentry_by_proc_number(GetNumberFromPGProc(proc));
+#else
+ PgBackendStatus *bestatus = get_beentry_by_procpid(proc->pid);
+#endif
+ /* Copy everything we need from BackendStatus */
+ if (bestatus)
+ {
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_TYPE)
+ dimensions->backend_type = bestatus->st_backendType;
+
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_STATE)
+ dimensions->backend_state = bestatus->st_state;
+
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_START_TIME)
+ dimensions->proc_start = bestatus->st_proc_start_timestamp;
+
+ if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_ADDR)
+ dimensions->client_addr = bestatus->st_clientaddr;
+
+ if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ strcpy(dimensions->client_hostname, bestatus->st_clienthostname);
+
+ if (dimensions_mask & PGWS_DIMENSIONS_APPNAME)
+ strcpy(dimensions->appname, bestatus->st_appname);
+ }
+ }
+}
+
+static void
+copy_dimensions (SamplingDimensions *dst, SamplingDimensions *src,
+ int dst_dimensions_mask)
+{
+ dst->pid = src->pid;
+
+ dst->wait_event_info = src->wait_event_info;
+
+ dst->queryId = src->queryId;
+
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_ROLE_ID)
+ dst->role_id = src->role_id;
+
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_DB_ID)
+ dst->database_id = src->database_id;
+
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
+ dst->parallel_leader_pid = src->parallel_leader_pid;
+
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_IS_REGULAR_BE)
+ dst->is_regular_backend = src->is_regular_backend;
+
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_BE_TYPE)
+ dst->backend_type = src->backend_type;
+
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_BE_STATE)
+ dst->backend_state = src->backend_state;
+
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_BE_START_TIME)
+ dst->proc_start = src->proc_start;
+
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_CLIENT_ADDR)
+ dst->client_addr = src->client_addr;
+
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ strcpy(dst->client_hostname, src->client_hostname);
+
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_APPNAME)
+ strcpy(dst->appname, src->appname);
+}
+
/*
* Read current waits from backends and write them to history array
* and/or profile hash.
@@ -176,92 +281,34 @@ probe_waits(History *observations, HTAB *profile_hash,
PGPROC *proc = &ProcGlobal->allProcs[i];
int pid;
uint32 wait_event_info;
+ SamplingDimensions common_dimensions;
+ int dimensions_mask_common = pgws_history_dimensions |
+ pgws_profile_dimensions;
/* Check if we need to sample this process */
if (!pgws_should_sample_proc(proc, &pid, &wait_event_info))
continue;
- /* We zero whole HistoryItem to avoid doing it field-by-field */
+ /*
+ * We zero items and dimensions with memset
+ * to avoid doing it field-by-field
+ */
memset(&item_history, 0, sizeof(HistoryItem));
memset(&item_profile, 0, sizeof(ProfileItem));
+ memset(&common_dimensions, 0, sizeof(SamplingDimensions));
- item_history.pid = pid;
- item_profile.pid = pid;
+ fill_dimensions(&common_dimensions, proc, pid, wait_event_info,
+ pgws_proc_queryids[i], dimensions_mask_common);
- item_history.wait_event_info = wait_event_info;
- item_profile.wait_event_info = wait_event_info;
-
- if (pgws_profileQueries)
- {
- item_history.queryId = pgws_proc_queryids[i];
- item_profile.queryId = pgws_proc_queryids[i];
- }
+ copy_dimensions(&item_history.dimensions,
+ &common_dimensions,
+ pgws_history_dimensions);
+		copy_dimensions(&item_profile.dimensions,
+ &common_dimensions,
+ pgws_profile_dimensions);
item_history.ts = ts;
- /* Copy everything we need from PGPROC */
- if (pgws_history_dimensions & PGWS_DIMENSIONS_ROLE_ID)
- item_history.role_id = proc->roleId;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_ROLE_ID)
- item_profile.role_id = proc->roleId;
-
- if (pgws_history_dimensions & PGWS_DIMENSIONS_DB_ID)
- item_history.database_id = proc->databaseId;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_DB_ID)
- item_profile.database_id = proc->databaseId;
-
- if (pgws_history_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
- item_history.parallel_leader_pid = (proc->lockGroupLeader ?
- proc->lockGroupLeader->pid :
- 0);
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
- item_profile.parallel_leader_pid = (proc->lockGroupLeader ?
- proc->lockGroupLeader->pid :
- 0);
- /* Look into BackendStatus only if necessary */
- if (check_bestatus_dimensions(pgws_history_dimensions) ||
- check_bestatus_dimensions(pgws_profile_dimensions))
- {
-#if PG_VERSION_NUM >= 170000
- PgBackendStatus *bestatus = pgstat_get_beentry_by_proc_number(GetNumberFromPGProc(proc));
-#else
- PgBackendStatus *bestatus = get_beentry_by_procpid(proc->pid);
-#endif
- /* Copy everything we need from BackendStatus */
- if (bestatus)
- {
- if (pgws_history_dimensions & PGWS_DIMENSIONS_BE_TYPE)
- item_history.backend_type = bestatus->st_backendType;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE)
- item_profile.backend_type = bestatus->st_backendType;
-
- if (pgws_history_dimensions & PGWS_DIMENSIONS_BE_STATE)
- item_history.backend_state = bestatus->st_state;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE)
- item_profile.backend_state = bestatus->st_state;
-
- if (pgws_history_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
- item_history.proc_start = bestatus->st_proc_start_timestamp;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
- item_profile.proc_start = bestatus->st_proc_start_timestamp;
-
- if (pgws_history_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
- item_history.client_addr = bestatus->st_clientaddr;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
- item_profile.client_addr = bestatus->st_clientaddr;
-
- if (pgws_history_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
- strcpy(item_history.client_hostname, bestatus->st_clienthostname);
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
- strcpy(item_profile.client_hostname, bestatus->st_clienthostname);
-
- if (pgws_history_dimensions & PGWS_DIMENSIONS_APPNAME)
- strcpy(item_history.appname, bestatus->st_appname);
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME)
- strcpy(item_profile.appname, bestatus->st_appname);
- }
- }
-
/* Write to the history if needed */
if (write_history)
{
@@ -276,9 +323,10 @@ probe_waits(History *observations, HTAB *profile_hash,
bool found;
if (!profile_pid)
- item_profile.pid = 0;
+ item_profile.dimensions.pid = 0;
- profileItem = (ProfileItem *) hash_search(profile_hash, &item_profile, HASH_ENTER, &found);
+ profileItem = (ProfileItem *) hash_search(profile_hash, &item_profile,
+ HASH_ENTER, &found);
if (found)
profileItem->count++;
else
@@ -379,11 +427,11 @@ make_profile_hash()
HASHCTL hash_ctl;
/*
- * Since adding additional dimensions we include everyting except count
- * into hashtable key. This is fine for cases when some fields are 0 since
+	 * Since the addition of extra dimensions we use SamplingDimensions as
+	 * the hashtable key. This is fine for cases when some fields are 0 since
* it doesn't impede our ability to search the hash table for entries
*/
- hash_ctl.keysize = offsetof(ProfileItem, count);
+ hash_ctl.keysize = sizeof(SamplingDimensions);
hash_ctl.entrysize = sizeof(ProfileItem);
return hash_create("Waits profile hash", 1024, &hash_ctl,
diff --git a/pg_wait_sampling--1.1--1.2.sql b/pg_wait_sampling--1.1--1.2.sql
index df95826..29def16 100644
--- a/pg_wait_sampling--1.1--1.2.sql
+++ b/pg_wait_sampling--1.1--1.2.sql
@@ -3,6 +3,13 @@
-- complain if script is sourced in psql, rather than via ALTER EXTENSION
\echo Use "ALTER EXTENSION pg_wait_sampling UPDATE TO 1.2" to load this file. \quit
+DROP FUNCTION pg_wait_sampling_get_profile (
+ OUT pid int4,
+ OUT event_type text,
+ OUT event text,
+ OUT count bigint
+) CASCADE;
+
CREATE FUNCTION pg_wait_sampling_get_current_extended (
pid int4,
OUT pid int4,
@@ -12,6 +19,7 @@ CREATE FUNCTION pg_wait_sampling_get_current_extended (
OUT role_id int8,
OUT database_id int8,
OUT parallel_leader_pid int4,
+ OUT is_regular_backend bool,
OUT backend_type text,
OUT backend_state text,
OUT proc_start timestamptz,
@@ -37,6 +45,7 @@ CREATE FUNCTION pg_wait_sampling_get_history_extended (
OUT role_id int8,
OUT database_id int8,
OUT parallel_leader_pid int4,
+ OUT is_regular_backend bool,
OUT backend_type text,
OUT backend_state text,
OUT proc_start timestamptz,
@@ -61,6 +70,7 @@ CREATE FUNCTION pg_wait_sampling_get_profile_extended (
OUT role_id int8,
OUT database_id int8,
OUT parallel_leader_pid int4,
+ OUT is_regular_backend bool,
OUT backend_type text,
OUT backend_state text,
OUT proc_start timestamptz,
@@ -77,3 +87,10 @@ CREATE VIEW pg_wait_sampling_profile_extended AS
SELECT * FROM pg_wait_sampling_get_profile_extended();
GRANT SELECT ON pg_wait_sampling_profile_extended TO PUBLIC;
+
+CREATE VIEW pg_wait_sampling_profile AS
+  SELECT pid, event_type, event, queryid, SUM(count) AS count FROM pg_wait_sampling_profile_extended
+ GROUP BY pid, event_type, event, queryid;
+
+GRANT SELECT ON pg_wait_sampling_profile TO PUBLIC;
+
diff --git a/pg_wait_sampling.c b/pg_wait_sampling.c
index fcc4384..d0ebd10 100644
--- a/pg_wait_sampling.c
+++ b/pg_wait_sampling.c
@@ -352,6 +352,8 @@ pgws_general_dimensions_check_hook (char **newvalue, void **extra, GucSource sou
extrachecks |= PGWS_DIMENSIONS_DB_ID;
else if (pg_strcasecmp(tok, "parallel_leader_pid") == 0)
extrachecks |= PGWS_DIMENSIONS_PARALLEL_LEADER_PID;
+ else if (pg_strcasecmp(tok, "is_regular_backend") == 0)
+ extrachecks |= PGWS_DIMENSIONS_IS_REGULAR_BE;
else if (pg_strcasecmp(tok, "backend_type") == 0)
extrachecks |= PGWS_DIMENSIONS_BE_TYPE;
else if (pg_strcasecmp(tok, "backend_state") == 0)
@@ -662,9 +664,9 @@ pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
proc = search_proc(PG_GETARG_UINT32(0));
params->items = (HistoryItem *) palloc0(sizeof(HistoryItem));
item = ¶ms->items[0];
- item->pid = proc->pid;
- item->wait_event_info = proc->wait_event_info;
- item->queryId = pgws_proc_queryids[proc - ProcGlobal->allProcs];
+ item->dimensions.pid = proc->pid;
+ item->dimensions.wait_event_info = proc->wait_event_info;
+ item->dimensions.queryId = pgws_proc_queryids[proc - ProcGlobal->allProcs];
funcctx->max_calls = 1;
}
else
@@ -680,13 +682,13 @@ pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
PGPROC *proc = &ProcGlobal->allProcs[i];
if (!pgws_should_sample_proc(proc,
- ¶ms->items[j].pid,
- ¶ms->items[j].wait_event_info))
+ ¶ms->items[j].dimensions.pid,
+ ¶ms->items[j].dimensions.wait_event_info))
continue;
- params->items[j].pid = proc->pid;
- params->items[j].wait_event_info = proc->wait_event_info;
- params->items[j].queryId = pgws_proc_queryids[i];
+ params->items[j].dimensions.pid = proc->pid;
+ params->items[j].dimensions.wait_event_info = proc->wait_event_info;
+ params->items[j].dimensions.queryId = pgws_proc_queryids[i];
j++;
}
funcctx->max_calls = j;
@@ -716,9 +718,9 @@ pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- event_type = pgstat_get_wait_event_type(item->wait_event_info);
- event = pgstat_get_wait_event(item->wait_event_info);
- values[0] = Int32GetDatum(item->pid);
+ event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+ event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+ values[0] = Int32GetDatum(item->dimensions.pid);
if (event_type)
values[1] = PointerGetDatum(cstring_to_text(event_type));
else
@@ -728,7 +730,7 @@ pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
else
nulls[2] = true;
- values[3] = UInt64GetDatum(item->queryId);
+ values[3] = UInt64GetDatum(item->dimensions.queryId);
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
@@ -874,7 +876,7 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
funcctx->user_fctx = params;
/* Setup tuple desc */
- tupdesc = CreateTemplateTupleDesc(13);
+ tupdesc = CreateTemplateTupleDesc(14);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
INT4OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
@@ -889,17 +891,19 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 7, "parallel_leader_pid",
INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 8, "backend_type",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 8, "is_regular_backend",
+ BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_type",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_state",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_state",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 10, "proc_start",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 11, "proc_start",
TIMESTAMPTZOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 11, "client_addr",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_addr",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_hostname",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_hostname",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 13, "appname",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 14, "appname",
TEXTOID, -1, 0);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
@@ -914,31 +918,15 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
PgBackendStatus *bestatus;
proc = search_proc(PG_GETARG_UINT32(0));
-#if PG_VERSION_NUM >= 170000
- bestatus = pgstat_get_beentry_by_proc_number(GetNumberFromPGProc(proc));
-#else
- bestatus = get_beentry_by_procpid(proc->pid);
-#endif
+
params->items = (HistoryItem *) palloc0(sizeof(HistoryItem));
item = ¶ms->items[0];
- /* Show all fields without looking at GUC variables */
- item->pid = proc->pid;
- item->wait_event_info = proc->wait_event_info;
- item->queryId = pgws_proc_queryids[proc - ProcGlobal->allProcs];
- item->role_id = proc->roleId;
- item->database_id = proc->databaseId;
- item->parallel_leader_pid = (proc->lockGroupLeader ?
- proc->lockGroupLeader->pid :
- 0);
- if (bestatus)
- {
- item->backend_type = bestatus->st_backendType;
- item->backend_state = bestatus->st_state;
- item->proc_start = bestatus->st_proc_start_timestamp;
- item->client_addr = bestatus->st_clientaddr;
- strcpy(item->client_hostname, bestatus->st_clienthostname);
- strcpy(item->appname, bestatus->st_appname);
- }
+
+ fill_dimensions(&item->dimensions, proc, proc->pid,
+ proc->wait_event_info,
+ pgws_proc_queryids[proc - ProcGlobal->allProcs],
+ PGWS_DIMENSIONS_ALL);
+
funcctx->max_calls = 1;
}
else
@@ -952,35 +940,36 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
for (i = 0; i < procCount; i++)
{
PGPROC *proc = &ProcGlobal->allProcs[i];
-#if PG_VERSION_NUM >= 170000
- PgBackendStatus *bestatus = pgstat_get_beentry_by_proc_number(GetNumberFromPGProc(proc));
-#else
- PgBackendStatus *bestatus = get_beentry_by_procpid(proc->pid);
-#endif
- if (!pgws_should_sample_proc(proc,
- ¶ms->items[j].pid,
- ¶ms->items[j].wait_event_info))
+		if (!pgws_should_sample_proc(proc,
+ ¶ms->items[j].dimensions.pid,
+ ¶ms->items[j].dimensions.wait_event_info))
continue;
- /* Show all fields without looking at GUC variables */
- params->items[j].pid = proc->pid;
- params->items[j].wait_event_info = proc->wait_event_info;
- params->items[j].queryId = pgws_proc_queryids[i];
- params->items[j].role_id = proc->roleId;
- params->items[j].database_id = proc->databaseId;
- params->items[j].parallel_leader_pid = (proc->lockGroupLeader ?
- proc->lockGroupLeader->pid :
- 0);
- if (bestatus)
- {
- params->items[j].backend_type = bestatus->st_backendType;
- params->items[j].backend_state = bestatus->st_state;
- params->items[j].proc_start = bestatus->st_proc_start_timestamp;
- params->items[j].client_addr = bestatus->st_clientaddr;
- strcpy(params->items[j].client_hostname, bestatus->st_clienthostname);
- strcpy(params->items[j].appname, bestatus->st_appname);
- }
+			fill_dimensions(&params->items[j].dimensions, proc, proc->pid,
+ proc->wait_event_info,
+ pgws_proc_queryids[proc - ProcGlobal->allProcs],
+ PGWS_DIMENSIONS_ALL);
+
+// /* Show all fields without looking at GUC variables */
+// params->items[j].dimensions.pid = proc.pid;
+// params->items[j].dimensions.wait_event_info = proc.wait_event_info;
+// params->items[j].dimensions.queryId = pgws_proc_queryids[i];
+// params->items[j].dimensions.role_id = proc.roleId;
+// params->items[j].dimensions.database_id = proc.databaseId;
+// params->items[j].dimensions.parallel_leader_pid = (proc.lockGroupLeader ?
+// proc.lockGroupLeader->pid :
+// 0);
+// params->items[j].dimensions.is_regular_backend = proc.isRegularBackend;
+// if (bestatus)
+// {
+// params->items[j].dimensions.backend_type = bestatus->st_backendType;
+// params->items[j].dimensions.backend_state = bestatus->st_state;
+// params->items[j].dimensions.proc_start = bestatus->st_proc_start_timestamp;
+// params->items[j].dimensions.client_addr = bestatus->st_clientaddr;
+// strcpy(params->items[j].dimensions.client_hostname, bestatus->st_clienthostname);
+// strcpy(params->items[j].dimensions.appname, bestatus->st_appname);
+// }
j++;
}
funcctx->max_calls = j;
@@ -1003,8 +992,8 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
if (funcctx->call_cntr < funcctx->max_calls)
{
HeapTuple tuple;
- Datum values[13];
- bool nulls[13];
+ Datum values[14];
+ bool nulls[14];
const char *event_type,
*event,
*backend_type;
@@ -1019,14 +1008,14 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- event_type = pgstat_get_wait_event_type(item->wait_event_info);
- event = pgstat_get_wait_event(item->wait_event_info);
- backend_type = GetBackendTypeDesc(item->backend_type);
- backend_state = GetBackendState(item->backend_state, &is_null_be_state);
- proc_start = TimestampTzGetDatum(item->proc_start);
- client_addr = get_backend_client_addr(item->client_addr, &is_null_client_addr);
+ event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+ event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+ backend_type = GetBackendTypeDesc(item->dimensions.backend_type);
+ backend_state = GetBackendState(item->dimensions.backend_state, &is_null_be_state);
+ proc_start = TimestampTzGetDatum(item->dimensions.proc_start);
+ client_addr = get_backend_client_addr(item->dimensions.client_addr, &is_null_client_addr);
- values[0] = Int32GetDatum(item->pid);
+ values[0] = Int32GetDatum(item->dimensions.pid);
if (event_type)
values[1] = PointerGetDatum(cstring_to_text(event_type));
else
@@ -1035,10 +1024,11 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
values[2] = PointerGetDatum(cstring_to_text(event));
else
nulls[2] = true;
- values[3] = UInt64GetDatum(item->queryId);
- values[4] = ObjectIdGetDatum(item->role_id);
- values[5] = ObjectIdGetDatum(item->database_id);
- values[6] = Int32GetDatum(item->parallel_leader_pid);
+ values[3] = UInt64GetDatum(item->dimensions.queryId);
+ values[4] = ObjectIdGetDatum(item->dimensions.role_id);
+ values[5] = ObjectIdGetDatum(item->dimensions.database_id);
+ values[6] = Int32GetDatum(item->dimensions.parallel_leader_pid);
+ values[7] = BoolGetDatum(item->dimensions.is_regular_backend);
if (backend_type)
values[7] = PointerGetDatum(cstring_to_text(backend_type));
else
@@ -1052,8 +1042,8 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
values[10] = client_addr;
else
nulls[10] = true;
- values[11] = PointerGetDatum(cstring_to_text(item->client_hostname));
- values[12] = PointerGetDatum(cstring_to_text(item->appname));
+ values[11] = PointerGetDatum(cstring_to_text(item->dimensions.client_hostname));
+ values[12] = PointerGetDatum(cstring_to_text(item->dimensions.appname));
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
@@ -1233,9 +1223,9 @@ pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
MemSet(nulls, 0, sizeof(nulls));
/* Make and return next tuple to caller */
- event_type = pgstat_get_wait_event_type(item->wait_event_info);
- event = pgstat_get_wait_event(item->wait_event_info);
- values[0] = Int32GetDatum(item->pid);
+ event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+ event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+ values[0] = Int32GetDatum(item->dimensions.pid);
if (event_type)
values[1] = PointerGetDatum(cstring_to_text(event_type));
else
@@ -1246,7 +1236,7 @@ pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
nulls[2] = true;
if (pgws_profileQueries)
- values[3] = UInt64GetDatum(item->queryId);
+ values[3] = UInt64GetDatum(item->dimensions.queryId);
else
values[3] = (Datum) 0;
@@ -1289,7 +1279,7 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
funcctx->max_calls = profile->count;
/* Make tuple descriptor */
- tupdesc = CreateTemplateTupleDesc(14);
+ tupdesc = CreateTemplateTupleDesc(15);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
INT4OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
@@ -1304,19 +1294,21 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 7, "parallel_leader_pid",
INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 8, "backend_type",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 8, "is_regular_backend",
+ BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_type",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_state",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_state",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 10, "proc_start",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 11, "proc_start",
TIMESTAMPTZOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 11, "client_addr",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_addr",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_hostname",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_hostname",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 13, "appname",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 14, "appname",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 14, "count",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 15, "count",
INT8OID, -1, 0);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
@@ -1331,8 +1323,8 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
if (funcctx->call_cntr < funcctx->max_calls)
{
/* for each row */
- Datum values[14];
- bool nulls[14];
+ Datum values[15];
+ bool nulls[15];
HeapTuple tuple;
ProfileItem *item;
const char *event_type,
@@ -1348,14 +1340,14 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
MemSet(nulls, 0, sizeof(nulls));
/* Make and return next tuple to caller */
- event_type = pgstat_get_wait_event_type(item->wait_event_info);
- event = pgstat_get_wait_event(item->wait_event_info);
- backend_type = GetBackendTypeDesc(item->backend_type);
- backend_state = GetBackendState(item->backend_state, &is_null_be_state);
- proc_start = TimestampTzGetDatum(item->proc_start);
- client_addr = get_backend_client_addr(item->client_addr, &is_null_client_addr);
-
- values[0] = Int32GetDatum(item->pid);
+ event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+ event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+ backend_type = GetBackendTypeDesc(item->dimensions.backend_type);
+ backend_state = GetBackendState(item->dimensions.backend_state, &is_null_be_state);
+ proc_start = TimestampTzGetDatum(item->dimensions.proc_start);
+ client_addr = get_backend_client_addr(item->dimensions.client_addr, &is_null_client_addr);
+
+ values[0] = Int32GetDatum(item->dimensions.pid);
if (event_type)
values[1] = PointerGetDatum(cstring_to_text(event_type));
else
@@ -1365,47 +1357,51 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
else
nulls[2] = true;
if (pgws_profileQueries)
- values[3] = UInt64GetDatum(item->queryId);
+ values[3] = UInt64GetDatum(item->dimensions.queryId);
else
values[3] = (Datum) 0;
if (pgws_profile_dimensions & PGWS_DIMENSIONS_ROLE_ID)
- values[4] = ObjectIdGetDatum(item->role_id);
+ values[4] = ObjectIdGetDatum(item->dimensions.role_id);
else
nulls[4] = true;
if (pgws_profile_dimensions & PGWS_DIMENSIONS_DB_ID)
- values[5] = ObjectIdGetDatum(item->database_id);
+ values[5] = ObjectIdGetDatum(item->dimensions.database_id);
else
nulls[5] = true;
if (pgws_profile_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
- values[6] = Int32GetDatum(item->parallel_leader_pid);
+ values[6] = Int32GetDatum(item->dimensions.parallel_leader_pid);
else
nulls[6] = true;
- if (backend_type && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE))
- values[7] = PointerGetDatum(cstring_to_text(backend_type));
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_IS_REGULAR_BE)
+ values[7] = BoolGetDatum(item->dimensions.is_regular_backend);
else
nulls[7] = true;
- if (!is_null_be_state && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE))
- values[8] = backend_state;
+ if (backend_type && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE))
+ values[8] = PointerGetDatum(cstring_to_text(backend_type));
else
nulls[8] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
- values[9] = proc_start;
+ if (!is_null_be_state && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE))
+ values[9] = backend_state;
else
nulls[9] = true;
- if (!is_null_client_addr && pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
- values[10] = client_addr;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
+ values[10] = proc_start;
else
nulls[10] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
- values[11] = PointerGetDatum(cstring_to_text(item->client_hostname));
+ if (!is_null_client_addr && pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
+ values[11] = client_addr;
else
nulls[11] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME)
- values[12] = PointerGetDatum(cstring_to_text(item->appname));
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ values[12] = PointerGetDatum(cstring_to_text(item->dimensions.client_hostname));
else
nulls[12] = true;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME)
+ values[13] = PointerGetDatum(cstring_to_text(item->dimensions.appname));
+ else
+ nulls[13] = true;
- values[13] = UInt64GetDatum(item->count);
+ values[14] = UInt64GetDatum(item->count);
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
@@ -1512,9 +1508,9 @@ pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- event_type = pgstat_get_wait_event_type(item->wait_event_info);
- event = pgstat_get_wait_event(item->wait_event_info);
- values[0] = Int32GetDatum(item->pid);
+ event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+ event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+ values[0] = Int32GetDatum(item->dimensions.pid);
values[1] = TimestampTzGetDatum(item->ts);
if (event_type)
values[2] = PointerGetDatum(cstring_to_text(event_type));
@@ -1525,7 +1521,7 @@ pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
else
nulls[3] = true;
- values[4] = UInt64GetDatum(item->queryId);
+ values[4] = UInt64GetDatum(item->dimensions.queryId);
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
history->index++;
@@ -1566,7 +1562,7 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
funcctx->max_calls = history->count;
/* Make tuple descriptor */
- tupdesc = CreateTemplateTupleDesc(14);
+ tupdesc = CreateTemplateTupleDesc(15);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
INT4OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "sample_ts",
@@ -1583,17 +1579,19 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 8, "parallel_leader_pid",
INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_type",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 9, "is_regular_backend",
+ BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_type",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_state",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 11, "backend_state",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 11, "proc_start",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 12, "proc_start",
TIMESTAMPTZOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_addr",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_addr",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_hostname",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 14, "client_hostname",
TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 14, "appname",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 15, "appname",
TEXTOID, -1, 0);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
@@ -1624,14 +1622,14 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- event_type = pgstat_get_wait_event_type(item->wait_event_info);
- event = pgstat_get_wait_event(item->wait_event_info);
- backend_type = GetBackendTypeDesc(item->backend_type);
- backend_state = GetBackendState(item->backend_state, &is_null_be_state);
- proc_start = TimestampTzGetDatum(item->proc_start);
- client_addr = get_backend_client_addr(item->client_addr, &is_null_client_addr);
+ event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+ event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+ backend_type = GetBackendTypeDesc(item->dimensions.backend_type);
+ backend_state = GetBackendState(item->dimensions.backend_state, &is_null_be_state);
+ proc_start = TimestampTzGetDatum(item->dimensions.proc_start);
+ client_addr = get_backend_client_addr(item->dimensions.client_addr, &is_null_client_addr);
- values[0] = Int32GetDatum(item->pid);
+ values[0] = Int32GetDatum(item->dimensions.pid);
values[1] = TimestampTzGetDatum(item->ts);
if (event_type)
values[2] = PointerGetDatum(cstring_to_text(event_type));
@@ -1641,43 +1639,47 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
values[3] = PointerGetDatum(cstring_to_text(event));
else
nulls[3] = true;
- values[4] = UInt64GetDatum(item->queryId);
+ values[4] = UInt64GetDatum(item->dimensions.queryId);
if (pgws_profile_dimensions & PGWS_DIMENSIONS_ROLE_ID)
- values[5] = ObjectIdGetDatum(item->role_id);
+ values[5] = ObjectIdGetDatum(item->dimensions.role_id);
else
nulls[5] = true;
if (pgws_profile_dimensions & PGWS_DIMENSIONS_DB_ID)
- values[6] = ObjectIdGetDatum(item->database_id);
+ values[6] = ObjectIdGetDatum(item->dimensions.database_id);
else
nulls[6] = true;
if (pgws_profile_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
- values[7] = Int32GetDatum(item->parallel_leader_pid);
+ values[7] = Int32GetDatum(item->dimensions.parallel_leader_pid);
else
nulls[7] = true;
- if (backend_type && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE))
- values[8] = PointerGetDatum(cstring_to_text(backend_type));
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_IS_REGULAR_BE)
+ values[8] = BoolGetDatum(item->dimensions.is_regular_backend);
else
nulls[8] = true;
- if (!is_null_be_state && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE))
- values[9] = backend_state;
+ if (backend_type && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE))
+ values[9] = PointerGetDatum(cstring_to_text(backend_type));
else
nulls[9] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
- values[10] = proc_start;
+ if (!is_null_be_state && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE))
+ values[10] = backend_state;
else
nulls[10] = true;
- if (!is_null_client_addr && pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
- values[11] = client_addr;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
+ values[11] = proc_start;
else
nulls[11] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
- values[12] = PointerGetDatum(cstring_to_text(item->client_hostname));
+ if (!is_null_client_addr && pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
+ values[12] = client_addr;
else
nulls[12] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME)
- values[13] = PointerGetDatum(cstring_to_text(item->appname));
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ values[13] = PointerGetDatum(cstring_to_text(item->dimensions.client_hostname));
else
nulls[13] = true;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME)
+ values[14] = PointerGetDatum(cstring_to_text(item->dimensions.appname));
+ else
+ nulls[14] = true;
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
diff --git a/pg_wait_sampling.h b/pg_wait_sampling.h
index 9009fe8..0216f47 100644
--- a/pg_wait_sampling.h
+++ b/pg_wait_sampling.h
@@ -30,9 +30,10 @@
/* Values for sampling dimensions */
#define PGWS_DIMENSIONS_NONE 0
-#define PGWS_DIMENSIONS_ROLE_ID (1 << 1)
-#define PGWS_DIMENSIONS_DB_ID (1 << 2)
-#define PGWS_DIMENSIONS_PARALLEL_LEADER_PID (1 << 3)
+#define PGWS_DIMENSIONS_ROLE_ID (1 << 0)
+#define PGWS_DIMENSIONS_DB_ID (1 << 1)
+#define PGWS_DIMENSIONS_PARALLEL_LEADER_PID (1 << 2)
+#define PGWS_DIMENSIONS_IS_REGULAR_BE (1 << 3)
#define PGWS_DIMENSIONS_BE_TYPE (1 << 4)
#define PGWS_DIMENSIONS_BE_STATE (1 << 5)
#define PGWS_DIMENSIONS_BE_START_TIME (1 << 6)
@@ -44,41 +45,41 @@
/* ^ all 1 in binary */
/*
- * Next two structures must match in fields until count/ts so make_profile_hash
- * works properly
+ * Common data (sampling dimensions) for ProfileItem and HistoryItem
*/
typedef struct
{
+ /* Fields from PGPROC */
int pid;
uint32 wait_event_info;
uint64 queryId;
Oid role_id;
Oid database_id;
int parallel_leader_pid;
+ bool is_regular_backend;
+ /* Fields from BackendStatus */
BackendType backend_type;
BackendState backend_state;
TimestampTz proc_start;
SockAddr client_addr;
char client_hostname[NAMEDATALEN];
char appname[NAMEDATALEN];
- uint64 count;
+} SamplingDimensions;
+
+/*
+ * Next two structures must match in fields until count/ts so make_profile_hash
+ * works properly
+ */
+typedef struct
+{
+ SamplingDimensions dimensions;
+ uint64 count;
} ProfileItem;
typedef struct
{
- int pid;
- uint32 wait_event_info;
- uint64 queryId;
- Oid role_id;
- Oid database_id;
- int parallel_leader_pid;
- BackendType backend_type;
- BackendState backend_state;
- TimestampTz proc_start;
- SockAddr client_addr;
- char client_hostname[NAMEDATALEN];
- char appname[NAMEDATALEN];
- TimestampTz ts;
+ SamplingDimensions dimensions;
+ TimestampTz ts;
} HistoryItem;
typedef struct
From 292aaa9d19d12f372db05f2ab64b4dd31439415f Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Tue, 10 Jun 2025 12:59:23 +0700
Subject: [PATCH 04/11] Fixes after review, part 2
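Factor the duplicated column setup of the *_extended functions into two
helpers: fill_tuple_desc() builds the shared part of the tuple descriptor,
and fill_values_and_nulls() assembles the values/nulls arrays from a
SamplingDimensions, honouring the dimension mask unless the caller asks to
show every field. Also export fill_dimensions() from collector.c and drop
the leftover commented-out code in pg_wait_sampling_get_current_extended().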
---
collector.c | 2 +-
pg_wait_sampling.c | 402 ++++++++++++++-------------------------------
pg_wait_sampling.h | 3 +
3 files changed, 124 insertions(+), 283 deletions(-)
diff --git a/collector.c b/collector.c
index 8b1f5a1..f74251c 100644
--- a/collector.c
+++ b/collector.c
@@ -149,7 +149,7 @@ get_next_observation(History *observations)
return result;
}
-static void
+void
fill_dimensions(SamplingDimensions *dimensions, PGPROC *proc,
int pid, uint32 wait_event_info, uint64 queryId,
int dimensions_mask)
diff --git a/pg_wait_sampling.c b/pg_wait_sampling.c
index d0ebd10..221ec10 100644
--- a/pg_wait_sampling.c
+++ b/pg_wait_sampling.c
@@ -853,6 +853,114 @@ get_beentry_by_procpid(int pid)
return NULL;
}
+/*
+ * Common routine to fill "dimensions" part of tupdesc
+ */
+static void
+fill_tuple_desc (TupleDesc tupdesc)
+{
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 5, "role_id",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 6, "database_id",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 7, "parallel_leader_pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 8, "is_regular_backend",
+ BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_state",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 11, "proc_start",
+ TIMESTAMPTZOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_addr",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_hostname",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 14, "appname",
+ TEXTOID, -1, 0);
+}
+
+static void
+fill_values_and_nulls(Datum *values, bool *nulls, SamplingDimensions dimensions, bool skip_mask)
+{
+ const char *event_type,
+ *event,
+ *backend_type;
+ Datum backend_state, proc_start, client_addr;
+ bool is_null_be_state = false,
+ is_null_client_addr = false;
+
+ event_type = pgstat_get_wait_event_type(dimensions.wait_event_info);
+ event = pgstat_get_wait_event(dimensions.wait_event_info);
+ backend_type = GetBackendTypeDesc(dimensions.backend_type);
+ backend_state = GetBackendState(dimensions.backend_state, &is_null_be_state);
+ proc_start = TimestampTzGetDatum(dimensions.proc_start);
+ client_addr = get_backend_client_addr(dimensions.client_addr, &is_null_client_addr);
+
+ values[0] = Int32GetDatum(dimensions.pid);
+ if (event_type)
+ values[1] = PointerGetDatum(cstring_to_text(event_type));
+ else
+ nulls[1] = true;
+ if (event)
+ values[2] = PointerGetDatum(cstring_to_text(event));
+ else
+ nulls[2] = true;
+ if (pgws_profileQueries || skip_mask)
+ values[3] = UInt64GetDatum(dimensions.queryId);
+ else
+ values[3] = (Datum) 0;
+ if ((pgws_profile_dimensions & PGWS_DIMENSIONS_ROLE_ID) || skip_mask)
+ values[4] = ObjectIdGetDatum(dimensions.role_id);
+ else
+ nulls[4] = true;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_DB_ID || skip_mask)
+ values[5] = ObjectIdGetDatum(dimensions.database_id);
+ else
+ nulls[5] = true;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID || skip_mask)
+ values[6] = Int32GetDatum(dimensions.parallel_leader_pid);
+ else
+ nulls[6] = true;
+ if (pgws_profile_dimensions & PGWS_DIMENSIONS_IS_REGULAR_BE || skip_mask)
+ values[7] = BoolGetDatum(dimensions.is_regular_backend);
+ else
+ nulls[7] = true;
+ if (backend_type && ((pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE) || skip_mask))
+ values[8] = PointerGetDatum(cstring_to_text(backend_type));
+ else
+ nulls[8] = true;
+ if (!is_null_be_state && ((pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE) || skip_mask))
+ values[9] = backend_state;
+ else
+ nulls[9] = true;
+ if ((pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME) || skip_mask)
+ values[10] = proc_start;
+ else
+ nulls[10] = true;
+ if (!is_null_client_addr && ((pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR) || skip_mask))
+ values[11] = client_addr;
+ else
+ nulls[11] = true;
+ if ((pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME) || skip_mask)
+ values[12] = PointerGetDatum(cstring_to_text(dimensions.client_hostname));
+ else
+ nulls[12] = true;
+ if ((pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME) || skip_mask)
+ values[13] = PointerGetDatum(cstring_to_text(dimensions.appname));
+ else
+ nulls[13] = true;
+}
+
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current_extended);
Datum
pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
@@ -877,35 +985,7 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
funcctx->user_fctx = params;
/* Setup tuple desc */
tupdesc = CreateTemplateTupleDesc(14);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 5, "role_id",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 6, "database_id",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 7, "parallel_leader_pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 8, "is_regular_backend",
- BOOLOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_state",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 11, "proc_start",
- TIMESTAMPTZOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_addr",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_hostname",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 14, "appname",
- TEXTOID, -1, 0);
-
+ fill_tuple_desc (tupdesc);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
LWLockAcquire(ProcArrayLock, LW_SHARED);
@@ -951,25 +1031,6 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
pgws_proc_queryids[proc - ProcGlobal->allProcs],
PGWS_DIMENSIONS_ALL);
-// /* Show all fields without looking at GUC variables */
-// params->items[j].dimensions.pid = proc.pid;
-// params->items[j].dimensions.wait_event_info = proc.wait_event_info;
-// params->items[j].dimensions.queryId = pgws_proc_queryids[i];
-// params->items[j].dimensions.role_id = proc.roleId;
-// params->items[j].dimensions.database_id = proc.databaseId;
-// params->items[j].dimensions.parallel_leader_pid = (proc.lockGroupLeader ?
-// proc.lockGroupLeader->pid :
-// 0);
-// params->items[j].dimensions.is_regular_backend = proc.isRegularBackend;
-// if (bestatus)
-// {
-// params->items[j].dimensions.backend_type = bestatus->st_backendType;
-// params->items[j].dimensions.backend_state = bestatus->st_state;
-// params->items[j].dimensions.proc_start = bestatus->st_proc_start_timestamp;
-// params->items[j].dimensions.client_addr = bestatus->st_clientaddr;
-// strcpy(params->items[j].dimensions.client_hostname, bestatus->st_clienthostname);
-// strcpy(params->items[j].dimensions.appname, bestatus->st_appname);
-// }
j++;
}
funcctx->max_calls = j;
@@ -994,12 +1055,6 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
HeapTuple tuple;
Datum values[14];
bool nulls[14];
- const char *event_type,
- *event,
- *backend_type;
- Datum backend_state, proc_start, client_addr;
- bool is_null_be_state = false,
- is_null_client_addr = false;
HistoryItem *item;
item = ¶ms->items[funcctx->call_cntr];
@@ -1008,42 +1063,7 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
- event = pgstat_get_wait_event(item->dimensions.wait_event_info);
- backend_type = GetBackendTypeDesc(item->dimensions.backend_type);
- backend_state = GetBackendState(item->dimensions.backend_state, &is_null_be_state);
- proc_start = TimestampTzGetDatum(item->dimensions.proc_start);
- client_addr = get_backend_client_addr(item->dimensions.client_addr, &is_null_client_addr);
-
- values[0] = Int32GetDatum(item->dimensions.pid);
- if (event_type)
- values[1] = PointerGetDatum(cstring_to_text(event_type));
- else
- nulls[1] = true;
- if (event)
- values[2] = PointerGetDatum(cstring_to_text(event));
- else
- nulls[2] = true;
- values[3] = UInt64GetDatum(item->dimensions.queryId);
- values[4] = ObjectIdGetDatum(item->dimensions.role_id);
- values[5] = ObjectIdGetDatum(item->dimensions.database_id);
- values[6] = Int32GetDatum(item->dimensions.parallel_leader_pid);
- values[7] = BoolGetDatum(item->dimensions.is_regular_backend);
- if (backend_type)
- values[7] = PointerGetDatum(cstring_to_text(backend_type));
- else
- nulls[7] = true;
- if (!is_null_be_state)
- values[8] = backend_state;
- else
- nulls[8] = true;
- values[9] = proc_start;
- if (!is_null_client_addr)
- values[10] = client_addr;
- else
- nulls[10] = true;
- values[11] = PointerGetDatum(cstring_to_text(item->dimensions.client_hostname));
- values[12] = PointerGetDatum(cstring_to_text(item->dimensions.appname));
+ fill_values_and_nulls(values, nulls, item->dimensions, true);
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
@@ -1280,34 +1300,7 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
/* Make tuple descriptor */
tupdesc = CreateTemplateTupleDesc(15);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 5, "role_id",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 6, "database_id",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 7, "parallel_leader_pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 8, "is_regular_backend",
- BOOLOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_state",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 11, "proc_start",
- TIMESTAMPTZOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_addr",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_hostname",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 14, "appname",
- TEXTOID, -1, 0);
+ fill_tuple_desc (tupdesc);
TupleDescInitEntry(tupdesc, (AttrNumber) 15, "count",
INT8OID, -1, 0);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
@@ -1327,82 +1320,16 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
bool nulls[15];
HeapTuple tuple;
ProfileItem *item;
- const char *event_type,
- *event,
- *backend_type;
- Datum backend_state, proc_start, client_addr;
- bool is_null_be_state = false,
- is_null_client_addr = false;
item = &profile->items[funcctx->call_cntr];
+ /* Make and return next tuple to caller */
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- /* Make and return next tuple to caller */
- event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
- event = pgstat_get_wait_event(item->dimensions.wait_event_info);
- backend_type = GetBackendTypeDesc(item->dimensions.backend_type);
- backend_state = GetBackendState(item->dimensions.backend_state, &is_null_be_state);
- proc_start = TimestampTzGetDatum(item->dimensions.proc_start);
- client_addr = get_backend_client_addr(item->dimensions.client_addr, &is_null_client_addr);
-
- values[0] = Int32GetDatum(item->dimensions.pid);
- if (event_type)
- values[1] = PointerGetDatum(cstring_to_text(event_type));
- else
- nulls[1] = true;
- if (event)
- values[2] = PointerGetDatum(cstring_to_text(event));
- else
- nulls[2] = true;
- if (pgws_profileQueries)
- values[3] = UInt64GetDatum(item->dimensions.queryId);
- else
- values[3] = (Datum) 0;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_ROLE_ID)
- values[4] = ObjectIdGetDatum(item->dimensions.role_id);
- else
- nulls[4] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_DB_ID)
- values[5] = ObjectIdGetDatum(item->dimensions.database_id);
- else
- nulls[5] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
- values[6] = Int32GetDatum(item->dimensions.parallel_leader_pid);
- else
- nulls[6] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_IS_REGULAR_BE)
- values[7] = BoolGetDatum(item->dimensions.is_regular_backend);
- else
- nulls[7] = true;
- if (backend_type && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE))
- values[8] = PointerGetDatum(cstring_to_text(backend_type));
- else
- nulls[8] = true;
- if (!is_null_be_state && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE))
- values[9] = backend_state;
- else
- nulls[9] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
- values[10] = proc_start;
- else
- nulls[10] = true;
- if (!is_null_client_addr && pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
- values[11] = client_addr;
- else
- nulls[11] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
- values[12] = PointerGetDatum(cstring_to_text(item->dimensions.client_hostname));
- else
- nulls[12] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME)
- values[13] = PointerGetDatum(cstring_to_text(item->dimensions.appname));
- else
- nulls[13] = true;
-
+ fill_values_and_nulls(values, nulls, item->dimensions, false);
values[14] = UInt64GetDatum(item->count);
-
+
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
@@ -1563,36 +1490,9 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
/* Make tuple descriptor */
tupdesc = CreateTemplateTupleDesc(15);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2, "sample_ts",
- TIMESTAMPTZOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3, "type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 4, "event",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 5, "queryid",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 6, "role_id",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 7, "database_id",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 8, "parallel_leader_pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 9, "is_regular_backend",
- BOOLOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 11, "backend_state",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 12, "proc_start",
+ fill_tuple_desc (tupdesc);
+	TupleDescInitEntry(tupdesc, (AttrNumber) 15, "sample_ts", /* TODO: moved to the end to match the current and profile functions; debatable, maybe move it back to first position */
TIMESTAMPTZOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_addr",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 14, "client_hostname",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 15, "appname",
- TEXTOID, -1, 0);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
MemoryContextSwitchTo(oldcontext);
@@ -1607,14 +1507,8 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
{
HeapTuple tuple;
HistoryItem *item;
- Datum values[14];
- bool nulls[14];
- const char *event_type,
- *event,
- *backend_type;
- Datum backend_state, proc_start, client_addr;
- bool is_null_be_state = false,
- is_null_client_addr = false;
+ Datum values[15];
+ bool nulls[15];
item = &history->items[history->index];
@@ -1622,64 +1516,8 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
- event = pgstat_get_wait_event(item->dimensions.wait_event_info);
- backend_type = GetBackendTypeDesc(item->dimensions.backend_type);
- backend_state = GetBackendState(item->dimensions.backend_state, &is_null_be_state);
- proc_start = TimestampTzGetDatum(item->dimensions.proc_start);
- client_addr = get_backend_client_addr(item->dimensions.client_addr, &is_null_client_addr);
-
- values[0] = Int32GetDatum(item->dimensions.pid);
- values[1] = TimestampTzGetDatum(item->ts);
- if (event_type)
- values[2] = PointerGetDatum(cstring_to_text(event_type));
- else
- nulls[2] = true;
- if (event)
- values[3] = PointerGetDatum(cstring_to_text(event));
- else
- nulls[3] = true;
- values[4] = UInt64GetDatum(item->dimensions.queryId);
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_ROLE_ID)
- values[5] = ObjectIdGetDatum(item->dimensions.role_id);
- else
- nulls[5] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_DB_ID)
- values[6] = ObjectIdGetDatum(item->dimensions.database_id);
- else
- nulls[6] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
- values[7] = Int32GetDatum(item->dimensions.parallel_leader_pid);
- else
- nulls[7] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_IS_REGULAR_BE)
- values[8] = BoolGetDatum(item->dimensions.is_regular_backend);
- else
- nulls[8] = true;
- if (backend_type && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE))
- values[9] = PointerGetDatum(cstring_to_text(backend_type));
- else
- nulls[9] = true;
- if (!is_null_be_state && (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE))
- values[10] = backend_state;
- else
- nulls[10] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME)
- values[11] = proc_start;
- else
- nulls[11] = true;
- if (!is_null_client_addr && pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR)
- values[12] = client_addr;
- else
- nulls[12] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
- values[13] = PointerGetDatum(cstring_to_text(item->dimensions.client_hostname));
- else
- nulls[13] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME)
- values[14] = PointerGetDatum(cstring_to_text(item->dimensions.appname));
- else
- nulls[14] = true;
+ fill_values_and_nulls(values, nulls, item->dimensions, false);
+	values[14] = TimestampTzGetDatum(item->ts); /* TODO: see sample_ts note at the tuple descriptor above */
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
diff --git a/pg_wait_sampling.h b/pg_wait_sampling.h
index 0216f47..ce7a721 100644
--- a/pg_wait_sampling.h
+++ b/pg_wait_sampling.h
@@ -123,6 +123,9 @@ extern int pgws_profile_dimensions; /* bit mask that is derived from GUC */
extern PgBackendStatus* get_beentry_by_procpid(int pid);
/* collector.c */
+extern void fill_dimensions(SamplingDimensions *dimensions, PGPROC *proc,
+ int pid, uint32 wait_event_info, uint64 queryId,
+							int dimensions_mask);
extern void pgws_register_wait_collector(void);
extern PGDLLEXPORT void pgws_collector_main(Datum main_arg);
From da75db063b1aa83cc28366914619fecbb697c2b6 Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Wed, 11 Jun 2025 17:16:40 +0700
Subject: [PATCH 05/11] Add serialization
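Store history and profile items in a serialized form so that only the
dimensions selected by the corresponding GUC take up space.
serialize_item() packs the chosen fields one after another into a flat
buffer, in the fixed field order of SamplingDimensions, and appends the
history timestamp or the profile count last; the prefix without that
trailing value doubles as the search key. get_serialized_size() computes
the buffer length for a given mask, and deserialize_item() walks the same
mask to unpack the fields again. saved_history_dimensions and
saved_profile_dimensions keep track of the mask a stored item was
serialized with.

A rough usage sketch (illustrative only; the variable names below are
placeholders, not code from this patch):

    char       *buf = NULL, *key = NULL;
    int         size = 0;   /* running offset, must start at zero */
    TimestampTz ts;
    uint64      count;

    serialize_item(item.dimensions, saved_profile_dimensions,
                   &buf, &key, &size, 0 /* no ts for profile */,
                   item.count, false /* is_history */);
    /* ... later, with the same dimensions mask ... */
    deserialize_item(&item.dimensions, buf, saved_profile_dimensions,
                     &ts, &count);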
---
collector.c | 361 +++++++++++++++--
pg_wait_sampling--1.1--1.2.sql | 9 +-
pg_wait_sampling.c | 700 ++++++++++++++++++---------------
pg_wait_sampling.h | 35 +-
4 files changed, 739 insertions(+), 366 deletions(-)
diff --git a/collector.c b/collector.c
index f74251c..180fcce 100644
--- a/collector.c
+++ b/collector.c
@@ -40,6 +40,9 @@
PGWS_DIMENSIONS_APPNAME))
static volatile sig_atomic_t shutdown_requested = false;
+int saved_profile_dimensions; /* TODO: should be initialized from the corresponding GUC? */
+int saved_history_dimensions;
+
static void handle_sigterm(SIGNAL_ARGS);
/*
@@ -73,6 +76,8 @@ alloc_history(History *observations, int count)
observations->index = 0;
observations->count = count;
observations->wraparound = false;
+
+ saved_history_dimensions = pgws_history_dimensions;
}
/*
@@ -117,6 +122,8 @@ realloc_history(History *observations, int count)
observations->index = copyCount;
observations->count = count;
observations->wraparound = false;
+
+ saved_history_dimensions = pgws_history_dimensions;
}
static void
@@ -157,13 +164,20 @@ fill_dimensions(SamplingDimensions *dimensions, PGPROC *proc,
Oid role_id = proc->roleId;
Oid database_id = proc->databaseId;
PGPROC *lockGroupLeader = proc->lockGroupLeader;
+#if PG_VERSION_NUM >= 180000
bool is_regular_backend = proc->isRegularBackend;
+#else
+ bool is_regular_backend = !proc->isBackgroundWorker;
+#endif
- dimensions->pid = pid;
+ if (dimensions_mask & PGWS_DIMENSIONS_PID)
+ dimensions->pid = pid;
- dimensions->wait_event_info = wait_event_info;
+ if (dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT_TYPE ||
+ dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT)
+ dimensions->wait_event_info = wait_event_info;
- if (pgws_profileQueries)
+ if (pgws_profileQueries || (dimensions_mask & PGWD_DIMENSIONS_QUERY_ID))
dimensions->queryId = queryId;
/* Copy everything we need from PGPROC */
@@ -217,13 +231,17 @@ static void
copy_dimensions (SamplingDimensions *dst, SamplingDimensions *src,
int dst_dimensions_mask)
{
- dst->pid = src->pid;
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_PID)
+ dst->pid = src->pid;
- dst->wait_event_info = src->wait_event_info;
+ if (dst_dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT_TYPE ||
+ dst_dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT)
+ dst->wait_event_info = src->wait_event_info;
- dst->queryId = src->queryId;
+ if (dst_dimensions_mask & PGWD_DIMENSIONS_QUERY_ID)
+ dst->queryId = src->queryId;
- if (dst_dimensions_mask & PGWS_DIMENSIONS_ROLE_ID)
+	if (dst_dimensions_mask & PGWS_DIMENSIONS_ROLE_ID)
dst->role_id = src->role_id;
if (dst_dimensions_mask & PGWS_DIMENSIONS_DB_ID)
@@ -254,6 +272,283 @@ copy_dimensions (SamplingDimensions *dst, SamplingDimensions *src,
strcpy(dst->appname, src->appname);
}
+int
+get_serialized_size(int dimensions_mask, bool need_last_field)
+{
+ int serialized_size = 0;
+ SamplingDimensions dimensions = {0}; /* Used only for sizeof */
+
+ if (dimensions_mask & PGWS_DIMENSIONS_PID)
+ serialized_size += sizeof(dimensions.pid);
+ if (dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT_TYPE ||
+ dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT)
+ serialized_size += sizeof(dimensions.wait_event_info);
+ if (dimensions_mask & PGWD_DIMENSIONS_QUERY_ID)
+ serialized_size += sizeof(dimensions.queryId);
+ if (dimensions_mask & PGWS_DIMENSIONS_ROLE_ID)
+ serialized_size += sizeof(dimensions.role_id);
+ if (dimensions_mask & PGWS_DIMENSIONS_DB_ID)
+ serialized_size += sizeof(dimensions.database_id);
+ if (dimensions_mask & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
+ serialized_size += sizeof(dimensions.parallel_leader_pid);
+ if (dimensions_mask & PGWS_DIMENSIONS_IS_REGULAR_BE)
+ serialized_size += sizeof(dimensions.is_regular_backend);
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_TYPE)
+ serialized_size += sizeof(dimensions.backend_type);
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_STATE)
+ serialized_size += sizeof(dimensions.backend_state);
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_START_TIME)
+ serialized_size += sizeof(dimensions.proc_start);
+ if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_ADDR)
+ serialized_size += sizeof(dimensions.client_addr);
+ if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ serialized_size += sizeof(dimensions.client_hostname);
+ if (dimensions_mask & PGWS_DIMENSIONS_APPNAME)
+ serialized_size += sizeof(dimensions.appname);
+ /* timestamp of history and count of profile are both 8 bytes */
+ if (need_last_field)
+ serialized_size += sizeof(uint64);
+ return serialized_size;
+}
+
+static void
+serialize_item(SamplingDimensions dimensions, int dimensions_mask,
+ char **serialized_item, char **serialized_key, int *serialized_size,
+ TimestampTz ts, uint64 count, bool is_history)
+{
+ char dummy_array[sizeof(SamplingDimensions) + sizeof(uint64) + 1];
+
+ memset(dummy_array, 0, sizeof(dummy_array));
+
+ if (dimensions_mask & PGWS_DIMENSIONS_PID)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.pid,
+ sizeof(dimensions.pid));
+ *serialized_size += sizeof(dimensions.pid);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT_TYPE ||
+ dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.wait_event_info,
+ sizeof(dimensions.wait_event_info));
+ *serialized_size += sizeof(dimensions.wait_event_info);
+ }
+
+ if (dimensions_mask & PGWD_DIMENSIONS_QUERY_ID)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.queryId,
+ sizeof(dimensions.queryId));
+ *serialized_size += sizeof(dimensions.queryId);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_ROLE_ID)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.role_id,
+ sizeof(dimensions.role_id));
+ *serialized_size += sizeof(dimensions.role_id);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_DB_ID)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.database_id,
+ sizeof(dimensions.database_id));
+ *serialized_size += sizeof(dimensions.database_id);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.parallel_leader_pid,
+ sizeof(dimensions.parallel_leader_pid));
+ *serialized_size += sizeof(dimensions.parallel_leader_pid);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_IS_REGULAR_BE)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.is_regular_backend,
+ sizeof(dimensions.is_regular_backend));
+ *serialized_size += sizeof(dimensions.is_regular_backend);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_TYPE)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.backend_type,
+ sizeof(dimensions.backend_type));
+ *serialized_size += sizeof(dimensions.backend_type);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_STATE)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.backend_state,
+ sizeof(dimensions.backend_state));
+ *serialized_size += sizeof(dimensions.backend_state);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_START_TIME)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.proc_start,
+ sizeof(dimensions.proc_start));
+ *serialized_size += sizeof(dimensions.proc_start);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_ADDR)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.client_addr,
+ sizeof(dimensions.client_addr));
+ *serialized_size += sizeof(dimensions.client_addr);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.client_hostname,
+ sizeof(dimensions.client_hostname));
+ *serialized_size += sizeof(dimensions.client_hostname);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_APPNAME)
+ {
+ memcpy(dummy_array + *serialized_size, &dimensions.appname,
+ sizeof(dimensions.appname));
+ *serialized_size += sizeof(dimensions.appname);
+ }
+
+ /* copy all the fields without ts/count */
+ *serialized_key = palloc0(*serialized_size + 1);
+ strcpy(*serialized_key, dummy_array);
+
+ if (is_history)
+ {
+ memcpy(dummy_array + *serialized_size, &ts,
+ sizeof(TimestampTz));
+ *serialized_size += sizeof(TimestampTz);
+ }
+ else
+ {
+ memcpy(dummy_array + *serialized_size, &count,
+ sizeof(uint64));
+ *serialized_size += sizeof(uint64);
+ }
+
+ /* copy everything */
+ *serialized_item = palloc0(*serialized_size + 1);
+ strcpy(*serialized_item, dummy_array);
+}
+
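+/*
+ * Reverse of serialize_item: read the fields enabled by dimensions_mask in
+ * the same order they were packed, then fill *ts or *count (whichever the
+ * caller passed as non-NULL) from the trailing 8 bytes.
+ */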
+void
+deserialize_item(SamplingDimensions *dimensions, char *serialized_item,
+ int dimensions_mask, TimestampTz *ts, uint64 *count)
+{
+ int idx = 0;
+
+ memset(dimensions, 0, sizeof(SamplingDimensions));
+
+ if (dimensions_mask & PGWS_DIMENSIONS_PID)
+ {
+ memcpy(&dimensions->pid, serialized_item + idx,
+ sizeof(dimensions->pid));
+ idx += sizeof(dimensions->pid);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT_TYPE ||
+ dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT)
+ {
+ memcpy(&dimensions->wait_event_info, serialized_item + idx,
+ sizeof(dimensions->wait_event_info));
+ idx += sizeof(dimensions->wait_event_info);
+ }
+
+ if (dimensions_mask & PGWD_DIMENSIONS_QUERY_ID)
+ {
+ memcpy(&dimensions->queryId, serialized_item + idx,
+ sizeof(dimensions->queryId));
+ idx += sizeof(dimensions->queryId);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_ROLE_ID)
+ {
+ memcpy(&dimensions->role_id, serialized_item + idx,
+ sizeof(dimensions->role_id));
+ idx += sizeof(dimensions->role_id);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_DB_ID)
+ {
+ memcpy(&dimensions->database_id, serialized_item + idx,
+ sizeof(dimensions->database_id));
+ idx += sizeof(dimensions->database_id);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
+ {
+ memcpy(&dimensions->parallel_leader_pid, serialized_item + idx,
+ sizeof(dimensions->parallel_leader_pid));
+ idx += sizeof(dimensions->parallel_leader_pid);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_IS_REGULAR_BE)
+ {
+ memcpy(&dimensions->is_regular_backend, serialized_item + idx,
+ sizeof(dimensions->is_regular_backend));
+ idx += sizeof(dimensions->is_regular_backend);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_TYPE)
+ {
+ memcpy(&dimensions->backend_type, serialized_item + idx,
+ sizeof(dimensions->backend_type));
+ idx += sizeof(dimensions->backend_type);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_STATE)
+ {
+ memcpy(&dimensions->backend_state, serialized_item + idx,
+ sizeof(dimensions->backend_state));
+ idx += sizeof(dimensions->backend_state);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_START_TIME)
+ {
+ memcpy(&dimensions->proc_start, serialized_item + idx,
+ sizeof(dimensions->proc_start));
+ idx += sizeof(dimensions->proc_start);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_ADDR)
+ {
+ memcpy(&dimensions->client_addr, serialized_item + idx,
+ sizeof(dimensions->client_addr));
+ idx += sizeof(dimensions->client_addr);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ {
+ memcpy(&dimensions->client_hostname, serialized_item + idx,
+ sizeof(dimensions->client_hostname));
+ idx += sizeof(dimensions->client_hostname);
+ }
+
+ if (dimensions_mask & PGWS_DIMENSIONS_APPNAME)
+ {
+ memcpy(&dimensions->appname, serialized_item + idx,
+ sizeof(dimensions->appname));
+ idx += sizeof(dimensions->appname);
+ }
+
+ if (ts)
+ {
+ memcpy(ts, serialized_item + idx,
+ sizeof(TimestampTz));
+ idx += sizeof(TimestampTz);
+ }
+
+ if (count)
+ {
+ memcpy(count, serialized_item + idx,
+ sizeof(uint64));
+ idx += sizeof(uint64);
+ }
+}
+
/*
* Read current waits from backends and write them to history array
* and/or profile hash.
@@ -281,7 +576,9 @@ probe_waits(History *observations, HTAB *profile_hash,
PGPROC *proc = &ProcGlobal->allProcs[i];
int pid;
uint32 wait_event_info;
- SamplingDimensions common_dimensions;
+ SamplingDimensions common_dimensions,
+ history_dimensions,
+ profile_dimensions;
int dimensions_mask_common = pgws_history_dimensions |
pgws_profile_dimensions;
@@ -290,28 +587,30 @@ probe_waits(History *observations, HTAB *profile_hash,
continue;
/*
- * We zero items and dimensions with memset
- * to avoid doing it field-by-field
+ * We zero dimensions with memset to avoid doing it field-by-field
*/
- memset(&item_history, 0, sizeof(HistoryItem));
- memset(&item_profile, 0, sizeof(ProfileItem));
+ memset(&history_dimensions, 0, sizeof(SamplingDimensions));
+ memset(&profile_dimensions, 0, sizeof(SamplingDimensions));
memset(&common_dimensions, 0, sizeof(SamplingDimensions));
fill_dimensions(&common_dimensions, proc, pid, wait_event_info,
pgws_proc_queryids[i], dimensions_mask_common);
- copy_dimensions(&item_history.dimensions,
+ copy_dimensions(&history_dimensions,
&common_dimensions,
pgws_history_dimensions);
- copy_dimensions(&item_history.dimensions,
+ copy_dimensions(&profile_dimensions,
&common_dimensions,
pgws_profile_dimensions);
item_history.ts = ts;
+ item_history.dimensions = history_dimensions;
/* Write to the history if needed */
if (write_history)
{
+ //TODO something needs to be done here??? because we do not pack
+ //the history
observation = get_next_observation(observations);
*observation = item_history;
}
@@ -319,18 +618,33 @@ probe_waits(History *observations, HTAB *profile_hash,
/* Write to the profile if needed */
if (write_profile)
{
- ProfileItem *profileItem;
- bool found;
+ bool found;
+ int serialized_size = 0;
+ uint64 count = 1;
+ char *serialized_key,
+ *serialized_item,
+ *stored_item;
if (!profile_pid)
item_profile.dimensions.pid = 0;
- profileItem = (ProfileItem *) hash_search(profile_hash, &item_profile,
- HASH_ENTER, &found);
+ serialize_item(item_profile.dimensions, saved_profile_dimensions,
+ &serialized_item, &serialized_key, &serialized_size,
+ (TimestampTz) 0, count, false);
+
+ stored_item = (char *) hash_search(profile_hash, serialized_key,
+ HASH_ENTER, &found);
+
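+ /* On a hash hit, bump the counter stored in the last 8 bytes of the entry */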
if (found)
- profileItem->count++;
+ {
+ memcpy(&count, (stored_item + serialized_size - sizeof(uint64)),
+ sizeof(uint64));
+ count++;
+ memcpy((stored_item + serialized_size - sizeof(uint64)), &count,
+ sizeof(uint64));
+ }
else
- profileItem->count = 1;
+ memcpy(stored_item, serialized_item, serialized_size);
}
}
LWLockRelease(ProcArrayLock);
@@ -426,14 +740,17 @@ make_profile_hash()
{
HASHCTL hash_ctl;
+ saved_profile_dimensions = pgws_profile_dimensions;
+
/*
* Since adding additional dimensions we use SamplingDimensions as
* hashtable key. This is fine for cases when some fields are 0 since
* it doesn't impede our ability to search the hash table for entries
*/
- hash_ctl.keysize = sizeof(SamplingDimensions);
+ hash_ctl.keysize = get_serialized_size(saved_profile_dimensions, false);
+ /* entry includes the serialized dimensions plus the trailing ts/count */
+ hash_ctl.entrysize = get_serialized_size(saved_profile_dimensions, true);
- hash_ctl.entrysize = sizeof(ProfileItem);
return hash_create("Waits profile hash", 1024, &hash_ctl,
HASH_ELEM | HASH_BLOBS);
}
diff --git a/pg_wait_sampling--1.1--1.2.sql b/pg_wait_sampling--1.1--1.2.sql
index 29def16..e774f0f 100644
--- a/pg_wait_sampling--1.1--1.2.sql
+++ b/pg_wait_sampling--1.1--1.2.sql
@@ -3,12 +3,9 @@
-- complain if script is sourced in psql, rather than via ALTER EXTENSION
\echo Use "ALTER EXTENSION pg_wait_sampling UPDATE TO 1.2" to load this file. \quit
-DROP FUNCTION pg_wait_sampling_get_profile (
- OUT pid int4,
- OUT event_type text,
- OUT event text,
- OUT count bigint
-) CASCADE;
+DROP FUNCTION pg_wait_sampling_get_current CASCADE;
+DROP FUNCTION pg_wait_sampling_get_profile CASCADE;
+DROP FUNCTION pg_wait_sampling_get_history CASCADE;
CREATE FUNCTION pg_wait_sampling_get_current_extended (
pid int4,
diff --git a/pg_wait_sampling.c b/pg_wait_sampling.c
index 221ec10..29d96ab 100644
--- a/pg_wait_sampling.c
+++ b/pg_wait_sampling.c
@@ -346,7 +346,15 @@ pgws_general_dimensions_check_hook (char **newvalue, void **extra, GucSource sou
char *tok = (char *) lfirst(l);
/* Process all allowed values */
- if (pg_strcasecmp(tok, "role_id") == 0)
+ if (pg_strcasecmp(tok, "pid") == 0)
+ extrachecks |= PGWS_DIMENSIONS_PID;
+ else if (pg_strcasecmp(tok, "wait_event_type") == 0)
+ extrachecks |= PGWS_DIMENSIONS_WAIT_EVENT_TYPE;
+ else if (pg_strcasecmp(tok, "wait_event") == 0)
+ extrachecks |= PGWS_DIMENSIONS_WAIT_EVENT;
+ else if (pg_strcasecmp(tok, "query_id") == 0)
+ extrachecks |= PGWD_DIMENSIONS_QUERY_ID;
+ else if (pg_strcasecmp(tok, "role_id") == 0)
extrachecks |= PGWS_DIMENSIONS_ROLE_ID;
else if (pg_strcasecmp(tok, "database_id") == 0)
extrachecks |= PGWS_DIMENSIONS_DB_ID;
@@ -536,7 +544,7 @@ _PG_init(void)
"Sets sampling dimensions for history",
NULL,
&pgws_history_dimensions_string,
- "none",
+ "pid, wait_event_type, wait_event, query_id",
PGC_SIGHUP,
GUC_LIST_INPUT,
pgws_general_dimensions_check_hook,
@@ -547,7 +555,7 @@ _PG_init(void)
"Sets sampling dimensions for profile",
NULL,
&pgws_profile_dimensions_string,
- "none",
+ "pid, wait_event_type, wait_event, query_id",
PGC_SIGHUP,
GUC_LIST_INPUT,
pgws_general_dimensions_check_hook,
@@ -620,127 +628,127 @@ typedef struct
TimestampTz ts;
} WaitCurrentContext;
-PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current);
-Datum
-pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
-{
- FuncCallContext *funcctx;
- WaitCurrentContext *params;
-
- check_shmem();
-
- if (SRF_IS_FIRSTCALL())
- {
- MemoryContext oldcontext;
- TupleDesc tupdesc;
-
- funcctx = SRF_FIRSTCALL_INIT();
-
- oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
- params = (WaitCurrentContext *) palloc0(sizeof(WaitCurrentContext));
- params->ts = GetCurrentTimestamp();
-
- funcctx->user_fctx = params;
- tupdesc = CreateTemplateTupleDesc(4);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
- INT8OID, -1, 0);
-
- funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-
- LWLockAcquire(ProcArrayLock, LW_SHARED);
-
- if (!PG_ARGISNULL(0))
- {
- /* pg_wait_sampling_get_current(pid int4) function */
- HistoryItem *item;
- PGPROC *proc;
-
- proc = search_proc(PG_GETARG_UINT32(0));
- params->items = (HistoryItem *) palloc0(sizeof(HistoryItem));
- item = &params->items[0];
- item->dimensions.pid = proc->pid;
- item->dimensions.wait_event_info = proc->wait_event_info;
- item->dimensions.queryId = pgws_proc_queryids[proc - ProcGlobal->allProcs];
- funcctx->max_calls = 1;
- }
- else
- {
- /* pg_wait_sampling_current view */
- int procCount = ProcGlobal->allProcCount,
- i,
- j = 0;
-
- params->items = (HistoryItem *) palloc0(sizeof(HistoryItem) * procCount);
- for (i = 0; i < procCount; i++)
- {
- PGPROC *proc = &ProcGlobal->allProcs[i];
-
- if (!pgws_should_sample_proc(proc,
- &params->items[j].dimensions.pid,
- &params->items[j].dimensions.wait_event_info))
- continue;
-
- params->items[j].dimensions.pid = proc->pid;
- params->items[j].dimensions.wait_event_info = proc->wait_event_info;
- params->items[j].dimensions.queryId = pgws_proc_queryids[i];
- j++;
- }
- funcctx->max_calls = j;
- }
-
- LWLockRelease(ProcArrayLock);
-
- MemoryContextSwitchTo(oldcontext);
- }
-
- /* stuff done on every call of the function */
- funcctx = SRF_PERCALL_SETUP();
- params = (WaitCurrentContext *) funcctx->user_fctx;
-
- if (funcctx->call_cntr < funcctx->max_calls)
- {
- HeapTuple tuple;
- Datum values[4];
- bool nulls[4];
- const char *event_type,
- *event;
- HistoryItem *item;
-
- item = &params->items[funcctx->call_cntr];
-
- /* Make and return next tuple to caller */
- MemSet(values, 0, sizeof(values));
- MemSet(nulls, 0, sizeof(nulls));
-
- event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
- event = pgstat_get_wait_event(item->dimensions.wait_event_info);
- values[0] = Int32GetDatum(item->dimensions.pid);
- if (event_type)
- values[1] = PointerGetDatum(cstring_to_text(event_type));
- else
- nulls[1] = true;
- if (event)
- values[2] = PointerGetDatum(cstring_to_text(event));
- else
- nulls[2] = true;
-
- values[3] = UInt64GetDatum(item->dimensions.queryId);
- tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
-
- SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
- }
- else
- {
- /* nothing left */
- SRF_RETURN_DONE(funcctx);
- }
-}
+//PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current);
+//Datum
+//pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
+//{
+// FuncCallContext *funcctx;
+// WaitCurrentContext *params;
+//
+// check_shmem();
+//
+// if (SRF_IS_FIRSTCALL())
+// {
+// MemoryContext oldcontext;
+// TupleDesc tupdesc;
+//
+// funcctx = SRF_FIRSTCALL_INIT();
+//
+// oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+// params = (WaitCurrentContext *) palloc0(sizeof(WaitCurrentContext));
+// params->ts = GetCurrentTimestamp();
+//
+// funcctx->user_fctx = params;
+// tupdesc = CreateTemplateTupleDesc(4);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
+// INT4OID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
+// TEXTOID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
+// TEXTOID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
+// INT8OID, -1, 0);
+//
+// funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+//
+// LWLockAcquire(ProcArrayLock, LW_SHARED);
+//
+// if (!PG_ARGISNULL(0))
+// {
+// /* pg_wait_sampling_get_current(pid int4) function */
+// HistoryItem *item;
+// PGPROC *proc;
+//
+// proc = search_proc(PG_GETARG_UINT32(0));
+// params->items = (HistoryItem *) palloc0(sizeof(HistoryItem));
+// item = &params->items[0];
+// item->dimensions.pid = proc->pid;
+// item->dimensions.wait_event_info = proc->wait_event_info;
+// item->dimensions.queryId = pgws_proc_queryids[proc - ProcGlobal->allProcs];
+// funcctx->max_calls = 1;
+// }
+// else
+// {
+// /* pg_wait_sampling_current view */
+// int procCount = ProcGlobal->allProcCount,
+// i,
+// j = 0;
+//
+// params->items = (HistoryItem *) palloc0(sizeof(HistoryItem) * procCount);
+// for (i = 0; i < procCount; i++)
+// {
+// PGPROC *proc = &ProcGlobal->allProcs[i];
+//
+// if (!pgws_should_sample_proc(proc,
+// &params->items[j].dimensions.pid,
+// &params->items[j].dimensions.wait_event_info))
+// continue;
+//
+// params->items[j].dimensions.pid = proc->pid;
+// params->items[j].dimensions.wait_event_info = proc->wait_event_info;
+// params->items[j].dimensions.queryId = pgws_proc_queryids[i];
+// j++;
+// }
+// funcctx->max_calls = j;
+// }
+//
+// LWLockRelease(ProcArrayLock);
+//
+// MemoryContextSwitchTo(oldcontext);
+// }
+//
+// /* stuff done on every call of the function */
+// funcctx = SRF_PERCALL_SETUP();
+// params = (WaitCurrentContext *) funcctx->user_fctx;
+//
+// if (funcctx->call_cntr < funcctx->max_calls)
+// {
+// HeapTuple tuple;
+// Datum values[4];
+// bool nulls[4];
+// const char *event_type,
+// *event;
+// HistoryItem *item;
+//
+// item = &params->items[funcctx->call_cntr];
+//
+// /* Make and return next tuple to caller */
+// MemSet(values, 0, sizeof(values));
+// MemSet(nulls, 0, sizeof(nulls));
+//
+// event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+// event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+// values[0] = Int32GetDatum(item->dimensions.pid);
+// if (event_type)
+// values[1] = PointerGetDatum(cstring_to_text(event_type));
+// else
+// nulls[1] = true;
+// if (event)
+// values[2] = PointerGetDatum(cstring_to_text(event));
+// else
+// nulls[2] = true;
+//
+// values[3] = UInt64GetDatum(item->dimensions.queryId);
+// tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+//
+// SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
+// }
+// else
+// {
+// /* nothing left */
+// SRF_RETURN_DONE(funcctx);
+// }
+//}
static Datum
GetBackendState(BackendState state, bool *is_null)
@@ -847,8 +855,8 @@ get_beentry_by_procpid(int pid)
/* Here beid is just index in localBackendStatusTable */
local_beentry = pgstat_fetch_stat_local_beentry(cur_be_idx);
#endif
- if (local_beentry->backendStatus.st_procpid == pid)
- return &local_beentry->backendStatus;
+ if (local_beentry->backendStatus->st_procpid == pid)
+ return local_beentry->backendStatus;
}
return NULL;
}
@@ -890,7 +898,7 @@ fill_tuple_desc (TupleDesc tupdesc)
}
static void
-fill_values_and_nulls(Datum *values, bool *nulls, SamplingDimensions dimensions, bool skip_mask)
+fill_values_and_nulls(Datum *values, bool *nulls, SamplingDimensions dimensions, int dimensions_mask)
{
const char *event_type,
*event,
@@ -906,56 +914,59 @@ fill_values_and_nulls(Datum *values, bool *nulls, SamplingDimensions dimensions,
proc_start = TimestampTzGetDatum(dimensions.proc_start);
client_addr = get_backend_client_addr(dimensions.client_addr, &is_null_client_addr);
- values[0] = Int32GetDatum(dimensions.pid);
- if (event_type)
+ if (dimensions_mask & PGWS_DIMENSIONS_PID)
+ values[0] = Int32GetDatum(dimensions.pid);
+ else
+ values[0] = (Datum) 0;
+ if (event_type && (dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT_TYPE))
values[1] = PointerGetDatum(cstring_to_text(event_type));
else
nulls[1] = true;
- if (event)
+ if (event && (dimensions_mask & PGWS_DIMENSIONS_WAIT_EVENT))
values[2] = PointerGetDatum(cstring_to_text(event));
else
nulls[2] = true;
- if (pgws_profileQueries || skip_mask)
+ if (pgws_profileQueries || (dimensions_mask & PGWD_DIMENSIONS_QUERY_ID))
values[3] = UInt64GetDatum(dimensions.queryId);
else
values[3] = (Datum) 0;
- if ((pgws_profile_dimensions & PGWS_DIMENSIONS_ROLE_ID) || skip_mask)
+ if (dimensions_mask & PGWS_DIMENSIONS_ROLE_ID)
values[4] = ObjectIdGetDatum(dimensions.role_id);
else
nulls[4] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_DB_ID || skip_mask)
+ if (dimensions_mask & PGWS_DIMENSIONS_DB_ID)
values[5] = ObjectIdGetDatum(dimensions.database_id);
else
nulls[5] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_PARALLEL_LEADER_PID || skip_mask)
+ if (dimensions_mask & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
values[6] = Int32GetDatum(dimensions.parallel_leader_pid);
else
nulls[6] = true;
- if (pgws_profile_dimensions & PGWS_DIMENSIONS_IS_REGULAR_BE || skip_mask)
+ if (dimensions_mask & PGWS_DIMENSIONS_IS_REGULAR_BE)
values[7] = BoolGetDatum(dimensions.is_regular_backend);
else
nulls[7] = true;
- if (backend_type && ((pgws_profile_dimensions & PGWS_DIMENSIONS_BE_TYPE) || skip_mask))
+ if (backend_type && (dimensions_mask & PGWS_DIMENSIONS_BE_TYPE))
values[8] = PointerGetDatum(cstring_to_text(backend_type));
else
nulls[8] = true;
- if (!is_null_be_state && ((pgws_profile_dimensions & PGWS_DIMENSIONS_BE_STATE) || skip_mask))
+ if (!is_null_be_state && (dimensions_mask & PGWS_DIMENSIONS_BE_STATE))
values[9] = backend_state;
else
nulls[9] = true;
- if ((pgws_profile_dimensions & PGWS_DIMENSIONS_BE_START_TIME) || skip_mask)
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_START_TIME)
values[10] = proc_start;
else
nulls[10] = true;
- if (!is_null_client_addr && ((pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_ADDR) || skip_mask))
+ if (!is_null_client_addr && (dimensions_mask & PGWS_DIMENSIONS_CLIENT_ADDR))
values[11] = client_addr;
else
nulls[11] = true;
- if ((pgws_profile_dimensions & PGWS_DIMENSIONS_CLIENT_HOSTNAME) || skip_mask)
+ if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
values[12] = PointerGetDatum(cstring_to_text(dimensions.client_hostname));
else
nulls[12] = true;
- if ((pgws_profile_dimensions & PGWS_DIMENSIONS_APPNAME) || skip_mask)
+ if (dimensions_mask & PGWS_DIMENSIONS_APPNAME)
values[13] = PointerGetDatum(cstring_to_text(dimensions.appname));
else
nulls[13] = true;
@@ -995,7 +1006,7 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
/* pg_wait_sampling_get_current_extended(pid int4) function */
HistoryItem *item;
PGPROC *proc;
- PgBackendStatus *bestatus;
+ //PgBackendStatus *bestatus; not needed?
proc = search_proc(PG_GETARG_UINT32(0));
@@ -1021,12 +1032,12 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
{
PGPROC *proc = &ProcGlobal->allProcs[i];
- if (!pgws_should_sample_proc(&proc,
+ if (!pgws_should_sample_proc(proc,
 &params->items[j].dimensions.pid,
 &params->items[j].dimensions.wait_event_info))
 continue;
- fill_dimensions(&params->items[j]->dimensions, proc, proc->pid,
+ fill_dimensions(&params->items[j].dimensions, proc, proc->pid,
proc->wait_event_info,
pgws_proc_queryids[proc - ProcGlobal->allProcs],
PGWS_DIMENSIONS_ALL);
@@ -1063,7 +1074,7 @@ pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- fill_values_and_nulls(values, nulls, item->dimensions, true);
+ fill_values_and_nulls(values, nulls, item->dimensions, PGWS_DIMENSIONS_ALL);
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
@@ -1179,100 +1190,132 @@ receive_array(SHMRequest request, Size item_size, Size *count)
return result;
}
-
-PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile);
-Datum
-pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
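+/*
+ * Convert the serialized items received from the collector back into an
+ * array of fixed-size items that the SRF code below can index by
+ * real_item_size.
+ */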
+static void *
+deserialize_array(void *tmp_array, int count, Size real_item_size, bool is_history)
{
- Profile *profile;
- FuncCallContext *funcctx;
-
- check_shmem();
+ Pointer result;
+ int i;
+ int dimensions_mask = (is_history ? saved_history_dimensions : saved_profile_dimensions);
+ int serialized_size = get_serialized_size(saved_profile_dimensions, true);
- if (SRF_IS_FIRSTCALL())
+ result = palloc0(real_item_size * count);
+ for (i = 0; i < count; i++)
{
- MemoryContext oldcontext;
- TupleDesc tupdesc;
+ SamplingDimensions tmp_dimensions;
+ char *cur_item;
+ TimestampTz *ts;
+ uint64 *count;
- funcctx = SRF_FIRSTCALL_INIT();
- oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+ cur_item = (((char *) tmp_array) + i * serialized_size);
+ ts = (is_history ? palloc0(sizeof(TimestampTz)) : NULL);
+ count = (is_history ? NULL : palloc0(sizeof(uint64)));
- /* Receive profile from shmq */
- profile = (Profile *) palloc0(sizeof(Profile));
- profile->items = (ProfileItem *) receive_array(PROFILE_REQUEST,
- sizeof(ProfileItem), &profile->count);
+ deserialize_item(&tmp_dimensions, cur_item, dimensions_mask, ts, count);
- funcctx->user_fctx = profile;
- funcctx->max_calls = profile->count;
+ memcpy((result + i * real_item_size), &tmp_dimensions, sizeof(SamplingDimensions));
- /* Make tuple descriptor */
- tupdesc = CreateTemplateTupleDesc(5);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 5, "count",
- INT8OID, -1, 0);
- funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-
- MemoryContextSwitchTo(oldcontext);
- }
-
- /* stuff done on every call of the function */
- funcctx = SRF_PERCALL_SETUP();
-
- profile = (Profile *) funcctx->user_fctx;
-
- if (funcctx->call_cntr < funcctx->max_calls)
- {
- /* for each row */
- Datum values[5];
- bool nulls[5];
- HeapTuple tuple;
- ProfileItem *item;
- const char *event_type,
- *event;
-
- item = &profile->items[funcctx->call_cntr];
-
- MemSet(values, 0, sizeof(values));
- MemSet(nulls, 0, sizeof(nulls));
-
- /* Make and return next tuple to caller */
- event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
- event = pgstat_get_wait_event(item->dimensions.wait_event_info);
- values[0] = Int32GetDatum(item->dimensions.pid);
- if (event_type)
- values[1] = PointerGetDatum(cstring_to_text(event_type));
- else
- nulls[1] = true;
- if (event)
- values[2] = PointerGetDatum(cstring_to_text(event));
+ if (is_history)
+ memcpy((result + i * real_item_size + serialized_size), ts, sizeof(TimestampTz));
else
- nulls[2] = true;
-
- if (pgws_profileQueries)
- values[3] = UInt64GetDatum(item->dimensions.queryId);
- else
- values[3] = (Datum) 0;
-
- values[4] = UInt64GetDatum(item->count);
-
- tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
-
- SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
- }
- else
- {
- /* nothing left */
- SRF_RETURN_DONE(funcctx);
+ memcpy((result + i * real_item_size + serialized_size), count, sizeof(uint64));
}
+
+ return result;
}
+//PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile);
+//Datum
+//pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
+//{
+// Profile *profile;
+// FuncCallContext *funcctx;
+//
+// check_shmem();
+//
+// if (SRF_IS_FIRSTCALL())
+// {
+// MemoryContext oldcontext;
+// TupleDesc tupdesc;
+//
+// funcctx = SRF_FIRSTCALL_INIT();
+// oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+//
+// /* Receive profile from shmq */
+// profile = (Profile *) palloc0(sizeof(Profile));
+// profile->items = (ProfileItem *) receive_array(PROFILE_REQUEST,
+// sizeof(ProfileItem), &profile->count);
+//
+// funcctx->user_fctx = profile;
+// funcctx->max_calls = profile->count;
+//
+// /* Make tuple descriptor */
+// tupdesc = CreateTemplateTupleDesc(5);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
+// INT4OID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
+// TEXTOID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
+// TEXTOID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
+// INT8OID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 5, "count",
+// INT8OID, -1, 0);
+// funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+//
+// MemoryContextSwitchTo(oldcontext);
+// }
+//
+// /* stuff done on every call of the function */
+// funcctx = SRF_PERCALL_SETUP();
+//
+// profile = (Profile *) funcctx->user_fctx;
+//
+// if (funcctx->call_cntr < funcctx->max_calls)
+// {
+// /* for each row */
+// Datum values[5];
+// bool nulls[5];
+// HeapTuple tuple;
+// ProfileItem *item;
+// const char *event_type,
+// *event;
+//
+// item = &profile->items[funcctx->call_cntr];
+//
+// MemSet(values, 0, sizeof(values));
+// MemSet(nulls, 0, sizeof(nulls));
+//
+// /* Make and return next tuple to caller */
+// event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+// event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+// values[0] = Int32GetDatum(item->dimensions.pid);
+// if (event_type)
+// values[1] = PointerGetDatum(cstring_to_text(event_type));
+// else
+// nulls[1] = true;
+// if (event)
+// values[2] = PointerGetDatum(cstring_to_text(event));
+// else
+// nulls[2] = true;
+//
+// if (pgws_profileQueries)
+// values[3] = UInt64GetDatum(item->dimensions.queryId);
+// else
+// values[3] = (Datum) 0;
+//
+// values[4] = UInt64GetDatum(item->count);
+//
+// tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+//
+// SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
+// }
+// else
+// {
+// /* nothing left */
+// SRF_RETURN_DONE(funcctx);
+// }
+//}
+
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile_extended);
Datum
pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
@@ -1286,15 +1329,19 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
{
MemoryContext oldcontext;
TupleDesc tupdesc;
+ void *tmp_array;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/* Receive profile from shmq */
profile = (Profile *) palloc0(sizeof(Profile));
- profile->items = (ProfileItem *) receive_array(PROFILE_REQUEST,
- sizeof(ProfileItem), &profile->count);
-
+
+ tmp_array = receive_array(PROFILE_REQUEST,
+ get_serialized_size(saved_profile_dimensions, true),
+ &profile->count);
+ profile->items = (ProfileItem *) deserialize_array(tmp_array, profile->count,
+ sizeof(ProfileItem), false);
funcctx->user_fctx = profile;
funcctx->max_calls = profile->count;
@@ -1327,7 +1374,7 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- fill_values_and_nulls(values, nulls, item->dimensions, false);
+ fill_values_and_nulls(values, nulls, item->dimensions, pgws_profile_dimensions);
values[14] = UInt64GetDatum(item->count);
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
@@ -1373,95 +1420,95 @@ pg_wait_sampling_reset_profile(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
-PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history);
-Datum
-pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
-{
- History *history;
- FuncCallContext *funcctx;
-
- check_shmem();
-
- if (SRF_IS_FIRSTCALL())
- {
- MemoryContext oldcontext;
- TupleDesc tupdesc;
-
- funcctx = SRF_FIRSTCALL_INIT();
- oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-
- /* Receive history from shmq */
- history = (History *) palloc0(sizeof(History));
- history->items = (HistoryItem *) receive_array(HISTORY_REQUEST,
- sizeof(HistoryItem), &history->count);
-
- funcctx->user_fctx = history;
- funcctx->max_calls = history->count;
-
- /* Make tuple descriptor */
- tupdesc = CreateTemplateTupleDesc(5);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2, "sample_ts",
- TIMESTAMPTZOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3, "type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 4, "event",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 5, "queryid",
- INT8OID, -1, 0);
- funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-
- MemoryContextSwitchTo(oldcontext);
- }
-
- /* stuff done on every call of the function */
- funcctx = SRF_PERCALL_SETUP();
-
- history = (History *) funcctx->user_fctx;
-
- if (history->index < history->count)
- {
- HeapTuple tuple;
- HistoryItem *item;
- Datum values[5];
- bool nulls[5];
- const char *event_type,
- *event;
-
- item = &history->items[history->index];
-
- /* Make and return next tuple to caller */
- MemSet(values, 0, sizeof(values));
- MemSet(nulls, 0, sizeof(nulls));
-
- event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
- event = pgstat_get_wait_event(item->dimensions.wait_event_info);
- values[0] = Int32GetDatum(item->dimensions.pid);
- values[1] = TimestampTzGetDatum(item->ts);
- if (event_type)
- values[2] = PointerGetDatum(cstring_to_text(event_type));
- else
- nulls[2] = true;
- if (event)
- values[3] = PointerGetDatum(cstring_to_text(event));
- else
- nulls[3] = true;
-
- values[4] = UInt64GetDatum(item->dimensions.queryId);
- tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
-
- history->index++;
- SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
- }
- else
- {
- /* nothing left */
- SRF_RETURN_DONE(funcctx);
- }
-
- PG_RETURN_VOID();
-}
+//PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history);
+//Datum
+//pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
+//{
+// History *history;
+// FuncCallContext *funcctx;
+//
+// check_shmem();
+//
+// if (SRF_IS_FIRSTCALL())
+// {
+// MemoryContext oldcontext;
+// TupleDesc tupdesc;
+//
+// funcctx = SRF_FIRSTCALL_INIT();
+// oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+//
+// /* Receive history from shmq */
+// history = (History *) palloc0(sizeof(History));
+// history->items = (HistoryItem *) receive_array(HISTORY_REQUEST,
+// sizeof(HistoryItem), &history->count);
+//
+// funcctx->user_fctx = history;
+// funcctx->max_calls = history->count;
+//
+// /* Make tuple descriptor */
+// tupdesc = CreateTemplateTupleDesc(5);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
+// INT4OID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 2, "sample_ts",
+// TIMESTAMPTZOID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 3, "type",
+// TEXTOID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 4, "event",
+// TEXTOID, -1, 0);
+// TupleDescInitEntry(tupdesc, (AttrNumber) 5, "queryid",
+// INT8OID, -1, 0);
+// funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+//
+// MemoryContextSwitchTo(oldcontext);
+// }
+//
+// /* stuff done on every call of the function */
+// funcctx = SRF_PERCALL_SETUP();
+//
+// history = (History *) funcctx->user_fctx;
+//
+// if (history->index < history->count)
+// {
+// HeapTuple tuple;
+// HistoryItem *item;
+// Datum values[5];
+// bool nulls[5];
+// const char *event_type,
+// *event;
+//
+// item = &history->items[history->index];
+//
+// /* Make and return next tuple to caller */
+// MemSet(values, 0, sizeof(values));
+// MemSet(nulls, 0, sizeof(nulls));
+//
+// event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+// event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+// values[0] = Int32GetDatum(item->dimensions.pid);
+// values[1] = TimestampTzGetDatum(item->ts);
+// if (event_type)
+// values[2] = PointerGetDatum(cstring_to_text(event_type));
+// else
+// nulls[2] = true;
+// if (event)
+// values[3] = PointerGetDatum(cstring_to_text(event));
+// else
+// nulls[3] = true;
+//
+// values[4] = UInt64GetDatum(item->dimensions.queryId);
+// tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+//
+// history->index++;
+// SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
+// }
+// else
+// {
+// /* nothing left */
+// SRF_RETURN_DONE(funcctx);
+// }
+//
+// PG_RETURN_VOID();
+//}
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history_extended);
Datum
@@ -1469,6 +1516,7 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
{
History *history;
FuncCallContext *funcctx;
+ void *tmp_array;
check_shmem();
@@ -1482,9 +1530,11 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
/* Receive history from shmq */
history = (History *) palloc0(sizeof(History));
- history->items = (HistoryItem *) receive_array(HISTORY_REQUEST,
- sizeof(HistoryItem), &history->count);
-
+ tmp_array = receive_array(PROFILE_REQUEST,
+ get_serialized_size(saved_history_dimensions, true),
+ &history->count);
+ history->items = (HistoryItem *) deserialize_array(tmp_array, history->count,
+ sizeof(HistoryItem), true);
funcctx->user_fctx = history;
funcctx->max_calls = history->count;
@@ -1516,7 +1566,7 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- fill_values_and_nulls(values, nulls, item->dimensions, false);
+ fill_values_and_nulls(values, nulls, item->dimensions, pgws_history_dimensions);
values[14] = TimestampTzGetDatum(item->ts); //TODO!!!!!!!!!!!
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
diff --git a/pg_wait_sampling.h b/pg_wait_sampling.h
index ce7a721..d912a51 100644
--- a/pg_wait_sampling.h
+++ b/pg_wait_sampling.h
@@ -30,16 +30,20 @@
/* Values for sampling dimensions */
#define PGWS_DIMENSIONS_NONE 0
-#define PGWS_DIMENSIONS_ROLE_ID (1 << 0)
-#define PGWS_DIMENSIONS_DB_ID (1 << 1)
-#define PGWS_DIMENSIONS_PARALLEL_LEADER_PID (1 << 2)
-#define PGWS_DIMENSIONS_IS_REGULAR_BE (1 << 3)
-#define PGWS_DIMENSIONS_BE_TYPE (1 << 4)
-#define PGWS_DIMENSIONS_BE_STATE (1 << 5)
-#define PGWS_DIMENSIONS_BE_START_TIME (1 << 6)
-#define PGWS_DIMENSIONS_CLIENT_ADDR (1 << 7)
-#define PGWS_DIMENSIONS_CLIENT_HOSTNAME (1 << 8)
-#define PGWS_DIMENSIONS_APPNAME (1 << 9)
+#define PGWS_DIMENSIONS_PID (1 << 0)
+#define PGWS_DIMENSIONS_WAIT_EVENT_TYPE (1 << 1)
+#define PGWS_DIMENSIONS_WAIT_EVENT (1 << 2)
+#define PGWD_DIMENSIONS_QUERY_ID (1 << 3)
+#define PGWS_DIMENSIONS_ROLE_ID (1 << 4)
+#define PGWS_DIMENSIONS_DB_ID (1 << 5)
+#define PGWS_DIMENSIONS_PARALLEL_LEADER_PID (1 << 6)
+#define PGWS_DIMENSIONS_IS_REGULAR_BE (1 << 7)
+#define PGWS_DIMENSIONS_BE_TYPE (1 << 8)
+#define PGWS_DIMENSIONS_BE_STATE (1 << 9)
+#define PGWS_DIMENSIONS_BE_START_TIME (1 << 10)
+#define PGWS_DIMENSIONS_CLIENT_ADDR (1 << 11)
+#define PGWS_DIMENSIONS_CLIENT_HOSTNAME (1 << 12)
+#define PGWS_DIMENSIONS_APPNAME (1 << 13)
#define PGWS_DIMENSIONS_ALL ((int) ~0)
/* ^ all 1 in binary */
@@ -111,6 +115,8 @@ extern int pgws_profilePeriod;
extern bool pgws_profilePid;
extern int pgws_profileQueries;
extern bool pgws_sampleCpu;
+extern int pgws_history_dimensions;
+extern int pgws_profile_dimensions;
/* pg_wait_sampling.c */
extern CollectorShmqHeader *pgws_collector_hdr;
@@ -118,14 +124,17 @@ extern shm_mq *pgws_collector_mq;
extern uint64 *pgws_proc_queryids;
extern void pgws_init_lock_tag(LOCKTAG *tag, uint32 lock);
extern bool pgws_should_sample_proc(PGPROC *proc, int *pid_p, uint32 *wait_event_info_p);
-extern int pgws_history_dimensions; /* bit mask that is derived from GUC */
-extern int pgws_profile_dimensions; /* bit mask that is derived from GUC */
extern PgBackendStatus* get_beentry_by_procpid(int pid);
/* collector.c */
+extern int saved_profile_dimensions;
+extern int saved_history_dimensions;
extern void fill_dimensions(SamplingDimensions *dimensions, PGPROC *proc,
int pid, uint32 wait_event_info, uint64 queryId,
- int dimensions_mask)
+ int dimensions_mask);
+extern void deserialize_item(SamplingDimensions* dimensions, char* serialized_item,
+ int dimensions_mask, TimestampTz* ts, uint64* count);
+extern int get_serialized_size(int dimensions_mask, bool need_last_field);
extern void pgws_register_wait_collector(void);
extern PGDLLEXPORT void pgws_collector_main(Datum main_arg);
From 46af7da112389738efb7cc74cf3d7e6c0ece6c50 Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Mon, 23 Jun 2025 14:10:29 +0700
Subject: [PATCH 06/11] Fixing serialization
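
Store history and profile entries in serialized form whose size follows the
configured dimensions mask, send that mask through the shared memory queue so
readers can compute the item size, and replace the NUL-unsafe strcpy() copies
of serialized bytes with memcpy().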
---
collector.c | 110 ++++--
pg_wait_sampling--1.1--1.2.sql | 37 +-
pg_wait_sampling.c | 673 +++++++++++++++++----------------
pg_wait_sampling.h | 2 +
4 files changed, 447 insertions(+), 375 deletions(-)
diff --git a/collector.c b/collector.c
index 180fcce..65a146b 100644
--- a/collector.c
+++ b/collector.c
@@ -40,7 +40,7 @@
PGWS_DIMENSIONS_APPNAME))
static volatile sig_atomic_t shutdown_requested = false;
-int saved_profile_dimensions; //TODO should be initialized with the same value as GUC?
+int saved_profile_dimensions;
int saved_history_dimensions;
static void handle_sigterm(SIGNAL_ARGS);
@@ -72,12 +72,15 @@ pgws_register_wait_collector(void)
static void
alloc_history(History *observations, int count)
{
- observations->items = (HistoryItem *) palloc0(sizeof(HistoryItem) * count);
+ int serialized_size;
+
+ saved_history_dimensions = pgws_history_dimensions;
+ serialized_size = get_serialized_size(saved_history_dimensions, true);
+
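+ /* History slots now hold serialized items sized by the dimensions mask above */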
+ observations->serialized_items = (char *) palloc0(serialized_size * count);
observations->index = 0;
observations->count = count;
observations->wraparound = false;
-
- saved_history_dimensions = pgws_history_dimensions;
}
/*
@@ -86,13 +89,17 @@ alloc_history(History *observations, int count)
static void
realloc_history(History *observations, int count)
{
- HistoryItem *newitems;
+ char *newitems;
int copyCount,
i,
j;
+ int serialized_size;
+
+ //saved_history_dimensions = pgws_history_dimensions; // TODO seems like
+ serialized_size = get_serialized_size(saved_history_dimensions, true);
/* Allocate new array for history */
- newitems = (HistoryItem *) palloc0(sizeof(HistoryItem) * count);
+ newitems = (char *) palloc0(serialized_size * count);
/* Copy entries from old array to the new */
if (observations->wraparound)
@@ -111,19 +118,19 @@ realloc_history(History *observations, int count)
{
if (j >= observations->count)
j = 0;
- memcpy(&newitems[i], &observations->items[j], sizeof(HistoryItem));
+ memcpy((newitems + i * serialized_size),
+ (observations->serialized_items + j * serialized_size),
+ serialized_size);
i++;
j++;
}
/* Switch to new history array */
- pfree(observations->items);
- observations->items = newitems;
+ pfree(observations->serialized_items);
+ observations->serialized_items = newitems;
observations->index = copyCount;
observations->count = count;
observations->wraparound = false;
-
- saved_history_dimensions = pgws_history_dimensions;
}
static void
@@ -140,10 +147,11 @@ handle_sigterm(SIGNAL_ARGS)
/*
* Get next item of history with rotation.
*/
-static HistoryItem *
+static char *
get_next_observation(History *observations)
{
- HistoryItem *result;
+ char *result;
+ int serialized_size = get_serialized_size(saved_history_dimensions, true);
/* Check for wraparound */
if (observations->index >= observations->count)
@@ -151,7 +159,7 @@ get_next_observation(History *observations)
observations->index = 0;
observations->wraparound = true;
}
- result = &observations->items[observations->index];
+ result = &observations->serialized_items[observations->index * serialized_size];
observations->index++;
return result;
}
@@ -413,8 +421,8 @@ serialize_item(SamplingDimensions dimensions, int dimensions_mask,
}
/* copy all the fields without ts/count */
- *serialized_key = palloc0(*serialized_size + 1);
- strcpy(*serialized_key, dummy_array);
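+ /* Use memcpy: the serialized bytes are binary and may contain NULs */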
+ *serialized_key = palloc0(*serialized_size);
+ memcpy(*serialized_key, dummy_array, *serialized_size);
if (is_history)
{
@@ -430,8 +438,8 @@ serialize_item(SamplingDimensions dimensions, int dimensions_mask,
}
/* copy everything */
- *serialized_item = palloc0(*serialized_size + 1);
- strcpy(*serialized_item, dummy_array);
+ *serialized_item = palloc0(*serialized_size);
+ memcpy(*serialized_item, dummy_array, *serialized_size);
}
void
@@ -570,17 +578,17 @@ probe_waits(History *observations, HTAB *profile_hash,
LWLockAcquire(ProcArrayLock, LW_SHARED);
for (i = 0; i < ProcGlobal->allProcCount; i++)
{
- HistoryItem item_history,
- *observation;
- ProfileItem item_profile;
+ //HistoryItem item_history,
+ // *observation;
+ //ProfileItem item_profile;
PGPROC *proc = &ProcGlobal->allProcs[i];
int pid;
uint32 wait_event_info;
SamplingDimensions common_dimensions,
history_dimensions,
profile_dimensions;
- int dimensions_mask_common = pgws_history_dimensions |
- pgws_profile_dimensions;
+ int dimensions_mask_common = saved_history_dimensions |
+ saved_profile_dimensions;
/* Check if we need to sample this process */
if (!pgws_should_sample_proc(proc, &pid, &wait_event_info))
@@ -598,21 +606,27 @@ probe_waits(History *observations, HTAB *profile_hash,
copy_dimensions(&history_dimensions,
&common_dimensions,
- pgws_history_dimensions);
+ saved_history_dimensions);
copy_dimensions(&profile_dimensions,
&common_dimensions,
- pgws_profile_dimensions);
+ saved_profile_dimensions);
- item_history.ts = ts;
- item_history.dimensions = history_dimensions;
+ //item_history.ts = ts;
+ //item_history.dimensions = history_dimensions;
/* Write to the history if needed */
if (write_history)
{
- //TODO something needs to be done here??? because we do not pack
- //the history
+ char *serialized_key,
+ *serialized_item,
+ *observation;
+ int serialized_size = 0;
+
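+ /* Serialize the sampled dimensions and copy them into the next ring-buffer slot */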
observation = get_next_observation(observations);
- *observation = item_history;
+ serialize_item(history_dimensions, saved_history_dimensions,
+ &serialized_item, &serialized_key, &serialized_size,
+ ts, (uint64) 0, true);
+ memcpy(observation, serialized_item, serialized_size);
}
/* Write to the profile if needed */
@@ -626,9 +640,9 @@ probe_waits(History *observations, HTAB *profile_hash,
*stored_item;
if (!profile_pid)
- item_profile.dimensions.pid = 0;
+ profile_dimensions.pid = 0;
- serialize_item(item_profile.dimensions, saved_profile_dimensions,
+ serialize_item(profile_dimensions, saved_profile_dimensions,
&serialized_item, &serialized_key, &serialized_size,
(TimestampTz) 0, count, false);
@@ -659,8 +673,9 @@ probe_waits(History *observations, HTAB *profile_hash,
* Send waits history to shared memory queue.
*/
static void
-send_history(History *observations, shm_mq_handle *mqh)
+send_history(History *observations, shm_mq_handle *mqh) //TODO TODO TODO
{
+ int serialized_size = get_serialized_size(saved_history_dimensions, true);
Size count,
i;
shm_mq_result mq_result;
@@ -679,11 +694,20 @@ send_history(History *observations, shm_mq_handle *mqh)
"receiver of message queue has been detached")));
return;
}
+ /* Send the history dimensions mask next */
+ mq_result = shm_mq_send_compat(mqh, sizeof(saved_history_dimensions), &saved_history_dimensions, false, true);
+ if (mq_result == SHM_MQ_DETACHED)
+ {
+ ereport(WARNING,
+ (errmsg("pg_wait_sampling collector: "
+ "receiver of message queue has been detached")));
+ return;
+ }
for (i = 0; i < count; i++)
{
mq_result = shm_mq_send_compat(mqh,
- sizeof(HistoryItem),
- &observations->items[i],
+ serialized_size,
+ (observations->serialized_items + i * serialized_size),
false,
true);
if (mq_result == SHM_MQ_DETACHED)
@@ -703,7 +727,8 @@ static void
send_profile(HTAB *profile_hash, shm_mq_handle *mqh)
{
HASH_SEQ_STATUS scan_status;
- ProfileItem *item;
+ char *serialized_item;
+ int serialized_size = get_serialized_size(saved_profile_dimensions, true);
Size count = hash_get_num_entries(profile_hash);
shm_mq_result mq_result;
@@ -716,10 +741,19 @@ send_profile(HTAB *profile_hash, shm_mq_handle *mqh)
"receiver of message queue has been detached")));
return;
}
+ /* Send the profile dimensions mask next */
+ mq_result = shm_mq_send_compat(mqh, sizeof(saved_profile_dimensions), &saved_profile_dimensions, false, true);
+ if (mq_result == SHM_MQ_DETACHED)
+ {
+ ereport(WARNING,
+ (errmsg("pg_wait_sampling collector: "
+ "receiver of message queue has been detached")));
+ return;
+ }
hash_seq_init(&scan_status, profile_hash);
- while ((item = (ProfileItem *) hash_seq_search(&scan_status)) != NULL)
+ while ((serialized_item = (char *) hash_seq_search(&scan_status)) != NULL)
{
- mq_result = shm_mq_send_compat(mqh, sizeof(ProfileItem), item, false,
+ mq_result = shm_mq_send_compat(mqh, serialized_size, serialized_item, false,
true);
if (mq_result == SHM_MQ_DETACHED)
{
diff --git a/pg_wait_sampling--1.1--1.2.sql b/pg_wait_sampling--1.1--1.2.sql
index e774f0f..e0e3337 100644
--- a/pg_wait_sampling--1.1--1.2.sql
+++ b/pg_wait_sampling--1.1--1.2.sql
@@ -3,9 +3,26 @@
-- complain if script is sourced in psql, rather than via ALTER EXTENSION
\echo Use "ALTER EXTENSION pg_wait_sampling UPDATE TO 1.2" to load this file. \quit
-DROP FUNCTION pg_wait_sampling_get_current CASCADE;
-DROP FUNCTION pg_wait_sampling_get_profile CASCADE;
-DROP FUNCTION pg_wait_sampling_get_history CASCADE;
+--DROP FUNCTION pg_wait_sampling_get_current (
+-- pid int4,
+-- OUT pid int4,
+-- OUT event_type text,
+-- OUT event text
+--) CASCADE;
+--
+--DROP FUNCTION pg_wait_sampling_get_history (
+-- OUT pid int4,
+-- OUT ts timestamptz,
+-- OUT event_type text,
+-- OUT event text
+--) CASCADE;
+--
+--DROP FUNCTION pg_wait_sampling_get_profile (
+-- OUT pid int4,
+-- OUT event_type text,
+-- OUT event text,
+-- OUT count bigint
+--) CASCADE;
CREATE FUNCTION pg_wait_sampling_get_current_extended (
pid int4,
@@ -35,7 +52,6 @@ GRANT SELECT ON pg_wait_sampling_current TO PUBLIC;
CREATE FUNCTION pg_wait_sampling_get_history_extended (
OUT pid int4,
- OUT ts timestamptz,
OUT event_type text,
OUT event text,
OUT queryid int8,
@@ -48,7 +64,8 @@ CREATE FUNCTION pg_wait_sampling_get_history_extended (
OUT proc_start timestamptz,
OUT client_addr text,
OUT client_hostname text,
- OUT appname text
+ OUT appname text,
+ OUT ts timestamptz
)
RETURNS SETOF record
AS 'MODULE_PATHNAME'
@@ -85,9 +102,9 @@ CREATE VIEW pg_wait_sampling_profile_extended AS
GRANT SELECT ON pg_wait_sampling_profile_extended TO PUBLIC;
-CREATE VIEW pg_wait_sampling_profile AS
- SELECT pid, event_type, event, queryid, SUM(count) FROM pg_wait_sampling_profile_extended
- GROUP BY pid, event_type, event, queryid;
-
-GRANT SELECT ON pg_wait_sampling_profile TO PUBLIC;
+--CREATE VIEW pg_wait_sampling_profile AS
+-- SELECT pid, event_type, event, queryid, SUM(count) FROM pg_wait_sampling_profile_extended
+-- GROUP BY pid, event_type, event, queryid;
+--
+--GRANT SELECT ON pg_wait_sampling_profile TO PUBLIC;
diff --git a/pg_wait_sampling.c b/pg_wait_sampling.c
index 29d96ab..0ac0a8f 100644
--- a/pg_wait_sampling.c
+++ b/pg_wait_sampling.c
@@ -628,127 +628,128 @@ typedef struct
TimestampTz ts;
} WaitCurrentContext;
-//PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current);
-//Datum
-//pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
-//{
-// FuncCallContext *funcctx;
-// WaitCurrentContext *params;
-//
-// check_shmem();
-//
-// if (SRF_IS_FIRSTCALL())
-// {
-// MemoryContext oldcontext;
-// TupleDesc tupdesc;
-//
-// funcctx = SRF_FIRSTCALL_INIT();
-//
-// oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-// params = (WaitCurrentContext *) palloc0(sizeof(WaitCurrentContext));
-// params->ts = GetCurrentTimestamp();
-//
-// funcctx->user_fctx = params;
-// tupdesc = CreateTemplateTupleDesc(4);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
-// INT4OID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
-// TEXTOID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
-// TEXTOID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
-// INT8OID, -1, 0);
-//
-// funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-//
-// LWLockAcquire(ProcArrayLock, LW_SHARED);
-//
-// if (!PG_ARGISNULL(0))
-// {
-// /* pg_wait_sampling_get_current(pid int4) function */
-// HistoryItem *item;
-// PGPROC *proc;
-//
-// proc = search_proc(PG_GETARG_UINT32(0));
-// params->items = (HistoryItem *) palloc0(sizeof(HistoryItem));
-// item = &params->items[0];
-// item->dimensions.pid = proc->pid;
-// item->dimensions.wait_event_info = proc->wait_event_info;
-// item->dimensions.queryId = pgws_proc_queryids[proc - ProcGlobal->allProcs];
-// funcctx->max_calls = 1;
-// }
-// else
-// {
-// /* pg_wait_sampling_current view */
-// int procCount = ProcGlobal->allProcCount,
-// i,
-// j = 0;
-//
-// params->items = (HistoryItem *) palloc0(sizeof(HistoryItem) * procCount);
-// for (i = 0; i < procCount; i++)
-// {
-// PGPROC *proc = &ProcGlobal->allProcs[i];
-//
-// if (!pgws_should_sample_proc(proc,
-// ¶ms->items[j].dimensions.pid,
-// &params->items[j].dimensions.pid,
-// &params->items[j].dimensions.wait_event_info))
-//
-// params->items[j].dimensions.pid = proc->pid;
-// params->items[j].dimensions.wait_event_info = proc->wait_event_info;
-// params->items[j].dimensions.queryId = pgws_proc_queryids[i];
-// j++;
-// }
-// funcctx->max_calls = j;
-// }
-//
-// LWLockRelease(ProcArrayLock);
-//
-// MemoryContextSwitchTo(oldcontext);
-// }
-//
-// /* stuff done on every call of the function */
-// funcctx = SRF_PERCALL_SETUP();
-// params = (WaitCurrentContext *) funcctx->user_fctx;
-//
-// if (funcctx->call_cntr < funcctx->max_calls)
-// {
-// HeapTuple tuple;
-// Datum values[4];
-// bool nulls[4];
-// const char *event_type,
-// *event;
-// HistoryItem *item;
-//
-// item = &params->items[funcctx->call_cntr];
-//
-// /* Make and return next tuple to caller */
-// MemSet(values, 0, sizeof(values));
-// MemSet(nulls, 0, sizeof(nulls));
-//
-// event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
-// event = pgstat_get_wait_event(item->dimensions.wait_event_info);
-// values[0] = Int32GetDatum(item->dimensions.pid);
-// if (event_type)
-// values[1] = PointerGetDatum(cstring_to_text(event_type));
-// else
-// nulls[1] = true;
-// if (event)
-// values[2] = PointerGetDatum(cstring_to_text(event));
-// else
-// nulls[2] = true;
-//
-// values[3] = UInt64GetDatum(item->dimensions.queryId);
-// tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
-//
-// SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
-// }
-// else
-// {
-// /* nothing left */
-// SRF_RETURN_DONE(funcctx);
-// }
-//}
+//TODO OBSOLETE
+PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current);
+Datum
+pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
+{
+ FuncCallContext *funcctx;
+ WaitCurrentContext *params;
+
+ check_shmem();
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ MemoryContext oldcontext;
+ TupleDesc tupdesc;
+
+ funcctx = SRF_FIRSTCALL_INIT();
+
+ oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+ params = (WaitCurrentContext *) palloc0(sizeof(WaitCurrentContext));
+ params->ts = GetCurrentTimestamp();
+
+ funcctx->user_fctx = params;
+ tupdesc = CreateTemplateTupleDesc(4);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
+ INT8OID, -1, 0);
+
+ funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+
+ LWLockAcquire(ProcArrayLock, LW_SHARED);
+
+ if (!PG_ARGISNULL(0))
+ {
+ /* pg_wait_sampling_get_current(pid int4) function */
+ HistoryItem *item;
+ PGPROC *proc;
+
+ proc = search_proc(PG_GETARG_UINT32(0));
+ params->items = (HistoryItem *) palloc0(sizeof(HistoryItem));
+ item = &params->items[0];
+ item->dimensions.pid = proc->pid;
+ item->dimensions.wait_event_info = proc->wait_event_info;
+ item->dimensions.queryId = pgws_proc_queryids[proc - ProcGlobal->allProcs];
+ funcctx->max_calls = 1;
+ }
+ else
+ {
+ /* pg_wait_sampling_current view */
+ int procCount = ProcGlobal->allProcCount,
+ i,
+ j = 0;
+
+ params->items = (HistoryItem *) palloc0(sizeof(HistoryItem) * procCount);
+ for (i = 0; i < procCount; i++)
+ {
+ PGPROC *proc = &ProcGlobal->allProcs[i];
+
+ if (!pgws_should_sample_proc(proc,
+ &params->items[j].dimensions.pid,
+ &params->items[j].dimensions.wait_event_info))
+ continue;
+
+ params->items[j].dimensions.pid = proc->pid;
+ params->items[j].dimensions.wait_event_info = proc->wait_event_info;
+ params->items[j].dimensions.queryId = pgws_proc_queryids[i];
+ j++;
+ }
+ funcctx->max_calls = j;
+ }
+
+ LWLockRelease(ProcArrayLock);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+
+ /* stuff done on every call of the function */
+ funcctx = SRF_PERCALL_SETUP();
+ params = (WaitCurrentContext *) funcctx->user_fctx;
+
+ if (funcctx->call_cntr < funcctx->max_calls)
+ {
+ HeapTuple tuple;
+ Datum values[4];
+ bool nulls[4];
+ const char *event_type,
+ *event;
+ HistoryItem *item;
+
+ item = &params->items[funcctx->call_cntr];
+
+ /* Make and return next tuple to caller */
+ MemSet(values, 0, sizeof(values));
+ MemSet(nulls, 0, sizeof(nulls));
+
+ event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+ event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+ values[0] = Int32GetDatum(item->dimensions.pid);
+ if (event_type)
+ values[1] = PointerGetDatum(cstring_to_text(event_type));
+ else
+ nulls[1] = true;
+ if (event)
+ values[2] = PointerGetDatum(cstring_to_text(event));
+ else
+ nulls[2] = true;
+
+ values[3] = UInt64GetDatum(item->dimensions.queryId);
+ tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+
+ SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
+ }
+ else
+ {
+ /* nothing left */
+ SRF_RETURN_DONE(funcctx);
+ }
+}
static Datum
GetBackendState(BackendState state, bool *is_null)
@@ -1106,7 +1107,7 @@ pgws_init_lock_tag(LOCKTAG *tag, uint32 lock)
/* Get array (history or profile data) from shared memory */
static void *
-receive_array(SHMRequest request, Size item_size, Size *count)
+receive_array(SHMRequest request, Size *item_size, Size *count, int *dimensions_mask)
{
LOCKTAG collectorTag;
shm_mq_result res;
@@ -1168,17 +1169,25 @@ receive_array(SHMRequest request, Size item_size, Size *count)
memcpy(count, data, sizeof(*count));
- result = palloc(item_size * (*count));
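+ /*
+ * The collector sends the dimensions mask right after the item count;
+ * the per-item size is derived from that mask.
+ */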
+ res = shm_mq_receive(recv_mqh, &len, &data, false);
+ if (res != SHM_MQ_SUCCESS || len != sizeof(*dimensions_mask))
+ elog(ERROR, "error reading mq");
+
+ memcpy(dimensions_mask, data, sizeof(*dimensions_mask));
+
+ *item_size = get_serialized_size(*dimensions_mask, true);
+
+ result = palloc(*item_size * (*count));
ptr = result;
for (i = 0; i < *count; i++)
{
res = shm_mq_receive(recv_mqh, &len, &data, false);
- if (res != SHM_MQ_SUCCESS || len != item_size)
+ if (res != SHM_MQ_SUCCESS || len != *item_size)
elog(ERROR, "error reading mq");
- memcpy(ptr, data, item_size);
- ptr += item_size;
+ memcpy(ptr, data, *item_size);
+ ptr += *item_size;
}
}
PG_END_ENSURE_ERROR_CLEANUP(pgws_cleanup_callback, 0);
@@ -1191,20 +1200,24 @@ receive_array(SHMRequest request, Size item_size, Size *count)
}
static void *
-deserialize_array(void *tmp_array, int count, Size real_item_size, bool is_history)
+deserialize_array(void *tmp_array, int count, bool is_history)
{
- Pointer result;
+ Pointer result,
+ ptr;
int i;
int dimensions_mask = (is_history ? saved_history_dimensions : saved_profile_dimensions);
- int serialized_size = get_serialized_size(saved_profile_dimensions, true);
+ int serialized_size = get_serialized_size(dimensions_mask, true);
+
+ result = palloc0((is_history ? sizeof(HistoryItem) : sizeof(ProfileItem)) * count);
+ ptr = result;
- result = palloc0(real_item_size * count);
for (i = 0; i < count; i++)
{
SamplingDimensions tmp_dimensions;
char *cur_item;
TimestampTz *ts;
uint64 *count;
+ int ts_count_size = sizeof(uint64); /* ts and count are both 8 bytes */
cur_item = (((char *) tmp_array) + i * serialized_size);
ts = (is_history ? palloc0(sizeof(TimestampTz)) : NULL);
@@ -1212,109 +1225,116 @@ deserialize_array(void *tmp_array, int count, Size real_item_size, bool is_histo
deserialize_item(&tmp_dimensions, cur_item, dimensions_mask, ts, count);
- memcpy((result + i * real_item_size), &tmp_dimensions, sizeof(SamplingDimensions));
-
+ memcpy(ptr, &tmp_dimensions, sizeof(SamplingDimensions));
+ ptr += sizeof(SamplingDimensions);
if (is_history)
- memcpy((result + i * real_item_size + serialized_size), ts, sizeof(TimestampTz));
+ {
+ memcpy(ptr, ts, ts_count_size);
+ ptr += sizeof(TimestampTz);
+ }
else
- memcpy((result + i * real_item_size + serialized_size), count, sizeof(uint64));
+ {
+ memcpy(ptr, count, ts_count_size);
+ ptr += sizeof(uint64);
+ }
}
return result;
}
-//PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile);
-//Datum
-//pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
-//{
-// Profile *profile;
-// FuncCallContext *funcctx;
-//
-// check_shmem();
-//
-// if (SRF_IS_FIRSTCALL())
-// {
-// MemoryContext oldcontext;
-// TupleDesc tupdesc;
-//
-// funcctx = SRF_FIRSTCALL_INIT();
-// oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-//
-// /* Receive profile from shmq */
-// profile = (Profile *) palloc0(sizeof(Profile));
-// profile->items = (ProfileItem *) receive_array(PROFILE_REQUEST,
-// sizeof(ProfileItem), &profile->count);
-//
-// funcctx->user_fctx = profile;
-// funcctx->max_calls = profile->count;
-//
-// /* Make tuple descriptor */
-// tupdesc = CreateTemplateTupleDesc(5);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
-// INT4OID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
-// TEXTOID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
-// TEXTOID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
-// INT8OID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 5, "count",
-// INT8OID, -1, 0);
-// funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-//
-// MemoryContextSwitchTo(oldcontext);
-// }
-//
-// /* stuff done on every call of the function */
-// funcctx = SRF_PERCALL_SETUP();
-//
-// profile = (Profile *) funcctx->user_fctx;
-//
-// if (funcctx->call_cntr < funcctx->max_calls)
-// {
-// /* for each row */
-// Datum values[5];
-// bool nulls[5];
-// HeapTuple tuple;
-// ProfileItem *item;
-// const char *event_type,
-// *event;
-//
-// item = &profile->items[funcctx->call_cntr];
-//
-// MemSet(values, 0, sizeof(values));
-// MemSet(nulls, 0, sizeof(nulls));
-//
-// /* Make and return next tuple to caller */
-// event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
-// event = pgstat_get_wait_event(item->dimensions.wait_event_info);
-// values[0] = Int32GetDatum(item->dimensions.pid);
-// if (event_type)
-// values[1] = PointerGetDatum(cstring_to_text(event_type));
-// else
-// nulls[1] = true;
-// if (event)
-// values[2] = PointerGetDatum(cstring_to_text(event));
-// else
-// nulls[2] = true;
-//
-// if (pgws_profileQueries)
-// values[3] = UInt64GetDatum(item->dimensions.queryId);
-// else
-// values[3] = (Datum) 0;
-//
-// values[4] = UInt64GetDatum(item->count);
-//
-// tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
-//
-// SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
-// }
-// else
-// {
-// /* nothing left */
-// SRF_RETURN_DONE(funcctx);
-// }
-//}
+//TODO OBSOLETE
+PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile);
+Datum
+pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
+{
+ Profile *profile;
+ FuncCallContext *funcctx;
+
+ check_shmem();
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ MemoryContext oldcontext;
+ TupleDesc tupdesc;
+
+ funcctx = SRF_FIRSTCALL_INIT();
+ oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+
+ /* Receive profile from shmq */
+ profile = (Profile *) palloc0(sizeof(Profile));
+ //profile->items = (ProfileItem *) receive_array(PROFILE_REQUEST,
+ // sizeof(ProfileItem), &profile->count);
+
+ funcctx->user_fctx = profile;
+ funcctx->max_calls = profile->count;
+
+ /* Make tuple descriptor */
+ tupdesc = CreateTemplateTupleDesc(5);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 5, "count",
+ INT8OID, -1, 0);
+ funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+
+ /* stuff done on every call of the function */
+ funcctx = SRF_PERCALL_SETUP();
+
+ profile = (Profile *) funcctx->user_fctx;
+
+ if (funcctx->call_cntr < funcctx->max_calls)
+ {
+ /* for each row */
+ Datum values[5];
+ bool nulls[5];
+ HeapTuple tuple;
+ ProfileItem *item;
+ const char *event_type,
+ *event;
+
+ item = &profile->items[funcctx->call_cntr];
+
+ MemSet(values, 0, sizeof(values));
+ MemSet(nulls, 0, sizeof(nulls));
+
+ /* Make and return next tuple to caller */
+ event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+ event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+ values[0] = Int32GetDatum(item->dimensions.pid);
+ if (event_type)
+ values[1] = PointerGetDatum(cstring_to_text(event_type));
+ else
+ nulls[1] = true;
+ if (event)
+ values[2] = PointerGetDatum(cstring_to_text(event));
+ else
+ nulls[2] = true;
+
+ if (pgws_profileQueries)
+ values[3] = UInt64GetDatum(item->dimensions.queryId);
+ else
+ values[3] = (Datum) 0;
+
+ values[4] = UInt64GetDatum(item->count);
+
+ tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+
+ SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
+ }
+ else
+ {
+ /* nothing left */
+ SRF_RETURN_DONE(funcctx);
+ }
+}
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile_extended);
Datum
@@ -1330,6 +1350,7 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
MemoryContext oldcontext;
TupleDesc tupdesc;
void *tmp_array;
+ Size serialized_size;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -1337,11 +1358,9 @@ pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
/* Receive profile from shmq */
profile = (Profile *) palloc0(sizeof(Profile));
- tmp_array = receive_array(PROFILE_REQUEST,
- get_serialized_size(saved_profile_dimensions, true),
- &profile->count);
- profile->items = (ProfileItem *) deserialize_array(tmp_array, profile->count,
- sizeof(ProfileItem), false);
+ tmp_array = receive_array(PROFILE_REQUEST, &serialized_size,
+ &profile->count, &saved_profile_dimensions);
+ profile->items = (ProfileItem *) deserialize_array(tmp_array, profile->count, false);
funcctx->user_fctx = profile;
funcctx->max_calls = profile->count;
@@ -1420,95 +1439,96 @@ pg_wait_sampling_reset_profile(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
-//PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history);
-//Datum
-//pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
-//{
-// History *history;
-// FuncCallContext *funcctx;
-//
-// check_shmem();
-//
-// if (SRF_IS_FIRSTCALL())
-// {
-// MemoryContext oldcontext;
-// TupleDesc tupdesc;
-//
-// funcctx = SRF_FIRSTCALL_INIT();
-// oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-//
-// /* Receive history from shmq */
-// history = (History *) palloc0(sizeof(History));
-// history->items = (HistoryItem *) receive_array(HISTORY_REQUEST,
-// sizeof(HistoryItem), &history->count);
-//
-// funcctx->user_fctx = history;
-// funcctx->max_calls = history->count;
-//
-// /* Make tuple descriptor */
-// tupdesc = CreateTemplateTupleDesc(5);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
-// INT4OID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 2, "sample_ts",
-// TIMESTAMPTZOID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 3, "type",
-// TEXTOID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 4, "event",
-// TEXTOID, -1, 0);
-// TupleDescInitEntry(tupdesc, (AttrNumber) 5, "queryid",
-// INT8OID, -1, 0);
-// funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-//
-// MemoryContextSwitchTo(oldcontext);
-// }
-//
-// /* stuff done on every call of the function */
-// funcctx = SRF_PERCALL_SETUP();
-//
-// history = (History *) funcctx->user_fctx;
-//
-// if (history->index < history->count)
-// {
-// HeapTuple tuple;
-// HistoryItem *item;
-// Datum values[5];
-// bool nulls[5];
-// const char *event_type,
-// *event;
-//
-// item = &history->items[history->index];
-//
-// /* Make and return next tuple to caller */
-// MemSet(values, 0, sizeof(values));
-// MemSet(nulls, 0, sizeof(nulls));
-//
-// event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
-// event = pgstat_get_wait_event(item->dimensions.wait_event_info);
-// values[0] = Int32GetDatum(item->dimensions.pid);
-// values[1] = TimestampTzGetDatum(item->ts);
-// if (event_type)
-// values[2] = PointerGetDatum(cstring_to_text(event_type));
-// else
-// nulls[2] = true;
-// if (event)
-// values[3] = PointerGetDatum(cstring_to_text(event));
-// else
-// nulls[3] = true;
-//
-// values[4] = UInt64GetDatum(item->dimensions.queryId);
-// tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
-//
-// history->index++;
-// SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
-// }
-// else
-// {
-// /* nothing left */
-// SRF_RETURN_DONE(funcctx);
-// }
-//
-// PG_RETURN_VOID();
-//}
+//TODO OBSOLETE
+PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history);
+Datum
+pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
+{
+ History *history;
+ FuncCallContext *funcctx;
+
+ check_shmem();
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ MemoryContext oldcontext;
+ TupleDesc tupdesc;
+
+ funcctx = SRF_FIRSTCALL_INIT();
+ oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+
+ /* Receive history from shmq */
+ history = (History *) palloc0(sizeof(History));
+ //history->items = (HistoryItem *) receive_array(HISTORY_REQUEST,
+ // sizeof(HistoryItem), &history->count, &saved_history_dimensions);
+
+ funcctx->user_fctx = history;
+ funcctx->max_calls = history->count;
+
+ /* Make tuple descriptor */
+ tupdesc = CreateTemplateTupleDesc(5);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2, "sample_ts",
+ TIMESTAMPTZOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3, "type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 4, "event",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 5, "queryid",
+ INT8OID, -1, 0);
+ funcctx->tuple_desc = BlessTupleDesc(tupdesc);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+
+ /* stuff done on every call of the function */
+ funcctx = SRF_PERCALL_SETUP();
+
+ history = (History *) funcctx->user_fctx;
+
+ if (history->index < history->count)
+ {
+ //HeapTuple tuple;
+ //HistoryItem *item;
+ //Datum values[5];
+ //bool nulls[5];
+ //const char *event_type,
+ // *event;
+ //
+ //item = &history->items[history->index];
+ //
+ ///* Make and return next tuple to caller */
+ //MemSet(values, 0, sizeof(values));
+ //MemSet(nulls, 0, sizeof(nulls));
+ //
+ //event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
+ //event = pgstat_get_wait_event(item->dimensions.wait_event_info);
+ //values[0] = Int32GetDatum(item->dimensions.pid);
+ //values[1] = TimestampTzGetDatum(item->ts);
+ //if (event_type)
+ // values[2] = PointerGetDatum(cstring_to_text(event_type));
+ //else
+ // nulls[2] = true;
+ //if (event)
+ // values[3] = PointerGetDatum(cstring_to_text(event));
+ //else
+ // nulls[3] = true;
+ //
+ //values[4] = UInt64GetDatum(item->dimensions.queryId);
+ //tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+ //
+ //history->index++;
+ //SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
+ }
+ else
+ {
+ /* nothing left */
+ SRF_RETURN_DONE(funcctx);
+ }
+
+ PG_RETURN_VOID();
+}
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history_extended);
Datum
@@ -1524,24 +1544,23 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
{
MemoryContext oldcontext;
TupleDesc tupdesc;
+ Size serialized_size;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/* Receive history from shmq */
history = (History *) palloc0(sizeof(History));
- tmp_array = receive_array(PROFILE_REQUEST,
- get_serialized_size(saved_history_dimensions, true),
- &history->count);
- history->items = (HistoryItem *) deserialize_array(tmp_array, history->count,
- sizeof(HistoryItem), true);
+ tmp_array = receive_array(HISTORY_REQUEST, &serialized_size,
+ &history->count, &saved_history_dimensions);
+ history->items = (HistoryItem *) deserialize_array(tmp_array, history->count, true);
funcctx->user_fctx = history;
funcctx->max_calls = history->count;
/* Make tuple descriptor */
tupdesc = CreateTemplateTupleDesc(15);
fill_tuple_desc (tupdesc);
- TupleDescInitEntry(tupdesc, (AttrNumber) 15, "sample_ts", //TODO we have moved this to the end to have it more in line with current and profile; debatable; maybe move it to first place?
+ TupleDescInitEntry(tupdesc, (AttrNumber) 15, "sample_ts", //XXX we have moved this to the end to have it more in line with current and profile; debatable; maybe move it to first place?
TIMESTAMPTZOID, -1, 0);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
@@ -1567,7 +1586,7 @@ pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
MemSet(nulls, 0, sizeof(nulls));
fill_values_and_nulls(values, nulls, item->dimensions, pgws_history_dimensions);
- values[14] = TimestampTzGetDatum(item->ts); //TODO!!!!!!!!!!!
+ values[14] = TimestampTzGetDatum(item->ts); //XXX same as above
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
diff --git a/pg_wait_sampling.h b/pg_wait_sampling.h
index d912a51..c9b3b4a 100644
--- a/pg_wait_sampling.h
+++ b/pg_wait_sampling.h
@@ -91,6 +91,8 @@ typedef struct
bool wraparound;
Size index;
Size count;
+ char *serialized_items;
+ /* used only in pg_wait_sampling.c */
HistoryItem *items;
} History;
From 64efec7aeab54718643aa99e23ad3ad6013ab432 Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Tue, 24 Jun 2025 13:21:33 +0700
Subject: [PATCH 07/11] Add history reset
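The collector handles a new HISTORY_RESET request by freeing the observations
array and re-allocating it with pgws_historySize entries; on the SQL level this
is exposed as pg_wait_sampling_reset_history(), which is revoked from PUBLIC.
A minimal usage sketch (assuming a superuser session on an installation that
already has this update):
    -- discard all accumulated history samples; the profile hash is untouched
    SELECT pg_wait_sampling_reset_history();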
---
collector.c | 6 ++++++
pg_wait_sampling--1.1--1.2.sql | 8 ++++++++
pg_wait_sampling.c | 28 ++++++++++++++++++++++++++++
pg_wait_sampling.h | 1 +
4 files changed, 43 insertions(+)
diff --git a/collector.c b/collector.c
index 65a146b..9e6afad 100644
--- a/collector.c
+++ b/collector.c
@@ -961,6 +961,12 @@ pgws_collector_main(Datum main_arg)
}
shm_mq_detach(mqh);
}
+ else if (request == HISTORY_RESET)
+ {
+ /* Reset history */
+ pfree(observations.items);
+ alloc_history(&observations, pgws_historySize);
+ }
else if (request == PROFILE_RESET)
{
/* Reset profile hash */
diff --git a/pg_wait_sampling--1.1--1.2.sql b/pg_wait_sampling--1.1--1.2.sql
index e0e3337..073104d 100644
--- a/pg_wait_sampling--1.1--1.2.sql
+++ b/pg_wait_sampling--1.1--1.2.sql
@@ -102,6 +102,14 @@ CREATE VIEW pg_wait_sampling_profile_extended AS
GRANT SELECT ON pg_wait_sampling_profile_extended TO PUBLIC;
+CREATE FUNCTION pg_wait_sampling_reset_history()
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C VOLATILE STRICT;
+
+-- Don't want this to be available to non-superusers.
+REVOKE ALL ON FUNCTION pg_wait_sampling_reset_history() FROM PUBLIC;
+
--CREATE VIEW pg_wait_sampling_profile AS
-- SELECT pid, event_type, event, queryid, SUM(count) FROM pg_wait_sampling_profile_extended
-- GROUP BY pid, event_type, event, queryid;
diff --git a/pg_wait_sampling.c b/pg_wait_sampling.c
index 0ac0a8f..7b2f823 100644
--- a/pg_wait_sampling.c
+++ b/pg_wait_sampling.c
@@ -1439,6 +1439,34 @@ pg_wait_sampling_reset_profile(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
+PG_FUNCTION_INFO_V1(pg_wait_sampling_reset_history);
+Datum
+pg_wait_sampling_reset_history(PG_FUNCTION_ARGS)
+{
+ LOCKTAG collectorTag;
+
+ check_shmem();
+
+ pgws_init_lock_tag(&queueTag, PGWS_QUEUE_LOCK);
+
+ LockAcquire(&queueTag, ExclusiveLock, false, false);
+
+ pgws_init_lock_tag(&collectorTag, PGWS_COLLECTOR_LOCK);
+ LockAcquire(&collectorTag, ExclusiveLock, false, false);
+ pgws_collector_hdr->request = HISTORY_RESET;
+ LockRelease(&collectorTag, ExclusiveLock, false);
+
+ if (!pgws_collector_hdr->latch)
+ ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("pg_wait_sampling collector wasn't started")));
+
+ SetLatch(pgws_collector_hdr->latch);
+
+ LockRelease(&queueTag, ExclusiveLock, false);
+
+ PG_RETURN_VOID();
+}
+
//TODO OBSOLETE
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history);
Datum
diff --git a/pg_wait_sampling.h b/pg_wait_sampling.h
index c9b3b4a..3b84b60 100644
--- a/pg_wait_sampling.h
+++ b/pg_wait_sampling.h
@@ -100,6 +100,7 @@ typedef enum
{
NO_REQUEST,
HISTORY_REQUEST,
+ HISTORY_RESET,
PROFILE_REQUEST,
PROFILE_RESET
} SHMRequest;
From f6a2203ed1b9643a07343424942c8a2513aba2c9 Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Tue, 24 Jun 2025 18:00:01 +0700
Subject: [PATCH 08/11] Remove old functions for good
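The upgrade script now really drops the 1.1 functions and views (the
DROP ... CASCADE statements are no longer commented out) and recreates them
under their original names with the extended column set, so the interim
*_extended objects disappear. Roughly, an existing installation picks this up
as follows (sketch; column names as declared in the script):
    ALTER EXTENSION pg_wait_sampling UPDATE TO '1.2';
    -- the familiar view names now expose the additional dimensions
    SELECT pid, event_type, event, queryid, backend_type, count
    FROM pg_wait_sampling_profile
    LIMIT 10;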
---
pg_wait_sampling--1.1--1.2.sql | 69 ++++----
pg_wait_sampling.c | 312 +--------------------------------
2 files changed, 33 insertions(+), 348 deletions(-)
diff --git a/pg_wait_sampling--1.1--1.2.sql b/pg_wait_sampling--1.1--1.2.sql
index 073104d..8653d87 100644
--- a/pg_wait_sampling--1.1--1.2.sql
+++ b/pg_wait_sampling--1.1--1.2.sql
@@ -3,28 +3,28 @@
-- complain if script is sourced in psql, rather than via ALTER EXTENSION
\echo Use "ALTER EXTENSION pg_wait_sampling UPDATE TO 1.2" to load this file. \quit
---DROP FUNCTION pg_wait_sampling_get_current (
--- pid int4,
--- OUT pid int4,
--- OUT event_type text,
--- OUT event text
---) CASCADE;
---
---DROP FUNCTION pg_wait_sampling_get_history (
--- OUT pid int4,
--- OUT ts timestamptz,
--- OUT event_type text,
--- OUT event text
---) CASCADE;
---
---DROP FUNCTION pg_wait_sampling_get_profile (
--- OUT pid int4,
--- OUT event_type text,
--- OUT event text,
--- OUT count bigint
---) CASCADE;
+DROP FUNCTION pg_wait_sampling_get_current (
+ pid int4,
+ OUT pid int4,
+ OUT event_type text,
+ OUT event text
+) CASCADE;
+
+DROP FUNCTION pg_wait_sampling_get_history (
+ OUT pid int4,
+ OUT ts timestamptz,
+ OUT event_type text,
+ OUT event text
+) CASCADE;
-CREATE FUNCTION pg_wait_sampling_get_current_extended (
+DROP FUNCTION pg_wait_sampling_get_profile (
+ OUT pid int4,
+ OUT event_type text,
+ OUT event text,
+ OUT count bigint
+) CASCADE;
+
+CREATE FUNCTION pg_wait_sampling_get_current (
pid int4,
OUT pid int4,
OUT event_type text,
@@ -45,12 +45,12 @@ RETURNS SETOF record
AS 'MODULE_PATHNAME'
LANGUAGE C VOLATILE CALLED ON NULL INPUT;
-CREATE VIEW pg_wait_sampling_current_extended AS
- SELECT * FROM pg_wait_sampling_get_current_extended(NULL::integer);
+CREATE VIEW pg_wait_sampling_current AS
+ SELECT * FROM pg_wait_sampling_get_current(NULL::integer);
GRANT SELECT ON pg_wait_sampling_current TO PUBLIC;
-CREATE FUNCTION pg_wait_sampling_get_history_extended (
+CREATE FUNCTION pg_wait_sampling_get_history (
OUT pid int4,
OUT event_type text,
OUT event text,
@@ -71,12 +71,12 @@ RETURNS SETOF record
AS 'MODULE_PATHNAME'
LANGUAGE C VOLATILE STRICT;
-CREATE VIEW pg_wait_sampling_history_extended AS
- SELECT * FROM pg_wait_sampling_get_history_extended();
+CREATE VIEW pg_wait_sampling_history AS
+ SELECT * FROM pg_wait_sampling_get_history();
-GRANT SELECT ON pg_wait_sampling_history_extended TO PUBLIC;
+GRANT SELECT ON pg_wait_sampling_history TO PUBLIC;
-CREATE FUNCTION pg_wait_sampling_get_profile_extended (
+CREATE FUNCTION pg_wait_sampling_get_profile (
OUT pid int4,
OUT event_type text,
OUT event text,
@@ -97,10 +97,10 @@ RETURNS SETOF record
AS 'MODULE_PATHNAME'
LANGUAGE C VOLATILE STRICT;
-CREATE VIEW pg_wait_sampling_profile_extended AS
- SELECT * FROM pg_wait_sampling_get_profile_extended();
+CREATE VIEW pg_wait_sampling_profile AS
+ SELECT * FROM pg_wait_sampling_get_profile();
-GRANT SELECT ON pg_wait_sampling_profile_extended TO PUBLIC;
+GRANT SELECT ON pg_wait_sampling_profile TO PUBLIC;
CREATE FUNCTION pg_wait_sampling_reset_history()
RETURNS void
@@ -109,10 +109,3 @@ LANGUAGE C VOLATILE STRICT;
-- Don't want this to be available to non-superusers.
REVOKE ALL ON FUNCTION pg_wait_sampling_reset_history() FROM PUBLIC;
-
---CREATE VIEW pg_wait_sampling_profile AS
--- SELECT pid, event_type, event, queryid, SUM(count) FROM pg_wait_sampling_profile_extended
--- GROUP BY pid, event_type, event, queryid;
---
---GRANT SELECT ON pg_wait_sampling_profile TO PUBLIC;
-
diff --git a/pg_wait_sampling.c b/pg_wait_sampling.c
index 7b2f823..2d2b8d0 100644
--- a/pg_wait_sampling.c
+++ b/pg_wait_sampling.c
@@ -628,129 +628,6 @@ typedef struct
TimestampTz ts;
} WaitCurrentContext;
-//TODO OBSOLETE
-PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current);
-Datum
-pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
-{
- FuncCallContext *funcctx;
- WaitCurrentContext *params;
-
- check_shmem();
-
- if (SRF_IS_FIRSTCALL())
- {
- MemoryContext oldcontext;
- TupleDesc tupdesc;
-
- funcctx = SRF_FIRSTCALL_INIT();
-
- oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
- params = (WaitCurrentContext *) palloc0(sizeof(WaitCurrentContext));
- params->ts = GetCurrentTimestamp();
-
- funcctx->user_fctx = params;
- tupdesc = CreateTemplateTupleDesc(4);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
- INT8OID, -1, 0);
-
- funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-
- LWLockAcquire(ProcArrayLock, LW_SHARED);
-
- if (!PG_ARGISNULL(0))
- {
- /* pg_wait_sampling_get_current(pid int4) function */
- HistoryItem *item;
- PGPROC *proc;
-
- proc = search_proc(PG_GETARG_UINT32(0));
- params->items = (HistoryItem *) palloc0(sizeof(HistoryItem));
- item = &params->items[0];
- item->dimensions.pid = proc->pid;
- item->dimensions.wait_event_info = proc->wait_event_info;
- item->dimensions.queryId = pgws_proc_queryids[proc - ProcGlobal->allProcs];
- funcctx->max_calls = 1;
- }
- else
- {
- /* pg_wait_sampling_current view */
- int procCount = ProcGlobal->allProcCount,
- i,
- j = 0;
-
- params->items = (HistoryItem *) palloc0(sizeof(HistoryItem) * procCount);
- for (i = 0; i < procCount; i++)
- {
- PGPROC *proc = &ProcGlobal->allProcs[i];
-
- if (!pgws_should_sample_proc(proc,
- &params->items[j].dimensions.pid,
- &params->items[j].dimensions.wait_event_info))
- continue;
-
- params->items[j].dimensions.pid = proc->pid;
- params->items[j].dimensions.wait_event_info = proc->wait_event_info;
- params->items[j].dimensions.queryId = pgws_proc_queryids[i];
- j++;
- }
- funcctx->max_calls = j;
- }
-
- LWLockRelease(ProcArrayLock);
-
- MemoryContextSwitchTo(oldcontext);
- }
-
- /* stuff done on every call of the function */
- funcctx = SRF_PERCALL_SETUP();
- params = (WaitCurrentContext *) funcctx->user_fctx;
-
- if (funcctx->call_cntr < funcctx->max_calls)
- {
- HeapTuple tuple;
- Datum values[4];
- bool nulls[4];
- const char *event_type,
- *event;
- HistoryItem *item;
-
- item = &params->items[funcctx->call_cntr];
-
- /* Make and return next tuple to caller */
- MemSet(values, 0, sizeof(values));
- MemSet(nulls, 0, sizeof(nulls));
-
- event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
- event = pgstat_get_wait_event(item->dimensions.wait_event_info);
- values[0] = Int32GetDatum(item->dimensions.pid);
- if (event_type)
- values[1] = PointerGetDatum(cstring_to_text(event_type));
- else
- nulls[1] = true;
- if (event)
- values[2] = PointerGetDatum(cstring_to_text(event));
- else
- nulls[2] = true;
-
- values[3] = UInt64GetDatum(item->dimensions.queryId);
- tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
-
- SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
- }
- else
- {
- /* nothing left */
- SRF_RETURN_DONE(funcctx);
- }
-}
-
static Datum
GetBackendState(BackendState state, bool *is_null)
{
@@ -973,9 +850,9 @@ fill_values_and_nulls(Datum *values, bool *nulls, SamplingDimensions dimensions,
nulls[13] = true;
}
-PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current_extended);
+PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current);
Datum
-pg_wait_sampling_get_current_extended(PG_FUNCTION_ARGS)
+pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
WaitCurrentContext *params;
@@ -1242,7 +1119,6 @@ deserialize_array(void *tmp_array, int count, bool is_history)
return result;
}
-//TODO OBSOLETE
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile);
Datum
pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
@@ -1252,99 +1128,6 @@ pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
check_shmem();
- if (SRF_IS_FIRSTCALL())
- {
- MemoryContext oldcontext;
- TupleDesc tupdesc;
-
- funcctx = SRF_FIRSTCALL_INIT();
- oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-
- /* Receive profile from shmq */
- profile = (Profile *) palloc0(sizeof(Profile));
- //profile->items = (ProfileItem *) receive_array(PROFILE_REQUEST,
- // sizeof(ProfileItem), &profile->count);
-
- funcctx->user_fctx = profile;
- funcctx->max_calls = profile->count;
-
- /* Make tuple descriptor */
- tupdesc = CreateTemplateTupleDesc(5);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2, "type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3, "event",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 5, "count",
- INT8OID, -1, 0);
- funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-
- MemoryContextSwitchTo(oldcontext);
- }
-
- /* stuff done on every call of the function */
- funcctx = SRF_PERCALL_SETUP();
-
- profile = (Profile *) funcctx->user_fctx;
-
- if (funcctx->call_cntr < funcctx->max_calls)
- {
- /* for each row */
- Datum values[5];
- bool nulls[5];
- HeapTuple tuple;
- ProfileItem *item;
- const char *event_type,
- *event;
-
- item = &profile->items[funcctx->call_cntr];
-
- MemSet(values, 0, sizeof(values));
- MemSet(nulls, 0, sizeof(nulls));
-
- /* Make and return next tuple to caller */
- event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
- event = pgstat_get_wait_event(item->dimensions.wait_event_info);
- values[0] = Int32GetDatum(item->dimensions.pid);
- if (event_type)
- values[1] = PointerGetDatum(cstring_to_text(event_type));
- else
- nulls[1] = true;
- if (event)
- values[2] = PointerGetDatum(cstring_to_text(event));
- else
- nulls[2] = true;
-
- if (pgws_profileQueries)
- values[3] = UInt64GetDatum(item->dimensions.queryId);
- else
- values[3] = (Datum) 0;
-
- values[4] = UInt64GetDatum(item->count);
-
- tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
-
- SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
- }
- else
- {
- /* nothing left */
- SRF_RETURN_DONE(funcctx);
- }
-}
-
-PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile_extended);
-Datum
-pg_wait_sampling_get_profile_extended(PG_FUNCTION_ARGS)
-{
- Profile *profile;
- FuncCallContext *funcctx;
-
- check_shmem();
-
if (SRF_IS_FIRSTCALL())
{
MemoryContext oldcontext;
@@ -1467,100 +1250,9 @@ pg_wait_sampling_reset_history(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
-//TODO OBSOLETE
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history);
Datum
pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
-{
- History *history;
- FuncCallContext *funcctx;
-
- check_shmem();
-
- if (SRF_IS_FIRSTCALL())
- {
- MemoryContext oldcontext;
- TupleDesc tupdesc;
-
- funcctx = SRF_FIRSTCALL_INIT();
- oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-
- /* Receive history from shmq */
- history = (History *) palloc0(sizeof(History));
- //history->items = (HistoryItem *) receive_array(HISTORY_REQUEST,
- // sizeof(HistoryItem), &history->count, &saved_history_dimensions);
-
- funcctx->user_fctx = history;
- funcctx->max_calls = history->count;
-
- /* Make tuple descriptor */
- tupdesc = CreateTemplateTupleDesc(5);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 2, "sample_ts",
- TIMESTAMPTZOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 3, "type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 4, "event",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 5, "queryid",
- INT8OID, -1, 0);
- funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-
- MemoryContextSwitchTo(oldcontext);
- }
-
- /* stuff done on every call of the function */
- funcctx = SRF_PERCALL_SETUP();
-
- history = (History *) funcctx->user_fctx;
-
- if (history->index < history->count)
- {
- //HeapTuple tuple;
- //HistoryItem *item;
- //Datum values[5];
- //bool nulls[5];
- //const char *event_type,
- // *event;
- //
- //item = &history->items[history->index];
- //
- ///* Make and return next tuple to caller */
- //MemSet(values, 0, sizeof(values));
- //MemSet(nulls, 0, sizeof(nulls));
- //
- //event_type = pgstat_get_wait_event_type(item->dimensions.wait_event_info);
- //event = pgstat_get_wait_event(item->dimensions.wait_event_info);
- //values[0] = Int32GetDatum(item->dimensions.pid);
- //values[1] = TimestampTzGetDatum(item->ts);
- //if (event_type)
- // values[2] = PointerGetDatum(cstring_to_text(event_type));
- //else
- // nulls[2] = true;
- //if (event)
- // values[3] = PointerGetDatum(cstring_to_text(event));
- //else
- // nulls[3] = true;
- //
- //values[4] = UInt64GetDatum(item->dimensions.queryId);
- //tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
- //
- //history->index++;
- //SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
- }
- else
- {
- /* nothing left */
- SRF_RETURN_DONE(funcctx);
- }
-
- PG_RETURN_VOID();
-}
-
-PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history_extended);
-Datum
-pg_wait_sampling_get_history_extended(PG_FUNCTION_ARGS)
{
History *history;
FuncCallContext *funcctx;
From 09fc6620aaaed44e7ea47033e3d87fd5ab26ef58 Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Wed, 25 Jun 2025 09:33:58 +0700
Subject: [PATCH 09/11] Do as pg_stat_statements does with different extension
versions
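Each set-returning function now has one internal implementation and thin
version-specific entry points, as pg_stat_statements does: SQL definitions from
the 1.1 script keep resolving to the old C symbol names, the 1.2 script binds
the same SQL names to the new *_1_2 symbols, and the internal function
cross-checks the caller's expected tuple descriptor against the API version it
was invoked for. For illustration, this is essentially what the 1.2 script
declares for the profile function (column list as documented in the README):
    CREATE FUNCTION pg_wait_sampling_get_profile (
        OUT pid int4,
        OUT event_type text,
        OUT event text,
        OUT queryid int8,
        OUT role_id int8,
        OUT database_id int8,
        OUT parallel_leader_pid int4,
        OUT is_regular_backend bool,
        OUT backend_type text,
        OUT backend_state text,
        OUT proc_start timestamptz,
        OUT client_addr text,
        OUT client_hostname text,
        OUT appname text,
        OUT count int8
    )
    RETURNS SETOF record
    AS 'MODULE_PATHNAME', 'pg_wait_sampling_get_profile_1_2'
    LANGUAGE C VOLATILE STRICT;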
---
pg_wait_sampling--1.1--1.2.sql | 6 +-
pg_wait_sampling.c | 281 ++++++++++++++++++++++++---------
2 files changed, 205 insertions(+), 82 deletions(-)
diff --git a/pg_wait_sampling--1.1--1.2.sql b/pg_wait_sampling--1.1--1.2.sql
index 8653d87..e9499c5 100644
--- a/pg_wait_sampling--1.1--1.2.sql
+++ b/pg_wait_sampling--1.1--1.2.sql
@@ -42,7 +42,7 @@ CREATE FUNCTION pg_wait_sampling_get_current (
OUT appname text
)
RETURNS SETOF record
-AS 'MODULE_PATHNAME'
+AS 'MODULE_PATHNAME', 'pg_wait_sampling_get_current_1_2'
LANGUAGE C VOLATILE CALLED ON NULL INPUT;
CREATE VIEW pg_wait_sampling_current AS
@@ -68,7 +68,7 @@ CREATE FUNCTION pg_wait_sampling_get_history (
OUT ts timestamptz
)
RETURNS SETOF record
-AS 'MODULE_PATHNAME'
+AS 'MODULE_PATHNAME', 'pg_wait_sampling_get_history_1_2'
LANGUAGE C VOLATILE STRICT;
CREATE VIEW pg_wait_sampling_history AS
@@ -94,7 +94,7 @@ CREATE FUNCTION pg_wait_sampling_get_profile (
OUT count int8
)
RETURNS SETOF record
-AS 'MODULE_PATHNAME'
+AS 'MODULE_PATHNAME', 'pg_wait_sampling_get_profile_1_2'
LANGUAGE C VOLATILE STRICT;
CREATE VIEW pg_wait_sampling_profile AS
diff --git a/pg_wait_sampling.c b/pg_wait_sampling.c
index 2d2b8d0..fe20dba 100644
--- a/pg_wait_sampling.c
+++ b/pg_wait_sampling.c
@@ -129,6 +129,20 @@ static const struct config_enum_entry pgws_profile_queries_options[] =
{NULL, 0, false}
};
+/* like in pg_stat_statements */
+typedef enum pgwsVersion
+{
+ PGWS_V1_1 = 0,
+ PGWS_V1_2,
+} pgwsVersion;
+
+Datum pg_wait_sampling_get_current_internal(FunctionCallInfo fcinfo,
+ pgwsVersion api_version);
+Datum pg_wait_sampling_get_profile_internal(FunctionCallInfo fcinfo,
+ pgwsVersion api_version);
+Datum pg_wait_sampling_get_history_internal(FunctionCallInfo fcinfo,
+ pgwsVersion api_version);
+
int pgws_historySize = 5000;
int pgws_historyPeriod = 10;
int pgws_profilePeriod = 10;
@@ -739,11 +753,16 @@ get_beentry_by_procpid(int pid)
return NULL;
}
+/* like in pg_stat_statements */
+#define PG_WAIT_SAMPLING_COLS_V1_1 5
+#define PG_WAIT_SAMPLING_COLS_V1_2 15
+#define PG_WAIT_SAMPLING_COLS 15 /* maximum of above */
+
/*
* Common routine to fill "dimensions" part of tupdesc
*/
static void
-fill_tuple_desc (TupleDesc tupdesc)
+fill_tuple_desc (TupleDesc tupdesc, pgwsVersion api_version)
{
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "pid",
INT4OID, -1, 0);
@@ -753,30 +772,34 @@ fill_tuple_desc (TupleDesc tupdesc)
TEXTOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 4, "queryid",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 5, "role_id",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 6, "database_id",
- INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 7, "parallel_leader_pid",
- INT4OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 8, "is_regular_backend",
- BOOLOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_type",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_state",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 11, "proc_start",
- TIMESTAMPTZOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_addr",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_hostname",
- TEXTOID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 14, "appname",
- TEXTOID, -1, 0);
+ if (api_version >= PGWS_V1_2)
+ {
+ TupleDescInitEntry(tupdesc, (AttrNumber) 5, "role_id",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 6, "database_id",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 7, "parallel_leader_pid",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 8, "is_regular_backend",
+ BOOLOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 9, "backend_type",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 10, "backend_state",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 11, "proc_start",
+ TIMESTAMPTZOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 12, "client_addr",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 13, "client_hostname",
+ TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 14, "appname",
+ TEXTOID, -1, 0);
+ }
}
static void
-fill_values_and_nulls(Datum *values, bool *nulls, SamplingDimensions dimensions, int dimensions_mask)
+fill_values_and_nulls(Datum *values, bool *nulls, SamplingDimensions dimensions,
+ int dimensions_mask, pgwsVersion api_version)
{
const char *event_type,
*event,
@@ -812,53 +835,88 @@ fill_values_and_nulls(Datum *values, bool *nulls, SamplingDimensions dimensions,
values[4] = ObjectIdGetDatum(dimensions.role_id);
else
nulls[4] = true;
- if (dimensions_mask & PGWS_DIMENSIONS_DB_ID)
- values[5] = ObjectIdGetDatum(dimensions.database_id);
- else
- nulls[5] = true;
- if (dimensions_mask & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
- values[6] = Int32GetDatum(dimensions.parallel_leader_pid);
- else
- nulls[6] = true;
- if (dimensions_mask & PGWS_DIMENSIONS_IS_REGULAR_BE)
- values[7] = BoolGetDatum(dimensions.is_regular_backend);
- else
- nulls[7] = true;
- if (backend_type && (dimensions_mask & PGWS_DIMENSIONS_BE_TYPE))
- values[8] = PointerGetDatum(cstring_to_text(backend_type));
- else
- nulls[8] = true;
- if (!is_null_be_state && (dimensions_mask & PGWS_DIMENSIONS_BE_STATE))
- values[9] = backend_state;
- else
- nulls[9] = true;
- if (dimensions_mask & PGWS_DIMENSIONS_BE_START_TIME)
- values[10] = proc_start;
- else
- nulls[10] = true;
- if (!is_null_client_addr && (dimensions_mask & PGWS_DIMENSIONS_CLIENT_ADDR))
- values[11] = client_addr;
- else
- nulls[11] = true;
- if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
- values[12] = PointerGetDatum(cstring_to_text(dimensions.client_hostname));
- else
- nulls[12] = true;
- if (dimensions_mask & PGWS_DIMENSIONS_APPNAME)
- values[13] = PointerGetDatum(cstring_to_text(dimensions.appname));
- else
- nulls[13] = true;
+ if (api_version >= PGWS_V1_2)
+ {
+ if (dimensions_mask & PGWS_DIMENSIONS_DB_ID)
+ values[5] = ObjectIdGetDatum(dimensions.database_id);
+ else
+ nulls[5] = true;
+ if (dimensions_mask & PGWS_DIMENSIONS_PARALLEL_LEADER_PID)
+ values[6] = Int32GetDatum(dimensions.parallel_leader_pid);
+ else
+ nulls[6] = true;
+ if (dimensions_mask & PGWS_DIMENSIONS_IS_REGULAR_BE)
+ values[7] = BoolGetDatum(dimensions.is_regular_backend);
+ else
+ nulls[7] = true;
+ if (backend_type && (dimensions_mask & PGWS_DIMENSIONS_BE_TYPE))
+ values[8] = PointerGetDatum(cstring_to_text(backend_type));
+ else
+ nulls[8] = true;
+ if (!is_null_be_state && (dimensions_mask & PGWS_DIMENSIONS_BE_STATE))
+ values[9] = backend_state;
+ else
+ nulls[9] = true;
+ if (dimensions_mask & PGWS_DIMENSIONS_BE_START_TIME)
+ values[10] = proc_start;
+ else
+ nulls[10] = true;
+ if (!is_null_client_addr && (dimensions_mask & PGWS_DIMENSIONS_CLIENT_ADDR))
+ values[11] = client_addr;
+ else
+ nulls[11] = true;
+ if (dimensions_mask & PGWS_DIMENSIONS_CLIENT_HOSTNAME)
+ values[12] = PointerGetDatum(cstring_to_text(dimensions.client_hostname));
+ else
+ nulls[12] = true;
+ if (dimensions_mask & PGWS_DIMENSIONS_APPNAME)
+ values[13] = PointerGetDatum(cstring_to_text(dimensions.appname));
+ else
+ nulls[13] = true;
+ }
}
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current);
Datum
pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
+{
+ return pg_wait_sampling_get_current_internal(fcinfo, PGWS_V1_1);
+}
+
+PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current_1_2);
+Datum
+pg_wait_sampling_get_current_1_2(PG_FUNCTION_ARGS)
+{
+ return pg_wait_sampling_get_current_internal(fcinfo, PGWS_V1_2);
+}
+
+Datum
+pg_wait_sampling_get_current_internal(FunctionCallInfo fcinfo,
+ pgwsVersion api_version)
{
FuncCallContext *funcctx;
WaitCurrentContext *params;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
check_shmem();
+ /*
+ * Safety check: the caller's expected tuple descriptor must match the
+ * api_version we were invoked for. The shared column-count constants
+ * include the count/ts column that get_current lacks, hence the +1.
+ */
+ switch (rsinfo->expectedDesc->natts + 1)
+ {
+ case PG_WAIT_SAMPLING_COLS_V1_1:
+ if (api_version != PGWS_V1_1)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ case PG_WAIT_SAMPLING_COLS_V1_2:
+ if (api_version != PGWS_V1_2)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ default:
+ elog(ERROR, "incorrect number of output arguments");
+ }
+
/* Initialization, done only on the first call */
if (SRF_IS_FIRSTCALL())
{
@@ -873,8 +931,8 @@ pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
funcctx->user_fctx = params;
/* Setup tuple desc */
- tupdesc = CreateTemplateTupleDesc(14);
- fill_tuple_desc (tupdesc);
+ tupdesc = CreateTemplateTupleDesc(rsinfo->expectedDesc->natts);
+ fill_tuple_desc (tupdesc, api_version);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
LWLockAcquire(ProcArrayLock, LW_SHARED);
@@ -884,7 +942,6 @@ pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
/* pg_wait_sampling_get_current_extended(pid int4) function */
HistoryItem *item;
PGPROC *proc;
- //PgBackendStatus *bestatus; not needed?
proc = search_proc(PG_GETARG_UINT32(0));
@@ -942,8 +999,8 @@ pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
if (funcctx->call_cntr < funcctx->max_calls)
{
HeapTuple tuple;
- Datum values[14];
- bool nulls[14];
+ Datum values[PG_WAIT_SAMPLING_COLS - 1];
+ bool nulls[PG_WAIT_SAMPLING_COLS - 1];
HistoryItem *item;
item = ¶ms->items[funcctx->call_cntr];
@@ -952,7 +1009,7 @@ pg_wait_sampling_get_current(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- fill_values_and_nulls(values, nulls, item->dimensions, PGWS_DIMENSIONS_ALL);
+ fill_values_and_nulls(values, nulls, item->dimensions, PGWS_DIMENSIONS_ALL, api_version);
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
@@ -1122,12 +1179,44 @@ deserialize_array(void *tmp_array, int count, bool is_history)
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile);
Datum
pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
+{
+ return pg_wait_sampling_get_profile_internal(fcinfo, PGWS_V1_1);
+}
+
+PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile_1_2);
+Datum
+pg_wait_sampling_get_profile_1_2(PG_FUNCTION_ARGS)
+{
+ return pg_wait_sampling_get_profile_internal(fcinfo, PGWS_V1_2);
+}
+
+Datum
+pg_wait_sampling_get_profile_internal(FunctionCallInfo fcinfo,
+ pgwsVersion api_version)
{
Profile *profile;
FuncCallContext *funcctx;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
check_shmem();
+ /*
+ * Check we have the expected number of output arguments. Safety check
+ */
+ switch (rsinfo->expectedDesc->natts)
+ {
+ case PG_WAIT_SAMPLING_COLS_V1_1:
+ if (api_version != PGWS_V1_1)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ case PG_WAIT_SAMPLING_COLS_V1_2:
+ if (api_version != PGWS_V1_2)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ default:
+ elog(ERROR, "incorrect number of output arguments");
+ }
+
if (SRF_IS_FIRSTCALL())
{
MemoryContext oldcontext;
@@ -1148,9 +1237,9 @@ pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
funcctx->max_calls = profile->count;
/* Make tuple descriptor */
- tupdesc = CreateTemplateTupleDesc(15);
- fill_tuple_desc (tupdesc);
- TupleDescInitEntry(tupdesc, (AttrNumber) 15, "count",
+ tupdesc = CreateTemplateTupleDesc(rsinfo->expectedDesc->natts);
+ fill_tuple_desc (tupdesc, api_version);
+ TupleDescInitEntry(tupdesc, (AttrNumber) rsinfo->expectedDesc->natts, "count",
INT8OID, -1, 0);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
@@ -1165,8 +1254,8 @@ pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
if (funcctx->call_cntr < funcctx->max_calls)
{
/* for each row */
- Datum values[15];
- bool nulls[15];
+ Datum values[PG_WAIT_SAMPLING_COLS];
+ bool nulls[PG_WAIT_SAMPLING_COLS];
HeapTuple tuple;
ProfileItem *item;
@@ -1176,8 +1265,9 @@ pg_wait_sampling_get_profile(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- fill_values_and_nulls(values, nulls, item->dimensions, pgws_profile_dimensions);
- values[14] = UInt64GetDatum(item->count);
+ fill_values_and_nulls(values, nulls, item->dimensions,
+ pgws_profile_dimensions, api_version);
+ values[rsinfo->expectedDesc->natts - 1] = UInt64GetDatum(item->count);
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
@@ -1253,17 +1343,49 @@ pg_wait_sampling_reset_history(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history);
Datum
pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
+{
+ return pg_wait_sampling_get_history_internal(fcinfo, PGWS_V1_1);
+}
+
+PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history_1_2);
+Datum
+pg_wait_sampling_get_history_1_2(PG_FUNCTION_ARGS)
+{
+ return pg_wait_sampling_get_history_internal(fcinfo, PGWS_V1_2);
+}
+
+Datum
+pg_wait_sampling_get_history_internal(FunctionCallInfo fcinfo,
+ pgwsVersion api_version)
{
History *history;
FuncCallContext *funcctx;
- void *tmp_array;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
check_shmem();
+ /*
+ * Check we have the expected number of output arguments. Safety check
+ */
+ switch (rsinfo->expectedDesc->natts)
+ {
+ case PG_WAIT_SAMPLING_COLS_V1_1:
+ if (api_version != PGWS_V1_1)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ case PG_WAIT_SAMPLING_COLS_V1_2:
+ if (api_version != PGWS_V1_2)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ default:
+ elog(ERROR, "incorrect number of output arguments");
+ }
+
if (SRF_IS_FIRSTCALL())
{
MemoryContext oldcontext;
TupleDesc tupdesc;
+ void *tmp_array;
Size serialized_size;
funcctx = SRF_FIRSTCALL_INIT();
@@ -1278,9 +1400,9 @@ pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
funcctx->max_calls = history->count;
/* Make tuple descriptor */
- tupdesc = CreateTemplateTupleDesc(15);
- fill_tuple_desc (tupdesc);
- TupleDescInitEntry(tupdesc, (AttrNumber) 15, "sample_ts", //XXX we have moved this to the end to have it more in line with current and profile; debatable; maybe move it to first place?
+ tupdesc = CreateTemplateTupleDesc(rsinfo->expectedDesc->natts);
+ fill_tuple_desc (tupdesc, api_version);
+ TupleDescInitEntry(tupdesc, (AttrNumber) rsinfo->expectedDesc->natts, "sample_ts", //XXX we have moved this to the end to have it more in line with current and profile; debatable; maybe move it to first place?
TIMESTAMPTZOID, -1, 0);
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
@@ -1296,8 +1418,8 @@ pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
{
HeapTuple tuple;
HistoryItem *item;
- Datum values[15];
- bool nulls[15];
+ Datum values[PG_WAIT_SAMPLING_COLS];
+ bool nulls[PG_WAIT_SAMPLING_COLS];
item = &history->items[history->index];
@@ -1305,8 +1427,9 @@ pg_wait_sampling_get_history(PG_FUNCTION_ARGS)
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
- fill_values_and_nulls(values, nulls, item->dimensions, pgws_history_dimensions);
- values[14] = TimestampTzGetDatum(item->ts); //XXX same as above
+ fill_values_and_nulls(values, nulls, item->dimensions,
+ pgws_history_dimensions, api_version);
+ values[rsinfo->expectedDesc->natts - 1] = TimestampTzGetDatum(item->ts); //XXX same as above
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
From 42b482814509c6cf63067a5537d04f0d1f6cf10d Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Wed, 25 Jun 2025 16:09:54 +0700
Subject: [PATCH 10/11] Fixes after self-review
---
collector.c | 11 ++-----
expected/load.out | 76 ++++++++++++++++++++++++++++++--------------
expected/load_1.out | 31 ------------------
expected/queries.out | 45 --------------------------
pg_wait_sampling.c | 31 +++++++++++-------
pg_wait_sampling.h | 2 --
sql/queries.sql | 14 --------
7 files changed, 74 insertions(+), 136 deletions(-)
delete mode 100644 expected/load_1.out
diff --git a/collector.c b/collector.c
index 9e6afad..ec03fee 100644
--- a/collector.c
+++ b/collector.c
@@ -95,7 +95,6 @@ realloc_history(History *observations, int count)
j;
int serialized_size;
- //saved_history_dimensions = pgws_history_dimensions; // TODO вроде как
serialized_size = get_serialized_size(saved_history_dimensions, true);
/* Allocate new array for history */
@@ -578,9 +577,6 @@ probe_waits(History *observations, HTAB *profile_hash,
LWLockAcquire(ProcArrayLock, LW_SHARED);
for (i = 0; i < ProcGlobal->allProcCount; i++)
{
- //HistoryItem item_history,
- // *observation;
- //ProfileItem item_profile;
PGPROC *proc = &ProcGlobal->allProcs[i];
int pid;
uint32 wait_event_info;
@@ -611,9 +607,6 @@ probe_waits(History *observations, HTAB *profile_hash,
&common_dimensions,
saved_profile_dimensions);
- //item_history.ts = ts;
- //item_history.dimensions = history_dimensions;
-
/* Write to the history if needed */
if (write_history)
{
@@ -646,7 +639,7 @@ probe_waits(History *observations, HTAB *profile_hash,
&serialized_item, &serialized_key, &serialized_size,
(TimestampTz) 0, count, false);
- stored_item = (char *) hash_search(profile_hash, serialized_key,
+ stored_item = (char *) hash_search(profile_hash, serialized_key,
HASH_ENTER, &found);
if (found)
@@ -673,7 +666,7 @@ probe_waits(History *observations, HTAB *profile_hash,
* Send waits history to shared memory queue.
*/
static void
-send_history(History *observations, shm_mq_handle *mqh) //TODO TODO TODO
+send_history(History *observations, shm_mq_handle *mqh)
{
int serialized_size = get_serialized_size(saved_history_dimensions, true);
Size count,
diff --git a/expected/load.out b/expected/load.out
index b7de0ac..94e9075 100644
--- a/expected/load.out
+++ b/expected/load.out
@@ -1,31 +1,61 @@
CREATE EXTENSION pg_wait_sampling;
\d pg_wait_sampling_current
-View "public.pg_wait_sampling_current"
- Column | Type | Modifiers
-------------+---------+-----------
- pid | integer |
- event_type | text |
- event | text |
- queryid | bigint |
+ View "public.pg_wait_sampling_current"
+ Column | Type | Collation | Nullable | Default
+---------------------+--------------------------+-----------+----------+---------
+ pid | integer | | |
+ event_type | text | | |
+ event | text | | |
+ queryid | bigint | | |
+ role_id | bigint | | |
+ database_id | bigint | | |
+ parallel_leader_pid | integer | | |
+ is_regular_backend | boolean | | |
+ backend_type | text | | |
+ backend_state | text | | |
+ proc_start | timestamp with time zone | | |
+ client_addr | text | | |
+ client_hostname | text | | |
+ appname | text | | |
\d pg_wait_sampling_history
- View "public.pg_wait_sampling_history"
- Column | Type | Modifiers
-------------+--------------------------+-----------
- pid | integer |
- ts | timestamp with time zone |
- event_type | text |
- event | text |
- queryid | bigint |
+ View "public.pg_wait_sampling_history"
+ Column | Type | Collation | Nullable | Default
+---------------------+--------------------------+-----------+----------+---------
+ pid | integer | | |
+ event_type | text | | |
+ event | text | | |
+ queryid | bigint | | |
+ role_id | bigint | | |
+ database_id | bigint | | |
+ parallel_leader_pid | integer | | |
+ is_regular_backend | boolean | | |
+ backend_type | text | | |
+ backend_state | text | | |
+ proc_start | timestamp with time zone | | |
+ client_addr | text | | |
+ client_hostname | text | | |
+ appname | text | | |
+ ts | timestamp with time zone | | |
\d pg_wait_sampling_profile
-View "public.pg_wait_sampling_profile"
- Column | Type | Modifiers
-------------+---------+-----------
- pid | integer |
- event_type | text |
- event | text |
- queryid | bigint |
- count | bigint |
+ View "public.pg_wait_sampling_profile"
+ Column | Type | Collation | Nullable | Default
+---------------------+--------------------------+-----------+----------+---------
+ pid | integer | | |
+ event_type | text | | |
+ event | text | | |
+ queryid | bigint | | |
+ role_id | bigint | | |
+ database_id | bigint | | |
+ parallel_leader_pid | integer | | |
+ is_regular_backend | boolean | | |
+ backend_type | text | | |
+ backend_state | text | | |
+ proc_start | timestamp with time zone | | |
+ client_addr | text | | |
+ client_hostname | text | | |
+ appname | text | | |
+ count | bigint | | |
DROP EXTENSION pg_wait_sampling;
diff --git a/expected/load_1.out b/expected/load_1.out
deleted file mode 100644
index 1a1358a..0000000
--- a/expected/load_1.out
+++ /dev/null
@@ -1,31 +0,0 @@
-CREATE EXTENSION pg_wait_sampling;
-\d pg_wait_sampling_current
- View "public.pg_wait_sampling_current"
- Column | Type | Collation | Nullable | Default
-------------+---------+-----------+----------+---------
- pid | integer | | |
- event_type | text | | |
- event | text | | |
- queryid | bigint | | |
-
-\d pg_wait_sampling_history
- View "public.pg_wait_sampling_history"
- Column | Type | Collation | Nullable | Default
-------------+--------------------------+-----------+----------+---------
- pid | integer | | |
- ts | timestamp with time zone | | |
- event_type | text | | |
- event | text | | |
- queryid | bigint | | |
-
-\d pg_wait_sampling_profile
- View "public.pg_wait_sampling_profile"
- Column | Type | Collation | Nullable | Default
-------------+---------+-----------+----------+---------
- pid | integer | | |
- event_type | text | | |
- event | text | | |
- queryid | bigint | | |
- count | bigint | | |
-
-DROP EXTENSION pg_wait_sampling;
diff --git a/expected/queries.out b/expected/queries.out
index 6718c14..722df5f 100644
--- a/expected/queries.out
+++ b/expected/queries.out
@@ -20,27 +20,6 @@ WITH t as (SELECT sum(0) FROM pg_wait_sampling_profile)
0
(1 row)
-WITH t as (SELECT sum(0) FROM pg_wait_sampling_current_extended)
- SELECT sum(0) FROM generate_series(1, 2), t;
- sum
------
- 0
-(1 row)
-
-WITH t as (SELECT sum(0) FROM pg_wait_sampling_history_extended)
- SELECT sum(0) FROM generate_series(1, 2), t;
- sum
------
- 0
-(1 row)
-
-WITH t as (SELECT sum(0) FROM pg_wait_sampling_profile_extended)
- SELECT sum(0) FROM generate_series(1, 2), t;
- sum
------
- 0
-(1 row)
-
-- Some dummy checks just to be sure that all our functions work and return something.
SELECT count(*) = 1 as test FROM pg_wait_sampling_get_current(pg_backend_pid());
test
@@ -66,28 +45,4 @@ SELECT pg_wait_sampling_reset_profile();
(1 row)
-SELECT count(*) = 1 as test FROM pg_wait_sampling_get_current_extended(pg_backend_pid());
- test
-------
- t
-(1 row)
-
-SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_profile_extended();
- test
-------
- t
-(1 row)
-
-SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_history_extended();
- test
-------
- t
-(1 row)
-
-SELECT pg_wait_sampling_reset_profile();
- pg_wait_sampling_reset_profile
---------------------------------
-
-(1 row)
-
DROP EXTENSION pg_wait_sampling;
diff --git a/pg_wait_sampling.c b/pg_wait_sampling.c
index fe20dba..6a82bba 100644
--- a/pg_wait_sampling.c
+++ b/pg_wait_sampling.c
@@ -334,11 +334,9 @@ pgws_general_dimensions_check_hook (char **newvalue, void **extra, GucSource sou
int extrachecks = 0;
int *myextra;
- /* Check special cases when we turn all or none dimensions */
+ /* Check special case when we turn all dimensions */
if (pg_strcasecmp(*newvalue, "all") == 0)
extrachecks = PGWS_DIMENSIONS_ALL;
- else if (pg_strcasecmp(*newvalue, "none") == 0)
- extrachecks = PGWS_DIMENSIONS_NONE;
else
{
/* Need a modifiable copy of string */
@@ -388,7 +386,7 @@ pgws_general_dimensions_check_hook (char **newvalue, void **extra, GucSource sou
extrachecks |= PGWS_DIMENSIONS_CLIENT_HOSTNAME;
else if (pg_strcasecmp(tok, "appname") == 0)
extrachecks |= PGWS_DIMENSIONS_APPNAME;
- else if (pg_strcasecmp(tok, "all") == 0 || pg_strcasecmp(tok, "none") == 0)
+ else if (pg_strcasecmp(tok, "all") == 0)
{
GUC_check_errdetail("Key word \"%s\" cannot be combined with other key words.", tok);
pfree(rawstring);
@@ -747,8 +745,17 @@ get_beentry_by_procpid(int pid)
/* Here beid is just index in localBackendStatusTable */
local_beentry = pgstat_fetch_stat_local_beentry(cur_be_idx);
#endif
+#if defined(PGPRO_EE) || defined(PGPRO_STD) && PG_VERSION_NUM >= 160000
if (local_beentry->backendStatus->st_procpid == pid)
return local_beentry->backendStatus;
+#else
+ if (local_beentry->backendStatus.st_procpid == pid)
+ {
+ PgBackendStatus *result = palloc0(sizeof(PgBackendStatus));
+ *result = local_beentry->backendStatus;
+ return result;
+ }
+#endif
}
return NULL;
}
@@ -831,12 +838,12 @@ fill_values_and_nulls(Datum *values, bool *nulls, SamplingDimensions dimensions,
values[3] = UInt64GetDatum(dimensions.queryId);
else
values[3] = (Datum) 0;
- if (dimensions_mask & PGWS_DIMENSIONS_ROLE_ID)
- values[4] = ObjectIdGetDatum(dimensions.role_id);
- else
- nulls[4] = true;
if (api_version >= PGWS_V1_2)
{
+ if (dimensions_mask & PGWS_DIMENSIONS_ROLE_ID)
+ values[4] = ObjectIdGetDatum(dimensions.role_id);
+ else
+ nulls[4] = true;
if (dimensions_mask & PGWS_DIMENSIONS_DB_ID)
values[5] = ObjectIdGetDatum(dimensions.database_id);
else
@@ -887,7 +894,7 @@ PG_FUNCTION_INFO_V1(pg_wait_sampling_get_current_1_2);
Datum
pg_wait_sampling_get_current_1_2(PG_FUNCTION_ARGS)
{
- return pg_wait_sampling_get_current_internal(fcinfo, PGWS_V1_2);
+ return pg_wait_sampling_get_current_internal(fcinfo, PGWS_V1_2);
}
Datum
@@ -1187,7 +1194,7 @@ PG_FUNCTION_INFO_V1(pg_wait_sampling_get_profile_1_2);
Datum
pg_wait_sampling_get_profile_1_2(PG_FUNCTION_ARGS)
{
- return pg_wait_sampling_get_profile_internal(fcinfo, PGWS_V1_2);
+ return pg_wait_sampling_get_profile_internal(fcinfo, PGWS_V1_2);
}
Datum
@@ -1328,7 +1335,7 @@ pg_wait_sampling_reset_history(PG_FUNCTION_ARGS)
LockAcquire(&collectorTag, ExclusiveLock, false, false);
pgws_collector_hdr->request = HISTORY_RESET;
LockRelease(&collectorTag, ExclusiveLock, false);
-
+
if (!pgws_collector_hdr->latch)
ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR),
errmsg("pg_wait_sampling collector wasn't started")));
@@ -1351,7 +1358,7 @@ PG_FUNCTION_INFO_V1(pg_wait_sampling_get_history_1_2);
Datum
pg_wait_sampling_get_history_1_2(PG_FUNCTION_ARGS)
{
- return pg_wait_sampling_get_history_internal(fcinfo, PGWS_V1_2);
+ return pg_wait_sampling_get_history_internal(fcinfo, PGWS_V1_2);
}
Datum
diff --git a/pg_wait_sampling.h b/pg_wait_sampling.h
index 3b84b60..5f457f4 100644
--- a/pg_wait_sampling.h
+++ b/pg_wait_sampling.h
@@ -28,8 +28,6 @@
#define PGWS_COLLECTOR_LOCK 1
/* Values for sampling dimensions */
-#define PGWS_DIMENSIONS_NONE 0
-
#define PGWS_DIMENSIONS_PID (1 << 0)
#define PGWS_DIMENSIONS_WAIT_EVENT_TYPE (1 << 1)
#define PGWS_DIMENSIONS_WAIT_EVENT (1 << 2)
diff --git a/sql/queries.sql b/sql/queries.sql
index 6658c74..de44c6d 100644
--- a/sql/queries.sql
+++ b/sql/queries.sql
@@ -9,24 +9,10 @@ WITH t as (SELECT sum(0) FROM pg_wait_sampling_history)
WITH t as (SELECT sum(0) FROM pg_wait_sampling_profile)
SELECT sum(0) FROM generate_series(1, 2), t;
-WITH t as (SELECT sum(0) FROM pg_wait_sampling_current_extended)
- SELECT sum(0) FROM generate_series(1, 2), t;
-
-WITH t as (SELECT sum(0) FROM pg_wait_sampling_history_extended)
- SELECT sum(0) FROM generate_series(1, 2), t;
-
-WITH t as (SELECT sum(0) FROM pg_wait_sampling_profile_extended)
- SELECT sum(0) FROM generate_series(1, 2), t;
-
-- Some dummy checks just to be sure that all our functions work and return something.
SELECT count(*) = 1 as test FROM pg_wait_sampling_get_current(pg_backend_pid());
SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_profile();
SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_history();
SELECT pg_wait_sampling_reset_profile();
-SELECT count(*) = 1 as test FROM pg_wait_sampling_get_current_extended(pg_backend_pid());
-SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_profile_extended();
-SELECT count(*) >= 0 as test FROM pg_wait_sampling_get_history_extended();
-SELECT pg_wait_sampling_reset_profile();
-
DROP EXTENSION pg_wait_sampling;
From 01c45a43e6bca7f664bfbab41dcfca290b1cfcfa Mon Sep 17 00:00:00 2001
From: Oleg Tselebrovskiy
Date: Thu, 26 Jun 2025 11:36:32 +0700
Subject: [PATCH 11/11] Fix and add info about sampling dimensions to README
---
README.md | 81 +++++++++++++++++--------------------------------------
1 file changed, 25 insertions(+), 56 deletions(-)
diff --git a/README.md b/README.md
index f5f68cd..50e742e 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,7 @@ When `pg_wait_sampling` is enabled, it collects two kinds of statistics.
a client who periodically reads this history and dumps it somewhere, user
can have continuous history of wait events.
* Waits profile. It's implemented as in-memory hash table where samples
- are accumulated per each wait event and can be divided by process,
+ are accumulated and can be divided by process, wait event,
query and other dimensions. This hash table can be reset by user request.
Assuming there is a client who periodically dumps profile and resets it,
user can have statistics of wait events over time.
@@ -99,20 +99,6 @@ Usage
`pg_wait_sampling_current` view – information about current wait events for
all processes including background workers.
-| Column name | Column type | Description |
-| ----------- | ----------- | ----------------------- |
-| pid | int4 | Id of process |
-| event_type | text | Name of wait event type |
-| event | text | Name of wait event |
-| queryid | int8 | Id of query |
-
-`pg_wait_sampling_get_current(pid int4)` returns the same table for single given
-process.
-
-`pg_wait_sampling_current_extended` view – information about current wait events for
-all processes including background workers. Structure of this view can be changed
-between verions.
-
| Column name | Column type | Description |
| ------------------- | ----------- | --------------------------- |
| pid | int4 | Id of process |
@@ -129,28 +115,15 @@ between verions.
| client_hostname | text | Client hostname |
| appname | text | Application name |
-`pg_wait_sampling_get_current_extended(pid int4)` returns the same table for single given
+`pg_wait_sampling_get_current(pid int4)` returns the same table for a single given
process.
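+
+For example (a usage sketch relying only on the view and function described
+above):
+
+```sql
+-- current wait event of this backend only
+SELECT * FROM pg_wait_sampling_get_current(pg_backend_pid());
+
+-- current wait events of all processes, including background workers
+SELECT * FROM pg_wait_sampling_current;
+```
+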
`pg_wait_sampling_history` view – history of wait events obtained by sampling into
in-memory ring buffer.
-| Column name | Column type | Description |
-| ----------- | ----------- | ----------------------- |
-| pid | int4 | Id of process |
-| ts | timestamptz | Sample timestamp |
-| event_type | text | Name of wait event type |
-| event | text | Name of wait event |
-| queryid | int8 | Id of query |
-
-`pg_wait_sampling_history_extended` view – history of wait events obtained by
-sampling into in-memory ring buffer. Structure of this view can be changed
-between verions
-
| Column name | Column type | Description |
| ------------------- | ----------- | --------------------------- |
| pid | int4 | Id of process |
-| ts | timestamptz | Sample timestamp |
| event_type | text | Name of wait event type |
| event | text | Name of wait event |
| queryid | int8 | Id of query |
@@ -163,22 +136,13 @@ between verions
| client_addr | text | Client address |
| client_hostname | text | Client hostname |
| appname | text | Application name |
+| ts | timestamptz | Sample timestamp |
+
+`pg_wait_sampling_reset_history()` function resets the history.
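+
+A minimal sketch of inspecting and then clearing the history (only objects
+described above are used):
+
+```sql
+-- most recent samples first
+SELECT * FROM pg_wait_sampling_history ORDER BY ts DESC LIMIT 100;
+
+-- discard the contents of the in-memory ring buffer
+SELECT pg_wait_sampling_reset_history();
+```
+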
`pg_wait_sampling_profile` view – profile of wait events obtained by sampling into
in-memory hash table.
-| Column name | Column type | Description |
-| ----------- | ----------- | ----------------------- |
-| pid | int4 | Id of process |
-| event_type | text | Name of wait event type |
-| event | text | Name of wait event |
-| queryid | int8 | Id of query |
-| count | int8 | Count of samples |
-
-`pg_wait_sampling_profile_extended` view – history of wait events obtained by
-sampling into in-memory ring buffer. Structure of this view can be changed
-between verions
-
| Column name | Column type | Description |
| ------------------- | ----------- | --------------------------- |
| pid | int4 | Id of process |
@@ -201,16 +165,16 @@ between verions
The work of wait event statistics collector worker is controlled by following
GUCs.
-| Parameter name | Data type | Description | Default value |
-|-------------------------------------| --------- |---------------------------------------------|--------------:|
-| pg_wait_sampling.history_size | int4 | Size of history in-memory ring buffer | 5000 |
-| pg_wait_sampling.history_period | int4 | Period for history sampling in milliseconds | 10 |
-| pg_wait_sampling.profile_period | int4 | Period for profile sampling in milliseconds | 10 |
-| pg_wait_sampling.profile_pid | bool | Whether profile should be per pid | true |
-| pg_wait_sampling.profile_queries | enum | Whether profile should be per query | top |
-| pg_wait_sampling.sample_cpu | bool | Whether on CPU backends should be sampled | true |
-| pg_wait_sampling.history_dimensions | text | Additional columns in extended history view | 'none' |
-| pg_wait_sampling.profile_dimensions | text | Additional columns in extended profile view | 'none' |
+| Parameter name | Data type | Description | Default value |
+|-------------------------------------| --------- |---------------------------------------------|----------------------------------------------|
+| pg_wait_sampling.history_size | int4 | Size of history in-memory ring buffer | 5000 |
+| pg_wait_sampling.history_period | int4 | Period for history sampling in milliseconds | 10 |
+| pg_wait_sampling.profile_period | int4 | Period for profile sampling in milliseconds | 10 |
+| pg_wait_sampling.profile_pid | bool | Whether profile should be per pid | true |
+| pg_wait_sampling.profile_queries | enum | Whether profile should be per query | top |
+| pg_wait_sampling.sample_cpu | bool | Whether on CPU backends should be sampled | true |
+| pg_wait_sampling.history_dimensions | text | Additional columns in extended history view | 'pid, wait_event_type, wait_event, query_id' |
+| pg_wait_sampling.profile_dimensions | text | Additional columns in extended profile view | 'pid, wait_event_type, wait_event, query_id' |
If `pg_wait_sampling.profile_pid` is set to false, sampling profile wouldn't be
collected in per-process manner. In this case the value of pid could would
@@ -226,10 +190,11 @@ will be NULL.
`pg_wait_sampling.history_dimenstions` and `pg_wait_sampling.profile_dimensions`
determine what additional columns will be sampled in `history/profile_extended`
-views. Possible values are `none`, `all`, `role_id`, `database_id`,
-`parallel_leader_pid`, `backend_type`, `backend_state`, `backend_start_time`,
-`client_addr`, `client_hostname`, `appname` and any combination of column names.
-`none` and `all` cannot be used together with any other values and must be used alone.
+views. Possible values are `all`, `pid`, `wait_event_type`, `wait_event`,
+`query_id`, `role_id`, `database_id`, `parallel_leader_pid`, `backend_type`,
+`backend_state`, `backend_start_time`, `client_addr`, `client_hostname`,
+`appname` and any combination of column names.
+`all` cannot be used together with any other values and must be used alone.
> [!WARNING]
> Turning on any of the following columns: `backend_type`, `backend_state`,
@@ -238,9 +203,13 @@ views. Possible values are `none`, `all`, `role_id`, `database_id`,
> BackendStatusTable. This is especially noticeable with PostgreSQL 13-16
Values of these GUC variables can be changed only in config file or with ALTER SYSTEM.
-Then you need to reload server's configuration (such as with pg_reload_conf function)
+Then you need to reload server's configuration (such as with `pg_reload_conf` function)
for changes to take effect.
+> [!WARNING]
+> After reloading the configuration you also need to invoke `pg_wait_sampling_reset_history()`
+> and `pg_wait_sampling_reset_profile()` so that the new dimensions are applied correctly.
+
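+For example, enabling extra profile dimensions could look like this (a sketch;
+adjust the column list to your needs):
+
+```sql
+ALTER SYSTEM SET pg_wait_sampling.profile_dimensions =
+    'pid, wait_event_type, wait_event, query_id, database_id, backend_type';
+SELECT pg_reload_conf();
+-- drop data accumulated under the old set of dimensions
+SELECT pg_wait_sampling_reset_profile();
+SELECT pg_wait_sampling_reset_history();
+```
+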
See
[PostgreSQL documentation](http://www.postgresql.org/docs/devel/static/monitoring-stats.html#WAIT-EVENT-TABLE)
for list of possible wait events.