From beb8587a3b60ec6e1a378b1a2c60a6368aeafb43 Mon Sep 17 00:00:00 2001 From: Kovalenko Anastasia Date: Fri, 30 Jul 2021 18:53:26 +0300 Subject: [PATCH 01/37] Python tests fixed --- tests/common.py | 66 +++++++++++++++++++- tests/pg_qs_test_runner.py | 17 +++++- tests/test_cases.py | 121 ++++++++++++++++++++++--------------- 3 files changed, 154 insertions(+), 50 deletions(-) diff --git a/tests/common.py b/tests/common.py index 3f4f9c2..ac24e76 100644 --- a/tests/common.py +++ b/tests/common.py @@ -43,6 +43,21 @@ def n_close(conns): for conn in conns: conn.close() +def pg_query_state_locks(config, pid, conn, verbose=False, costs=False, timing=False, \ + buffers=False, triggers=False, format='text'): + """ + Get query state from backend with specified pid and optional parameters. + Save any warning, info, notice and log data in global variable 'notices' + """ + + curs = conn.cursor() + curs.callproc('pg_query_state', (pid, verbose, costs, timing, buffers, triggers, format)) + wait(conn) + result = curs.fetchall() + notices = conn.notices[:] + + return result, notices + def pg_query_state(config, pid, verbose=False, costs=False, timing=False, \ buffers=False, triggers=False, format='text'): """ @@ -52,7 +67,6 @@ def pg_query_state(config, pid, verbose=False, costs=False, timing=False, \ conn = psycopg2.connect(**config) curs = conn.cursor() - curs.callproc('pg_query_state', (pid, verbose, costs, timing, buffers, triggers, format)) result = curs.fetchall() notices = conn.notices[:] @@ -60,6 +74,56 @@ def pg_query_state(config, pid, verbose=False, costs=False, timing=False, \ return result, notices +def onetime_query_state_locks(config, acon_query, acon_pg, query, args={}, num_workers=0): + """ + Get intermediate state of 'query' on connection 'acon_query' after number of 'steps' + of node executions from start of query + """ + + curs_query = acon_query.cursor() + curs_pg = acon_pg.cursor() + curs_query.execute("select pg_advisory_lock(1);") + curs_pg.execute("select pg_advisory_lock(2);") + wait(acon_query) + wait(acon_pg) + curs_pg.execute("select pg_advisory_lock(1);") + set_guc(acon_query, 'enable_mergejoin', 'off') + set_guc(acon_query, 'max_parallel_workers_per_gather', num_workers) + curs_query.execute(query) + # extract current state of query progress + MAX_PG_QS_RETRIES = 10 + DELAY_BETWEEN_RETRIES = 0.1 + pg_qs_args = { + 'config': config, + 'pid': acon_query.get_backend_pid(), + 'conn': acon_pg + } + for k, v in args.items(): + pg_qs_args[k] = v + n_retries = 0 + + wait(acon_pg) + + while True: + result, notices = pg_query_state_locks(**pg_qs_args) + n_retries += 1 + if len(result) > 0: + break + if n_retries >= MAX_PG_QS_RETRIES: + # pg_query_state callings don't return any result, more likely run + # query has completed + break + time.sleep(DELAY_BETWEEN_RETRIES) + + curs_pg.execute("select pg_advisory_unlock(2);") + wait(acon_pg) + wait(acon_query) + + set_guc(acon_query, 'enable_mergejoin', 'on') + curs_query.execute("select pg_advisory_unlock(2);") + curs_pg.execute("select pg_advisory_unlock(1);") + return result, notices + def onetime_query_state(config, async_conn, query, args={}, num_workers=0): """ Get intermediate state of 'query' on connection 'async_conn' after number of 'steps' diff --git a/tests/pg_qs_test_runner.py b/tests/pg_qs_test_runner.py index 28db807..a6e02e9 100644 --- a/tests/pg_qs_test_runner.py +++ b/tests/pg_qs_test_runner.py @@ -1,6 +1,6 @@ ''' pg_qs_test_runner.py -Copyright (c) 2016-2020, Postgres Professional +Copyright (c) 2016-2021, Postgres 
Professional ''' import argparse @@ -22,6 +22,20 @@ def __call__(self, parser, args, values, option_string=None): class SetupException(Exception): pass class TeardownException(Exception): pass +unlock_if_eq_1 = """ + CREATE OR REPLACE FUNCTION unlock_if_eq_1(x integer) RETURNS integer AS $$ + BEGIN + IF x = 1 THEN + perform pg_advisory_unlock(1); + perform pg_advisory_lock(2); + return 1; + ELSE + return x; + END IF; + END; + $$ LANGUAGE plpgsql + """ + setup_cmd = [ 'drop extension if exists pg_query_state cascade', 'drop table if exists foo cascade', @@ -33,6 +47,7 @@ class TeardownException(Exception): pass 'insert into bar select i, i%2=1 from generate_series(1, 500000) as i', 'analyze foo', 'analyze bar', + unlock_if_eq_1, ] teardown_cmd = [ diff --git a/tests/test_cases.py b/tests/test_cases.py index 440f32f..1750bb1 100644 --- a/tests/test_cases.py +++ b/tests/test_cases.py @@ -1,6 +1,6 @@ ''' test_cases.py -Copyright (c) 2016-2020, Postgres Professional +Copyright (c) 2016-2021, Postgres Professional ''' import json @@ -42,21 +42,28 @@ def test_deadlock(config): def test_simple_query(config): """test statistics of simple query""" - acon, = common.n_async_connect(config) - query = 'select count(*) from foo join bar on foo.c1=bar.c1' + acon1, acon2 = common.n_async_connect(config, 2) + query = 'select count(*) from foo join bar on foo.c1=bar.c1 and unlock_if_eq_1(foo.c1)=bar.c1' expected = r"""Aggregate \(Current loop: actual rows=\d+, loop number=1\) - -> Hash Join \(Current loop: actual rows=\d+, loop number=1\) + -> Hash Join \(Current loop: actual rows=62473, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) + Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) -> Seq Scan on foo \(Current loop: actual rows=\d+, loop number=1\) -> Hash \(Current loop: actual rows=\d+, loop number=1\) Buckets: \d+ Batches: \d+ Memory Usage: \d+kB -> Seq Scan on bar \(Current loop: actual rows=\d+, loop number=1\)""" - qs, _ = common.onetime_query_state(config, acon, query) - assert qs[0][0] == acon.get_backend_pid() and qs[0][1] == 0 \ - and qs[0][2] == query and re.match(expected, qs[0][3]) and qs[0][4] == None + qs, _ = common.onetime_query_state_locks(config, acon1, acon2, query) - common.n_close((acon,)) + assert qs[0][0] == acon1.get_backend_pid() + assert qs[0][1] == 0 + assert qs[0][2] == query + assert re.match(expected, qs[0][3]) + assert qs[0][4] == None + # assert qs[0][0] == acon.get_backend_pid() and qs[0][1] == 0 \ + # and qs[0][2] == query and re.match(expected, qs[0][3]) and qs[0][4] == None + + common.n_close((acon1, acon2)) def test_concurrent_access(config): """test when two backends compete with each other to extract state from third running backend""" @@ -87,50 +94,56 @@ def test_concurrent_access(config): def test_nested_call(config): """test statistics under calling function""" - acon, = common.n_async_connect(config) + acon1, acon2 = common.n_async_connect(config, 2) util_conn = psycopg2.connect(**config) util_curs = util_conn.cursor() create_function = """ create or replace function n_join_foo_bar() returns integer as $$ begin - return (select count(*) from foo join bar on foo.c1=bar.c1); + return (select count(*) from foo join bar on foo.c1=bar.c1 and unlock_if_eq_1(foo.c1)=bar.c1); end; $$ language plpgsql""" drop_function = 'drop function n_join_foo_bar()' call_function = 'select * from n_join_foo_bar()' - nested_query = 'SELECT (select count(*) from foo join bar on foo.c1=bar.c1)' + nested_query1 = '(select count(*) from foo join bar on foo.c1=bar.c1 and 
unlock_if_eq_1(foo.c1)=bar.c1)' + nested_query2 = 'SELECT (select count(*) from foo join bar on foo.c1=bar.c1 and unlock_if_eq_1(foo.c1)=bar.c1)' expected = 'Function Scan on n_join_foo_bar (Current loop: actual rows=0, loop number=1)' expected_nested = r"""Result \(Current loop: actual rows=0, loop number=1\) InitPlan 1 \(returns \$0\) -> Aggregate \(Current loop: actual rows=0, loop number=1\) - -> Hash Join \(Current loop: actual rows=0, loop number=1\) + -> Hash Join \(Current loop: actual rows=62473, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) - -> Seq Scan on foo \(Current loop: actual rows=1, loop number=1\) - -> Hash \(Current loop: actual rows=0, loop number=1\) + Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) + -> Seq Scan on foo \(Current loop: actual rows=1000000, loop number=1\) + -> Hash \(Current loop: actual rows=500000, loop number=1\) Buckets: \d+ Batches: \d+ Memory Usage: \d+kB -> Seq Scan on bar \(Current loop: actual rows=\d+, loop number=1\)""" + util_curs.execute(create_function) util_conn.commit() - qs, notices = common.onetime_query_state(config, acon, call_function) + qs, notices = common.onetime_query_state_locks(config, acon1, acon2, call_function) # Print some debug output before assertion if len(qs) < 2: print(qs) - assert len(qs) == 2 \ - and qs[0][0] == qs[1][0] == acon.get_backend_pid() \ - and qs[0][1] == 0 and qs[1][1] == 1 \ - and qs[0][2] == call_function and qs[0][3] == expected \ - and qs[1][2] == nested_query and re.match(expected_nested, qs[1][3]) \ - and qs[0][4] == qs[1][4] == None + assert len(qs) == 3 + assert qs[0][0] == qs[1][0] == acon1.get_backend_pid() + assert qs[0][1] == 0 + assert qs[1][1] == 1 + assert qs[0][2] == call_function + assert qs[0][3] == expected + assert qs[1][2] == nested_query1 or qs[1][2] == nested_query2 + assert re.match(expected_nested, qs[1][3]) + assert qs[0][4] == qs[1][4] == None assert len(notices) == 0 util_curs.execute(drop_function) util_conn.close() - common.n_close((acon,)) + common.n_close((acon1, acon2)) def test_insert_on_conflict(config): """test statistics on conflicting tuples under INSERT ON CONFLICT query""" @@ -212,65 +225,77 @@ def test_trigger(config): def test_costs(config): """test plan costs""" - acon, = common.n_async_connect(config) - query = 'select count(*) from foo join bar on foo.c1=bar.c1' + acon1, acon2 = common.n_async_connect(config, 2) + query = 'select count(*) from foo join bar on foo.c1=bar.c1 and unlock_if_eq_1(foo.c1)=bar.c1;' + expected = r"""Aggregate \(cost=\d+.\d+..\d+.\d+ rows=\d+ width=8\) \(Current loop: actual rows=0, loop number=1\) - -> Hash Join \(cost=\d+.\d+..\d+.\d+ rows=\d+ width=0\) \(Current loop: actual rows=0, loop number=1\) + -> Hash Join \(cost=\d+.\d+..\d+.\d+ rows=\d+ width=0\) \(Current loop: actual rows=\d+, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) - -> Seq Scan on foo \(cost=0.00..\d+.\d+ rows=\d+ width=4\) \(Current loop: actual rows=1, loop number=1\) - -> Hash \(cost=\d+.\d+..\d+.\d+ rows=\d+ width=4\) \(Current loop: actual rows=0, loop number=1\) + Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) + -> Seq Scan on foo \(cost=0.00..\d+.\d+ rows=\d+ width=4\) \(Current loop: actual rows=1000000, loop number=1\) + -> Hash \(cost=\d+.\d+..\d+.\d+ rows=\d+ width=4\) \(Current loop: actual rows=500000, loop number=1\) Buckets: \d+ Batches: \d+ Memory Usage: \d+kB -> Seq Scan on bar \(cost=0.00..\d+.\d+ rows=\d+ width=4\) \(Current loop: actual rows=\d+, loop number=1\)""" - qs, notices = common.onetime_query_state(config, acon, query, 
{'costs': True}) - assert len(qs) == 1 and re.match(expected, qs[0][3]) + qs, notices = common.onetime_query_state_locks(config, acon1, acon2, query, {'costs': True}) + + assert len(qs) == 2 and re.match(expected, qs[0][3]) assert len(notices) == 0 - common.n_close((acon,)) + common.n_close((acon1, acon2)) def test_buffers(config): """test buffer statistics""" - acon, = common.n_async_connect(config) - query = 'select count(*) from foo join bar on foo.c1=bar.c1' + acon1, acon2 = common.n_async_connect(config, 2) + query = 'select count(*) from foo join bar on foo.c1=bar.c1 and unlock_if_eq_1(foo.c1)=bar.c1' expected = r"""Aggregate \(Current loop: actual rows=0, loop number=1\) - -> Hash Join \(Current loop: actual rows=0, loop number=1\) + -> Hash Join \(Current loop: actual rows=\d+, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) - -> Seq Scan on foo \(Current loop: actual rows=1, loop number=1\) + Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) + Buffers: shared hit=\d+, temp read=\d+ written=\d+ + -> Seq Scan on foo \(Current loop: actual rows=1000000, loop number=1\) Buffers: [^\n]* - -> Hash \(Current loop: actual rows=0, loop number=1\) + -> Hash \(Current loop: actual rows=500000, loop number=1\) Buckets: \d+ Batches: \d+ Memory Usage: \d+kB + Buffers: shared hit=\d+, temp written=\d+ -> Seq Scan on bar \(Current loop: actual rows=\d+, loop number=1\) Buffers: .*""" - common.set_guc(acon, 'pg_query_state.enable_buffers', 'on') + common.set_guc(acon1, 'pg_query_state.enable_buffers', 'on') - qs, notices = common.onetime_query_state(config, acon, query, {'buffers': True}) - assert len(qs) == 1 and re.match(expected, qs[0][3]) + qs, notices = common.onetime_query_state_locks(config, acon1, acon2, query, {'buffers': True}) + + assert len(qs) == 2 + assert re.match(expected, qs[0][3]) assert len(notices) == 0 - common.n_close((acon,)) + common.n_close((acon1, acon2)) def test_timing(config): """test timing statistics""" - acon, = common.n_async_connect(config) - query = 'select count(*) from foo join bar on foo.c1=bar.c1' + acon1, acon2 = common.n_async_connect(config, 2) + query = 'select count(*) from foo join bar on foo.c1=bar.c1 and unlock_if_eq_1(foo.c1)=bar.c1' + expected = r"""Aggregate \(Current loop: running time=\d+.\d+ actual rows=0, loop number=1\) - -> Hash Join \(Current loop: running time=\d+.\d+ actual rows=0, loop number=1\) + -> Hash Join \(Current loop: actual time=\d+.\d+..\d+.\d+ rows=\d+, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) - -> Seq Scan on foo \(Current loop: actual time=\d+.\d+..\d+.\d+ rows=1, loop number=1\) - -> Hash \(Current loop: running time=\d+.\d+ actual rows=0, loop number=1\) + Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) + -> Seq Scan on foo \(Current loop: actual time=\d+.\d+..\d+.\d+ rows=1000000, loop number=1\) + -> Hash \(Current loop: actual time=\d+.\d+..\d+.\d+ rows=500000, loop number=1\) Buckets: \d+ Batches: \d+ Memory Usage: \d+kB -> Seq Scan on bar \(Current loop: actual time=\d+.\d+..\d+.\d+ rows=\d+, loop number=1\)""" - common.set_guc(acon, 'pg_query_state.enable_timing', 'on') + common.set_guc(acon1, 'pg_query_state.enable_timing', 'on') - qs, notices = common.onetime_query_state(config, acon, query, {'timing': True}) - assert len(qs) == 1 and re.match(expected, qs[0][3]) + qs, notices = common.onetime_query_state_locks(config, acon1, acon2, query, {'timing': True}) + + assert len(qs) == 2 + assert re.match(expected, qs[0][3]) assert len(notices) == 0 - common.n_close((acon,)) + common.n_close((acon1, acon2)) def 
check_plan(plan): assert 'Current loop' in plan From 799b23c787c9ca35989e596ce6a5f8f22a418225 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Thu, 2 Sep 2021 14:52:10 +0300 Subject: [PATCH 02/37] [PGPRO-5531] Fixed crashes in the receive_msg_by_parts function --- pg_query_state.c | 48 ++++++++++++++++++++++++++++++++++++++---------- pg_query_state.h | 5 +++++ 2 files changed, 43 insertions(+), 10 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index d4a58fb..fc3f547 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -61,7 +61,7 @@ static void qs_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, static void qs_ExecutorFinish(QueryDesc *queryDesc); static shm_mq_result receive_msg_by_parts(shm_mq_handle *mqh, Size *total, - void **datap, bool nowait); + void **datap, int64 timeout, int *rc, bool nowait); /* Global variables */ List *QueryDescStack = NIL; @@ -780,7 +780,7 @@ shm_mq_receive_with_timeout(shm_mq_handle *mqh, { shm_mq_result mq_receive_result; - mq_receive_result = receive_msg_by_parts(mqh, nbytesp, datap, true); + mq_receive_result = receive_msg_by_parts(mqh, nbytesp, datap, timeout, &rc, true); if (mq_receive_result != SHM_MQ_WOULD_BLOCK) return mq_receive_result; if (rc & WL_TIMEOUT || delay <= 0) @@ -967,33 +967,61 @@ copy_msg(shm_mq_msg *msg) static shm_mq_result receive_msg_by_parts(shm_mq_handle *mqh, Size *total, void **datap, - bool nowait) + int64 timeout, int *rc, bool nowait) { shm_mq_result mq_receive_result; shm_mq_msg *buff; int offset; - Size *expected; - Size expected_data; + Size *expected; + Size expected_data; Size len; /* Get the expected number of bytes in message */ mq_receive_result = shm_mq_receive(mqh, &len, (void **) &expected, nowait); - expected_data = *expected; if (mq_receive_result != SHM_MQ_SUCCESS) return mq_receive_result; Assert(len == sizeof(Size)); + expected_data = *expected; *datap = palloc0(expected_data); /* Get the message itself */ for (offset = 0; offset < expected_data; ) { + int64 delay = timeout; /* Keep receiving new messages until we assemble the full message */ - mq_receive_result = shm_mq_receive(mqh, &len, ((void **) &buff), nowait); + for (;;) + { + mq_receive_result = shm_mq_receive(mqh, &len, ((void **) &buff), nowait); + if (mq_receive_result != SHM_MQ_SUCCESS) + { + if (nowait && mq_receive_result == SHM_MQ_WOULD_BLOCK) + { + /* + * We can't leave this function during reading parts with + * error code SHM_MQ_WOULD_BLOCK because can be be error + * at next call receive_msg_by_parts() with continuing + * reading non-readed parts. + * So we should wait whole MAX_RCV_TIMEOUT timeout and + * return error after that only. 
+ */ + if (delay > 0) + { + pg_usleep(PART_RCV_DELAY * 1000); + delay -= PART_RCV_DELAY; + continue; + } + if (rc) + { /* Mark that the timeout has expired: */ + *rc |= WL_TIMEOUT; + } + } + return mq_receive_result; + } + break; + } memcpy((char *) *datap + offset, buff, len); offset += len; - if (mq_receive_result != SHM_MQ_SUCCESS) - return mq_receive_result; } *total = offset; @@ -1074,7 +1102,7 @@ GetRemoteBackendQueryStates(PGPROC *leader, mqh = shm_mq_attach(mq, NULL, NULL); elog(DEBUG1, "Wait response from leader %d", leader->pid); mq_receive_result = receive_msg_by_parts(mqh, &len, (void **) &msg, - false); + 0, NULL, false); if (mq_receive_result != SHM_MQ_SUCCESS) goto mq_error; if (msg->reqid != reqid) diff --git a/pg_query_state.h b/pg_query_state.h index 8192ae2..c812cd4 100644 --- a/pg_query_state.h +++ b/pg_query_state.h @@ -31,6 +31,11 @@ #define MAX_RCV_TIMEOUT 6000 /* 6 seconds */ #define MAX_SND_TIMEOUT 3000 /* 3 seconds */ +/* + * Delay for receiving parts of full message (in case SHM_MQ_WOULD_BLOCK code), + * should be tess than MAX_RCV_TIMEOUT + */ +#define PART_RCV_DELAY 1000 /* 1 second */ /* * Result status on query state request from asked backend From f79ebd98da2aeccd983f5bf85ccc0511a24f10d7 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Wed, 22 Sep 2021 17:33:25 +0300 Subject: [PATCH 03/37] [PGPRO-4561] Non-blocking writing to the queue To avoid deadlocking while canceling the pg_query_state() call writing to the queue is implemented non-blocking way. --- pg_query_state.c | 2 +- pg_query_state.h | 2 ++ signal_handler.c | 76 +++++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 72 insertions(+), 8 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index fc3f547..a4a7d1b 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -1141,7 +1141,7 @@ GetRemoteBackendQueryStates(PGPROC *leader, MAX_RCV_TIMEOUT); if (mq_receive_result != SHM_MQ_SUCCESS) { - /* counterpart is died, not consider it */ + /* counterpart is dead, not considering it */ goto mq_error; } if (msg->reqid != reqid) diff --git a/pg_query_state.h b/pg_query_state.h index c812cd4..382f910 100644 --- a/pg_query_state.h +++ b/pg_query_state.h @@ -19,6 +19,8 @@ #define QUEUE_SIZE (16 * 1024) #define MSG_MAX_SIZE 1024 +#define WRITING_DELAY (100 * 1000) // 100ms +#define NUM_OF_ATTEMPTS 6 #define TIMINIG_OFF_WARNING 1 #define BUFFERS_OFF_WARNING 2 diff --git a/signal_handler.c b/signal_handler.c index 8b61d36..2369fec 100644 --- a/signal_handler.c +++ b/signal_handler.c @@ -27,7 +27,16 @@ typedef struct char *plan; } stack_frame; -static void send_msg_by_parts(shm_mq_handle *mqh, Size nbytes, const void *data); +/* + * An self-explanarory enum describing the send_msg_by_parts results + */ +typedef enum +{ + MSG_BY_PARTS_SUCCEEDED, + MSG_BY_PARTS_FAILED +} msg_by_parts_result; + +static msg_by_parts_result send_msg_by_parts(shm_mq_handle *mqh, Size nbytes, const void *data); /* * Get List of stack_frames as a stack of function calls starting from outermost call. 
@@ -151,7 +160,36 @@ serialize_stack(char *dest, List *qs_stack) } } -static void +static msg_by_parts_result +shm_mq_send_nonblocking(shm_mq_handle *mqh, Size nbytes, const void *data, Size attempts) +{ + int i; + shm_mq_result res; + + for(i = 0; i < attempts; i++) + { + res = shm_mq_send(mqh, nbytes, data, true); + + if(res == SHM_MQ_SUCCESS) + break; + else if (res == SHM_MQ_DETACHED) + return MSG_BY_PARTS_FAILED; + + /* SHM_MQ_WOULD_BLOCK - sleeping for some delay */ + pg_usleep(WRITING_DELAY); + } + + if(i == attempts) + return MSG_BY_PARTS_FAILED; + + return MSG_BY_PARTS_SUCCEEDED; +} + +/* + * send_msg_by_parts sends data throurh the queue as a bunch of messages + * of smaller size + */ +static msg_by_parts_result send_msg_by_parts(shm_mq_handle *mqh, Size nbytes, const void *data) { int bytes_left; @@ -159,14 +197,20 @@ send_msg_by_parts(shm_mq_handle *mqh, Size nbytes, const void *data) int offset; /* Send the expected message length */ - shm_mq_send(mqh, sizeof(Size), &nbytes, false); + if(shm_mq_send_nonblocking(mqh, sizeof(Size), &nbytes, NUM_OF_ATTEMPTS) == MSG_BY_PARTS_FAILED) + return MSG_BY_PARTS_FAILED; + /* Send the message itself */ for (offset = 0; offset < nbytes; offset += bytes_send) { bytes_left = nbytes - offset; bytes_send = (bytes_left < MSG_MAX_SIZE) ? bytes_left : MSG_MAX_SIZE; - shm_mq_send(mqh, bytes_send, &(((unsigned char*)data)[offset]), false); + if(shm_mq_send_nonblocking(mqh, bytes_send, &(((unsigned char*)data)[offset]), NUM_OF_ATTEMPTS) + == MSG_BY_PARTS_FAILED) + return MSG_BY_PARTS_FAILED; } + + return MSG_BY_PARTS_SUCCEEDED; } /* @@ -227,7 +271,8 @@ SendQueryState(void) { shm_mq_msg msg = { reqid, BASE_SIZEOF_SHM_MQ_MSG, MyProc, STAT_DISABLED }; - send_msg_by_parts(mqh, msg.length, &msg); + if(send_msg_by_parts(mqh, msg.length, &msg) != MSG_BY_PARTS_SUCCEEDED) + goto connection_cleanup; } /* check if backend doesn't execute any query */ @@ -235,7 +280,8 @@ SendQueryState(void) { shm_mq_msg msg = { reqid, BASE_SIZEOF_SHM_MQ_MSG, MyProc, QUERY_NOT_RUNNING }; - send_msg_by_parts(mqh, msg.length, &msg); + if(send_msg_by_parts(mqh, msg.length, &msg) != MSG_BY_PARTS_SUCCEEDED) + goto connection_cleanup; } /* happy path */ @@ -258,9 +304,25 @@ SendQueryState(void) msg->stack_depth = list_length(qs_stack); serialize_stack(msg->stack, qs_stack); - send_msg_by_parts(mqh, msglen, msg); + + if(send_msg_by_parts(mqh, msglen, msg) != MSG_BY_PARTS_SUCCEEDED) + { + elog(WARNING, "pg_query_state: peer seems to have detached"); + goto connection_cleanup; + } } elog(DEBUG1, "Worker %d sends response for pg_query_state to %d", shm_mq_get_sender(mq)->pid, shm_mq_get_receiver(mq)->pid); DetachPeer(); UnlockShmem(&tag); + + return; + +connection_cleanup: +#if PG_VERSION_NUM < 100000 + shm_mq_detach(mq); +#else + shm_mq_detach(mqh); +#endif + DetachPeer(); + UnlockShmem(&tag); } From 25384f2513ef5c7a97194bdba0d71b7a17310a84 Mon Sep 17 00:00:00 2001 From: Kovalenko Anastasia Date: Fri, 23 Jul 2021 12:55:50 +0300 Subject: [PATCH 04/37] Patches for version 14 Docker scripts improved for version 14 --- .travis.yml | 2 + patches/custom_signals_14.0.patch | 217 +++++++++++++++++++++++ patches/runtime_explain_14.0.patch | 271 +++++++++++++++++++++++++++++ 3 files changed, 490 insertions(+) create mode 100644 patches/custom_signals_14.0.patch create mode 100644 patches/runtime_explain_14.0.patch diff --git a/.travis.yml b/.travis.yml index c6c06bf..8b3a5a7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,8 @@ notifications: on_failure: always env: + - PG_VERSION=14 
LEVEL=hardcore USE_TPCDS=1 + - PG_VERSION=14 - PG_VERSION=13 LEVEL=hardcore USE_TPCDS=1 - PG_VERSION=13 - PG_VERSION=12 LEVEL=hardcore USE_TPCDS=1 diff --git a/patches/custom_signals_14.0.patch b/patches/custom_signals_14.0.patch new file mode 100644 index 0000000..fbb83de --- /dev/null +++ b/patches/custom_signals_14.0.patch @@ -0,0 +1,217 @@ +From f2632ea7cd03119c55b8aa0ef60f529380ca2536 Mon Sep 17 00:00:00 2001 +From: Kovalenko Anastasia +Date: Tue, 24 Aug 2021 16:22:28 +0300 +Subject: [PATCH] custom-signals + +--- + src/backend/storage/ipc/procsignal.c | 94 ++++++++++++++++++++++++++++ + src/backend/tcop/postgres.c | 2 + + src/include/storage/procsignal.h | 17 +++++ + 3 files changed, 113 insertions(+) + +diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c +index defb75a..4245d28 100644 +--- a/src/backend/storage/ipc/procsignal.c ++++ b/src/backend/storage/ipc/procsignal.c +@@ -95,6 +95,13 @@ typedef struct + #define BARRIER_CLEAR_BIT(flags, type) \ + ((flags) &= ~(((uint32) 1) << (uint32) (type))) + ++#define IsCustomProcSignalReason(reason) \ ++ ((reason) >= PROCSIG_CUSTOM_1 && (reason) <= PROCSIG_CUSTOM_N) ++ ++static bool CustomSignalPendings[NUM_CUSTOM_PROCSIGNALS]; ++static bool CustomSignalProcessing[NUM_CUSTOM_PROCSIGNALS]; ++static ProcSignalHandler_type CustomInterruptHandlers[NUM_CUSTOM_PROCSIGNALS]; ++ + static ProcSignalHeader *ProcSignal = NULL; + static ProcSignalSlot *MyProcSignalSlot = NULL; + +@@ -103,6 +110,8 @@ static void CleanupProcSignalState(int status, Datum arg); + static void ResetProcSignalBarrierBits(uint32 flags); + static bool ProcessBarrierPlaceholder(void); + ++static void CheckAndSetCustomSignalInterrupts(void); ++ + /* + * ProcSignalShmemSize + * Compute space needed for procsignal's shared memory +@@ -246,6 +255,36 @@ CleanupProcSignalState(int status, Datum arg) + slot->pss_pid = 0; + } + ++/* ++ * RegisterCustomProcSignalHandler ++ * Assign specific handler of custom process signal with new ++ * ProcSignalReason key. ++ * ++ * This function has to be called in _PG_init function of extensions at the ++ * stage of loading shared preloaded libraries. Otherwise it throws fatal error. ++ * ++ * Return INVALID_PROCSIGNAL if all slots for custom signals are occupied. ++ */ ++ProcSignalReason ++RegisterCustomProcSignalHandler(ProcSignalHandler_type handler) ++{ ++ ProcSignalReason reason; ++ ++ if (!process_shared_preload_libraries_in_progress) ++ ereport(FATAL, (errcode(ERRCODE_INTERNAL_ERROR), ++ errmsg("cannot register custom signal after startup"))); ++ ++ /* Iterate through custom signal slots to find a free one */ ++ for (reason = PROCSIG_CUSTOM_1; reason <= PROCSIG_CUSTOM_N; reason++) ++ if (!CustomInterruptHandlers[reason - PROCSIG_CUSTOM_1]) ++ { ++ CustomInterruptHandlers[reason - PROCSIG_CUSTOM_1] = handler; ++ return reason; ++ } ++ ++ return INVALID_PROCSIGNAL; ++} ++ + /* + * SendProcSignal + * Send a signal to a Postgres process +@@ -679,7 +718,62 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) + if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN)) + RecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN); + ++ CheckAndSetCustomSignalInterrupts(); ++ + SetLatch(MyLatch); + + errno = save_errno; + } ++ ++/* ++ * Handle receipt of an interrupt indicating any of custom process signals. 
++ */ ++static void ++CheckAndSetCustomSignalInterrupts() ++{ ++ ProcSignalReason reason; ++ ++ for (reason = PROCSIG_CUSTOM_1; reason <= PROCSIG_CUSTOM_N; reason++) ++ { ++ if (CheckProcSignal(reason)) ++ { ++ ++ /* set interrupt flags */ ++ InterruptPending = true; ++ CustomSignalPendings[reason - PROCSIG_CUSTOM_1] = true; ++ } ++ } ++ ++ SetLatch(MyLatch); ++} ++ ++/* ++ * CheckAndHandleCustomSignals ++ * Check custom signal flags and call handler assigned to that signal ++ * if it is not NULL ++ * ++ * This function is called within CHECK_FOR_INTERRUPTS if interrupt occurred. ++ */ ++void ++CheckAndHandleCustomSignals(void) ++{ ++ int i; ++ ++ /* Check on expiring of custom signals and call its handlers if exist */ ++ for (i = 0; i < NUM_CUSTOM_PROCSIGNALS; i++) ++ { ++ if (!CustomSignalProcessing[i] && CustomSignalPendings[i]) ++ { ++ ProcSignalHandler_type handler; ++ ++ CustomSignalPendings[i] = false; ++ handler = CustomInterruptHandlers[i]; ++ if (handler != NULL) ++ { ++ CustomSignalProcessing[i] = true; ++ handler(); ++ CustomSignalProcessing[i] = false; ++ } ++ } ++ } ++} +\ No newline at end of file +diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c +index 8cea10c..dd77c98 100644 +--- a/src/backend/tcop/postgres.c ++++ b/src/backend/tcop/postgres.c +@@ -3364,6 +3364,8 @@ ProcessInterrupts(void) + if (ParallelMessagePending) + HandleParallelMessages(); + ++ CheckAndHandleCustomSignals(); ++ + if (LogMemoryContextPending) + ProcessLogMemoryContextInterrupt(); + } +diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h +index eec186b..74af186 100644 +--- a/src/include/storage/procsignal.h ++++ b/src/include/storage/procsignal.h +@@ -17,6 +17,8 @@ + #include "storage/backendid.h" + + ++#define NUM_CUSTOM_PROCSIGNALS 64 ++ + /* + * Reasons for signaling a Postgres child process (a backend or an auxiliary + * process, like checkpointer). We can cope with concurrent signals for different +@@ -29,6 +31,8 @@ + */ + typedef enum + { ++ INVALID_PROCSIGNAL = -1, /* Must be first */ ++ + PROCSIG_CATCHUP_INTERRUPT, /* sinval catchup interrupt */ + PROCSIG_NOTIFY_INTERRUPT, /* listen/notify interrupt */ + PROCSIG_PARALLEL_MESSAGE, /* message from cooperating parallel backend */ +@@ -44,6 +48,14 @@ typedef enum + PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, + PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK, + ++ PROCSIG_CUSTOM_1, ++ /* ++ * PROCSIG_CUSTOM_2, ++ * ..., ++ * PROCSIG_CUSTOM_N-1, ++ */ ++ PROCSIG_CUSTOM_N = PROCSIG_CUSTOM_1 + NUM_CUSTOM_PROCSIGNALS - 1, ++ + NUM_PROCSIGNALS /* Must be last! 
*/ + } ProcSignalReason; + +@@ -56,6 +68,8 @@ typedef enum + */ + PROCSIGNAL_BARRIER_PLACEHOLDER = 0 + } ProcSignalBarrierType; ++/* Handler of custom process signal */ ++typedef void (*ProcSignalHandler_type) (void); + + /* + * prototypes for functions in procsignal.c +@@ -64,12 +78,15 @@ extern Size ProcSignalShmemSize(void); + extern void ProcSignalShmemInit(void); + + extern void ProcSignalInit(int pss_idx); ++extern ProcSignalReason ++ RegisterCustomProcSignalHandler(ProcSignalHandler_type handler); + extern int SendProcSignal(pid_t pid, ProcSignalReason reason, + BackendId backendId); + + extern uint64 EmitProcSignalBarrier(ProcSignalBarrierType type); + extern void WaitForProcSignalBarrier(uint64 generation); + extern void ProcessProcSignalBarrier(void); ++extern void CheckAndHandleCustomSignals(void); + + extern void procsignal_sigusr1_handler(SIGNAL_ARGS); + +-- +2.25.1 + diff --git a/patches/runtime_explain_14.0.patch b/patches/runtime_explain_14.0.patch new file mode 100644 index 0000000..3dbba9a --- /dev/null +++ b/patches/runtime_explain_14.0.patch @@ -0,0 +1,271 @@ +From 71a353d4cac663db43c57452f925082a233c0e49 Mon Sep 17 00:00:00 2001 +From: Kovalenko Anastasia +Date: Mon, 23 Aug 2021 15:29:59 +0300 +Subject: [PATCH] runtime-explain + +--- + src/backend/commands/explain.c | 153 ++++++++++++++++++++++++++++----- + src/include/commands/explain.h | 2 + + 2 files changed, 133 insertions(+), 22 deletions(-) + +diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c +index 10644dfac4..7106ed4257 100644 +--- a/src/backend/commands/explain.c ++++ b/src/backend/commands/explain.c +@@ -984,14 +984,36 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + char *relname; + char *conname = NULL; + ++ instr_time starttimespan; ++ double total; ++ double ntuples; ++ double ncalls; ++ ++ if (!es->runtime) ++ { + /* Must clean up instrumentation state */ + InstrEndLoop(instr); ++ } ++ ++ /* Collect statistic variables */ ++ if (!INSTR_TIME_IS_ZERO(instr->starttime)) ++ { ++ INSTR_TIME_SET_CURRENT(starttimespan); ++ INSTR_TIME_SUBTRACT(starttimespan, instr->starttime); ++ } ++ else ++ INSTR_TIME_SET_ZERO(starttimespan); ++ ++ total = instr->total + INSTR_TIME_GET_DOUBLE(instr->counter) ++ + INSTR_TIME_GET_DOUBLE(starttimespan); ++ ntuples = instr->ntuples + instr->tuplecount; ++ ncalls = ntuples + !INSTR_TIME_IS_ZERO(starttimespan); + + /* + * We ignore triggers that were never invoked; they likely aren't + * relevant to the current query type. 
+ */ +- if (instr->ntuples == 0) ++ if (ncalls == 0) + continue; + + ExplainOpenGroup("Trigger", NULL, true, es); +@@ -1017,9 +1039,9 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + appendStringInfo(es->str, " on %s", relname); + if (es->timing) + appendStringInfo(es->str, ": time=%.3f calls=%.0f\n", +- 1000.0 * instr->total, instr->ntuples); ++ 1000.0 * total, ncalls); + else +- appendStringInfo(es->str, ": calls=%.0f\n", instr->ntuples); ++ appendStringInfo(es->str, ": calls=%.0f\n", ncalls); + } + else + { +@@ -1028,9 +1050,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + ExplainPropertyText("Constraint Name", conname, es); + ExplainPropertyText("Relation", relname, es); + if (es->timing) +- ExplainPropertyFloat("Time", "ms", 1000.0 * instr->total, 3, +- es); +- ExplainPropertyFloat("Calls", NULL, instr->ntuples, 0, es); ++ ExplainPropertyFloat("Time", "ms", 1000.0 * total, 3, es); ++ ExplainPropertyFloat("Calls", NULL, ncalls, 0, es); + } + + if (conname) +@@ -1600,8 +1621,11 @@ ExplainNode(PlanState *planstate, List *ancestors, + * instrumentation results the user didn't ask for. But we do the + * InstrEndLoop call anyway, if possible, to reduce the number of cases + * auto_explain has to contend with. ++ * ++ * If flag es->stateinfo is set, i.e. when printing the current execution ++ * state, this step of cleaning up is missed. + */ +- if (planstate->instrument) ++ if (planstate->instrument && !es->runtime) + InstrEndLoop(planstate->instrument); + + if (es->analyze && +@@ -1636,7 +1660,7 @@ ExplainNode(PlanState *planstate, List *ancestors, + ExplainPropertyFloat("Actual Loops", NULL, nloops, 0, es); + } + } +- else if (es->analyze) ++ else if (es->analyze && !es->runtime) + { + if (es->format == EXPLAIN_FORMAT_TEXT) + appendStringInfoString(es->str, " (never executed)"); +@@ -1652,6 +1676,75 @@ ExplainNode(PlanState *planstate, List *ancestors, + } + } + ++ /* ++ * Print the progress of node execution at current loop. 
++ */ ++ if (planstate->instrument && es->analyze && es->runtime) ++ { ++ instr_time starttimespan; ++ double startup_sec; ++ double total_sec; ++ double rows; ++ double loop_num; ++ bool finished; ++ ++ if (!INSTR_TIME_IS_ZERO(planstate->instrument->starttime)) ++ { ++ INSTR_TIME_SET_CURRENT(starttimespan); ++ INSTR_TIME_SUBTRACT(starttimespan, planstate->instrument->starttime); ++ } ++ else ++ INSTR_TIME_SET_ZERO(starttimespan); ++ startup_sec = 1000.0 * planstate->instrument->firsttuple; ++ total_sec = 1000.0 * (INSTR_TIME_GET_DOUBLE(planstate->instrument->counter) ++ + INSTR_TIME_GET_DOUBLE(starttimespan)); ++ rows = planstate->instrument->tuplecount; ++ loop_num = planstate->instrument->nloops + 1; ++ ++ finished = planstate->instrument->nloops > 0 ++ && !planstate->instrument->running ++ && INSTR_TIME_IS_ZERO(starttimespan); ++ ++ if (!finished) ++ { ++ ExplainOpenGroup("Current loop", "Current loop", true, es); ++ if (es->format == EXPLAIN_FORMAT_TEXT) ++ { ++ if (es->timing) ++ { ++ if (planstate->instrument->running) ++ appendStringInfo(es->str, ++ " (Current loop: actual time=%.3f..%.3f rows=%.0f, loop number=%.0f)", ++ startup_sec, total_sec, rows, loop_num); ++ else ++ appendStringInfo(es->str, ++ " (Current loop: running time=%.3f actual rows=0, loop number=%.0f)", ++ total_sec, loop_num); ++ } ++ else ++ appendStringInfo(es->str, ++ " (Current loop: actual rows=%.0f, loop number=%.0f)", ++ rows, loop_num); ++ } ++ else ++ { ++ ExplainPropertyFloat("Actual Loop Number", NULL, loop_num, 0, es); ++ if (es->timing) ++ { ++ if (planstate->instrument->running) ++ { ++ ExplainPropertyFloat("Actual Startup Time", NULL, startup_sec, 3, es); ++ ExplainPropertyFloat("Actual Total Time", NULL, total_sec, 3, es); ++ } ++ else ++ ExplainPropertyFloat("Running Time", NULL, total_sec, 3, es); ++ } ++ ExplainPropertyFloat("Actual Rows", NULL, rows, 0, es); ++ } ++ ExplainCloseGroup("Current loop", "Current loop", true, es); ++ } ++ } ++ + /* in text format, first line ends here */ + if (es->format == EXPLAIN_FORMAT_TEXT) + appendStringInfoChar(es->str, '\n'); +@@ -2051,6 +2144,9 @@ ExplainNode(PlanState *planstate, List *ancestors, + + /* Prepare per-worker buffer/WAL usage */ + if (es->workers_state && (es->buffers || es->wal) && es->verbose) ++ /* Show worker detail after query execution */ ++ if (es->analyze && es->verbose && planstate->worker_instrument ++ && !es->runtime) + { + WorkerInstrumentation *w = planstate->worker_instrument; + +@@ -3015,6 +3111,11 @@ show_hash_info(HashState *hashstate, ExplainState *es) + memcpy(&hinstrument, hashstate->hinstrument, + sizeof(HashInstrumentation)); + ++ if (hashstate->hashtable) ++ { ++ ExecHashAccumInstrumentation(&hinstrument, hashstate->hashtable); ++ } ++ + /* + * Merge results from workers. In the parallel-oblivious case, the + * results from all participants should be identical, except where +@@ -3392,20 +3493,16 @@ show_instrumentation_count(const char *qlabel, int which, + if (!es->analyze || !planstate->instrument) + return; + ++ nloops = planstate->instrument->nloops; + if (which == 2) +- nfiltered = planstate->instrument->nfiltered2; ++ nfiltered = ((nloops > 0) ? planstate->instrument->nfiltered2 / nloops : 0); + else +- nfiltered = planstate->instrument->nfiltered1; ++ nfiltered = ((nloops > 0) ? 
planstate->instrument->nfiltered1 / nloops : 0); + nloops = planstate->instrument->nloops; + + /* In text mode, suppress zero counts; they're not interesting enough */ + if (nfiltered > 0 || es->format != EXPLAIN_FORMAT_TEXT) +- { +- if (nloops > 0) +- ExplainPropertyFloat(qlabel, NULL, nfiltered / nloops, 0, es); +- else +- ExplainPropertyFloat(qlabel, NULL, 0.0, 0, es); +- } ++ ExplainPropertyFloat(qlabel, NULL, nfiltered, 0, es); + } + + /* +@@ -3977,15 +4074,27 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, + double insert_path; + double other_path; + +- InstrEndLoop(outerPlanState(mtstate)->instrument); ++ if (!es->runtime) ++ InstrEndLoop(outerPlanState(mtstate)->instrument); + + /* count the number of source rows */ +- total = outerPlanState(mtstate)->instrument->ntuples; +- other_path = mtstate->ps.instrument->ntuples2; +- insert_path = total - other_path; ++ other_path = mtstate->ps.instrument->nfiltered2; ++ ++ /* ++ * Insert occurs after extracting row from subplan and in runtime mode ++ * we can appear between these two operations - situation when ++ * total > insert_path + other_path. Therefore we don't know exactly ++ * whether last row from subplan is inserted. ++ * We don't print inserted tuples in runtime mode in order to not print ++ * inconsistent data ++ */ ++ if (!es->runtime) ++ { ++ total = outerPlanState(mtstate)->instrument->ntuples; ++ insert_path = total - other_path; ++ ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); ++ } + +- ExplainPropertyFloat("Tuples Inserted", NULL, +- insert_path, 0, es); + ExplainPropertyFloat("Conflicting Tuples", NULL, + other_path, 0, es); + } +diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h +index e94d9e49cf..6a157b8bc0 100644 +--- a/src/include/commands/explain.h ++++ b/src/include/commands/explain.h +@@ -47,6 +47,8 @@ typedef struct ExplainState + bool summary; /* print total planning and execution timing */ + bool settings; /* print modified settings */ + ExplainFormat format; /* output format */ ++ bool runtime; /* print intermediate state of query execution, ++ not after completion */ + /* state for output formatting --- not reset for each new plan tree */ + int indent; /* current indentation level */ + List *grouping_stack; /* format-specific grouping state */ +-- +2.25.1 + From a4f4cd7e383f03c4a0bb6e96d7ef5526a1273ac2 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Wed, 13 Oct 2021 17:04:39 +0300 Subject: [PATCH 05/37] [PGPRO-4561] indentation and typo fixed after review --- pg_query_state.h | 2 +- signal_handler.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pg_query_state.h b/pg_query_state.h index 382f910..2b13234 100644 --- a/pg_query_state.h +++ b/pg_query_state.h @@ -19,7 +19,7 @@ #define QUEUE_SIZE (16 * 1024) #define MSG_MAX_SIZE 1024 -#define WRITING_DELAY (100 * 1000) // 100ms +#define WRITING_DELAY (100 * 1000) /* 100ms */ #define NUM_OF_ATTEMPTS 6 #define TIMINIG_OFF_WARNING 1 diff --git a/signal_handler.c b/signal_handler.c index 2369fec..2af69fd 100644 --- a/signal_handler.c +++ b/signal_handler.c @@ -163,7 +163,7 @@ serialize_stack(char *dest, List *qs_stack) static msg_by_parts_result shm_mq_send_nonblocking(shm_mq_handle *mqh, Size nbytes, const void *data, Size attempts) { - int i; + int i; shm_mq_result res; for(i = 0; i < attempts; i++) @@ -186,7 +186,7 @@ shm_mq_send_nonblocking(shm_mq_handle *mqh, Size nbytes, const void *data, Size } /* - * send_msg_by_parts sends data throurh the queue as a bunch of 
messages + * send_msg_by_parts sends data through the queue as a bunch of messages * of smaller size */ static msg_by_parts_result From 4b5b13feded6c554cc2bf332db2fadbcbcfa4c87 Mon Sep 17 00:00:00 2001 From: Vyacheslav Makarov Date: Thu, 14 Oct 2021 14:24:45 +0300 Subject: [PATCH 06/37] Temporarily turning off TPCDS tests. --- .travis.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 8b3a5a7..300ddca 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,15 +18,15 @@ notifications: on_failure: always env: - - PG_VERSION=14 LEVEL=hardcore USE_TPCDS=1 + - PG_VERSION=14 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=14 - - PG_VERSION=13 LEVEL=hardcore USE_TPCDS=1 + - PG_VERSION=13 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=13 - - PG_VERSION=12 LEVEL=hardcore USE_TPCDS=1 + - PG_VERSION=12 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=12 - - PG_VERSION=11 LEVEL=hardcore USE_TPCDS=1 + - PG_VERSION=11 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=11 - - PG_VERSION=10 LEVEL=hardcore USE_TPCDS=1 + - PG_VERSION=10 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=10 - PG_VERSION=9.6 LEVEL=hardcore - PG_VERSION=9.6 From b166a3778d7ae41f4c53c0d0e25497c13089a294 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Sun, 24 Oct 2021 18:53:01 +0300 Subject: [PATCH 07/37] [PGPRO-5703] Run installcheck against an existing installation. Tags: pg_query_state --- Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e017e60..ca9faab 100644 --- a/Makefile +++ b/Makefile @@ -30,7 +30,11 @@ ISOLATIONCHECKS = corner_cases check: isolationcheck -installcheck: isolationcheck +installcheck: submake-isolation + $(MKDIR_P) isolation_output + $(pg_isolation_regress_installcheck) \ + --outputdir=isolation_output \ + $(ISOLATIONCHECKS) isolationcheck: | submake-isolation temp-install $(MKDIR_P) isolation_output From a9168b94ba446f46471143922bc4b1414f74be86 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Wed, 27 Oct 2021 10:36:41 +0300 Subject: [PATCH 08/37] [PGPRO-5703] Place setting of n_peers after the exit on permission denied. Previously setting of n_peers before error exit leds to a warning: pg_query_state: last request was interrupted and MAX_RCV_TIMEOUT pause at second function call. Tags: pg_query_state --- pg_query_state.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index a4a7d1b..935fbf8 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -541,14 +541,18 @@ pg_query_state(PG_FUNCTION_ARGS) break; } } - pg_atomic_write_u32(&counterpart_userid->n_peers, 1); - params->reqid = ++reqid; - pg_write_barrier(); counterpart_user_id = GetRemoteBackendUserId(proc); if (!(superuser() || GetUserId() == counterpart_user_id)) + { + UnlockShmem(&tag); ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied"))); + } + + pg_atomic_write_u32(&counterpart_userid->n_peers, 1); + params->reqid = ++reqid; + pg_write_barrier(); bg_worker_procs = GetRemoteBackendWorkers(proc); From 612f955ac9619b4d10ed2834582628c54f304c74 Mon Sep 17 00:00:00 2001 From: Vyacheslav Makarov Date: Fri, 29 Oct 2021 07:20:47 +0300 Subject: [PATCH 09/37] porting to version 15 and minor improvements to version 14 Fixed minor bugs in patches for version 14. Added patches for version 15. shm_mq_send started accepting one more argument. Solved this problem with #ifdef PG_VERSION_NUM. 
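For illustration only (a sketch mirroring the hunks below, not an additional change; variable names follow the surrounding code): each shm_mq_send() call site is wrapped in a version guard so the extra boolean argument added in PostgreSQL 15 (force_flush) is passed only when building against that version:

    #if PG_VERSION_NUM <= 140000
        res = shm_mq_send(mqh, nbytes, data, true);
    #else
        /* PostgreSQL 15 adds a fifth argument (force_flush); pass true to flush immediately */
        res = shm_mq_send(mqh, nbytes, data, true, true);
    #endif
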
--- patches/custom_signals_14.0.patch | 11 -- patches/custom_signals_15.0.patch | 206 +++++++++++++++++++++++ patches/runtime_explain_14.0.patch | 10 -- patches/runtime_explain_15.0.patch | 261 +++++++++++++++++++++++++++++ pg_query_state.c | 4 + signal_handler.c | 4 + 6 files changed, 475 insertions(+), 21 deletions(-) create mode 100644 patches/custom_signals_15.0.patch create mode 100644 patches/runtime_explain_15.0.patch diff --git a/patches/custom_signals_14.0.patch b/patches/custom_signals_14.0.patch index fbb83de..9d640cb 100644 --- a/patches/custom_signals_14.0.patch +++ b/patches/custom_signals_14.0.patch @@ -1,14 +1,3 @@ -From f2632ea7cd03119c55b8aa0ef60f529380ca2536 Mon Sep 17 00:00:00 2001 -From: Kovalenko Anastasia -Date: Tue, 24 Aug 2021 16:22:28 +0300 -Subject: [PATCH] custom-signals - ---- - src/backend/storage/ipc/procsignal.c | 94 ++++++++++++++++++++++++++++ - src/backend/tcop/postgres.c | 2 + - src/include/storage/procsignal.h | 17 +++++ - 3 files changed, 113 insertions(+) - diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c index defb75a..4245d28 100644 --- a/src/backend/storage/ipc/procsignal.c diff --git a/patches/custom_signals_15.0.patch b/patches/custom_signals_15.0.patch new file mode 100644 index 0000000..9d640cb --- /dev/null +++ b/patches/custom_signals_15.0.patch @@ -0,0 +1,206 @@ +diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c +index defb75a..4245d28 100644 +--- a/src/backend/storage/ipc/procsignal.c ++++ b/src/backend/storage/ipc/procsignal.c +@@ -95,6 +95,13 @@ typedef struct + #define BARRIER_CLEAR_BIT(flags, type) \ + ((flags) &= ~(((uint32) 1) << (uint32) (type))) + ++#define IsCustomProcSignalReason(reason) \ ++ ((reason) >= PROCSIG_CUSTOM_1 && (reason) <= PROCSIG_CUSTOM_N) ++ ++static bool CustomSignalPendings[NUM_CUSTOM_PROCSIGNALS]; ++static bool CustomSignalProcessing[NUM_CUSTOM_PROCSIGNALS]; ++static ProcSignalHandler_type CustomInterruptHandlers[NUM_CUSTOM_PROCSIGNALS]; ++ + static ProcSignalHeader *ProcSignal = NULL; + static ProcSignalSlot *MyProcSignalSlot = NULL; + +@@ -103,6 +110,8 @@ static void CleanupProcSignalState(int status, Datum arg); + static void ResetProcSignalBarrierBits(uint32 flags); + static bool ProcessBarrierPlaceholder(void); + ++static void CheckAndSetCustomSignalInterrupts(void); ++ + /* + * ProcSignalShmemSize + * Compute space needed for procsignal's shared memory +@@ -246,6 +255,36 @@ CleanupProcSignalState(int status, Datum arg) + slot->pss_pid = 0; + } + ++/* ++ * RegisterCustomProcSignalHandler ++ * Assign specific handler of custom process signal with new ++ * ProcSignalReason key. ++ * ++ * This function has to be called in _PG_init function of extensions at the ++ * stage of loading shared preloaded libraries. Otherwise it throws fatal error. ++ * ++ * Return INVALID_PROCSIGNAL if all slots for custom signals are occupied. 
++ */ ++ProcSignalReason ++RegisterCustomProcSignalHandler(ProcSignalHandler_type handler) ++{ ++ ProcSignalReason reason; ++ ++ if (!process_shared_preload_libraries_in_progress) ++ ereport(FATAL, (errcode(ERRCODE_INTERNAL_ERROR), ++ errmsg("cannot register custom signal after startup"))); ++ ++ /* Iterate through custom signal slots to find a free one */ ++ for (reason = PROCSIG_CUSTOM_1; reason <= PROCSIG_CUSTOM_N; reason++) ++ if (!CustomInterruptHandlers[reason - PROCSIG_CUSTOM_1]) ++ { ++ CustomInterruptHandlers[reason - PROCSIG_CUSTOM_1] = handler; ++ return reason; ++ } ++ ++ return INVALID_PROCSIGNAL; ++} ++ + /* + * SendProcSignal + * Send a signal to a Postgres process +@@ -679,7 +718,62 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) + if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN)) + RecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN); + ++ CheckAndSetCustomSignalInterrupts(); ++ + SetLatch(MyLatch); + + errno = save_errno; + } ++ ++/* ++ * Handle receipt of an interrupt indicating any of custom process signals. ++ */ ++static void ++CheckAndSetCustomSignalInterrupts() ++{ ++ ProcSignalReason reason; ++ ++ for (reason = PROCSIG_CUSTOM_1; reason <= PROCSIG_CUSTOM_N; reason++) ++ { ++ if (CheckProcSignal(reason)) ++ { ++ ++ /* set interrupt flags */ ++ InterruptPending = true; ++ CustomSignalPendings[reason - PROCSIG_CUSTOM_1] = true; ++ } ++ } ++ ++ SetLatch(MyLatch); ++} ++ ++/* ++ * CheckAndHandleCustomSignals ++ * Check custom signal flags and call handler assigned to that signal ++ * if it is not NULL ++ * ++ * This function is called within CHECK_FOR_INTERRUPTS if interrupt occurred. ++ */ ++void ++CheckAndHandleCustomSignals(void) ++{ ++ int i; ++ ++ /* Check on expiring of custom signals and call its handlers if exist */ ++ for (i = 0; i < NUM_CUSTOM_PROCSIGNALS; i++) ++ { ++ if (!CustomSignalProcessing[i] && CustomSignalPendings[i]) ++ { ++ ProcSignalHandler_type handler; ++ ++ CustomSignalPendings[i] = false; ++ handler = CustomInterruptHandlers[i]; ++ if (handler != NULL) ++ { ++ CustomSignalProcessing[i] = true; ++ handler(); ++ CustomSignalProcessing[i] = false; ++ } ++ } ++ } ++} +\ No newline at end of file +diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c +index 8cea10c..dd77c98 100644 +--- a/src/backend/tcop/postgres.c ++++ b/src/backend/tcop/postgres.c +@@ -3364,6 +3364,8 @@ ProcessInterrupts(void) + if (ParallelMessagePending) + HandleParallelMessages(); + ++ CheckAndHandleCustomSignals(); ++ + if (LogMemoryContextPending) + ProcessLogMemoryContextInterrupt(); + } +diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h +index eec186b..74af186 100644 +--- a/src/include/storage/procsignal.h ++++ b/src/include/storage/procsignal.h +@@ -17,6 +17,8 @@ + #include "storage/backendid.h" + + ++#define NUM_CUSTOM_PROCSIGNALS 64 ++ + /* + * Reasons for signaling a Postgres child process (a backend or an auxiliary + * process, like checkpointer). 
We can cope with concurrent signals for different +@@ -29,6 +31,8 @@ + */ + typedef enum + { ++ INVALID_PROCSIGNAL = -1, /* Must be first */ ++ + PROCSIG_CATCHUP_INTERRUPT, /* sinval catchup interrupt */ + PROCSIG_NOTIFY_INTERRUPT, /* listen/notify interrupt */ + PROCSIG_PARALLEL_MESSAGE, /* message from cooperating parallel backend */ +@@ -44,6 +48,14 @@ typedef enum + PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, + PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK, + ++ PROCSIG_CUSTOM_1, ++ /* ++ * PROCSIG_CUSTOM_2, ++ * ..., ++ * PROCSIG_CUSTOM_N-1, ++ */ ++ PROCSIG_CUSTOM_N = PROCSIG_CUSTOM_1 + NUM_CUSTOM_PROCSIGNALS - 1, ++ + NUM_PROCSIGNALS /* Must be last! */ + } ProcSignalReason; + +@@ -56,6 +68,8 @@ typedef enum + */ + PROCSIGNAL_BARRIER_PLACEHOLDER = 0 + } ProcSignalBarrierType; ++/* Handler of custom process signal */ ++typedef void (*ProcSignalHandler_type) (void); + + /* + * prototypes for functions in procsignal.c +@@ -64,12 +78,15 @@ extern Size ProcSignalShmemSize(void); + extern void ProcSignalShmemInit(void); + + extern void ProcSignalInit(int pss_idx); ++extern ProcSignalReason ++ RegisterCustomProcSignalHandler(ProcSignalHandler_type handler); + extern int SendProcSignal(pid_t pid, ProcSignalReason reason, + BackendId backendId); + + extern uint64 EmitProcSignalBarrier(ProcSignalBarrierType type); + extern void WaitForProcSignalBarrier(uint64 generation); + extern void ProcessProcSignalBarrier(void); ++extern void CheckAndHandleCustomSignals(void); + + extern void procsignal_sigusr1_handler(SIGNAL_ARGS); + +-- +2.25.1 + diff --git a/patches/runtime_explain_14.0.patch b/patches/runtime_explain_14.0.patch index 3dbba9a..7904cc2 100644 --- a/patches/runtime_explain_14.0.patch +++ b/patches/runtime_explain_14.0.patch @@ -1,13 +1,3 @@ -From 71a353d4cac663db43c57452f925082a233c0e49 Mon Sep 17 00:00:00 2001 -From: Kovalenko Anastasia -Date: Mon, 23 Aug 2021 15:29:59 +0300 -Subject: [PATCH] runtime-explain - ---- - src/backend/commands/explain.c | 153 ++++++++++++++++++++++++++++----- - src/include/commands/explain.h | 2 + - 2 files changed, 133 insertions(+), 22 deletions(-) - diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 10644dfac4..7106ed4257 100644 --- a/src/backend/commands/explain.c diff --git a/patches/runtime_explain_15.0.patch b/patches/runtime_explain_15.0.patch new file mode 100644 index 0000000..7904cc2 --- /dev/null +++ b/patches/runtime_explain_15.0.patch @@ -0,0 +1,261 @@ +diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c +index 10644dfac4..7106ed4257 100644 +--- a/src/backend/commands/explain.c ++++ b/src/backend/commands/explain.c +@@ -984,14 +984,36 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + char *relname; + char *conname = NULL; + ++ instr_time starttimespan; ++ double total; ++ double ntuples; ++ double ncalls; ++ ++ if (!es->runtime) ++ { + /* Must clean up instrumentation state */ + InstrEndLoop(instr); ++ } ++ ++ /* Collect statistic variables */ ++ if (!INSTR_TIME_IS_ZERO(instr->starttime)) ++ { ++ INSTR_TIME_SET_CURRENT(starttimespan); ++ INSTR_TIME_SUBTRACT(starttimespan, instr->starttime); ++ } ++ else ++ INSTR_TIME_SET_ZERO(starttimespan); ++ ++ total = instr->total + INSTR_TIME_GET_DOUBLE(instr->counter) ++ + INSTR_TIME_GET_DOUBLE(starttimespan); ++ ntuples = instr->ntuples + instr->tuplecount; ++ ncalls = ntuples + !INSTR_TIME_IS_ZERO(starttimespan); + + /* + * We ignore triggers that were never invoked; they likely aren't + * relevant to the current query type. 
+ */ +- if (instr->ntuples == 0) ++ if (ncalls == 0) + continue; + + ExplainOpenGroup("Trigger", NULL, true, es); +@@ -1017,9 +1039,9 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + appendStringInfo(es->str, " on %s", relname); + if (es->timing) + appendStringInfo(es->str, ": time=%.3f calls=%.0f\n", +- 1000.0 * instr->total, instr->ntuples); ++ 1000.0 * total, ncalls); + else +- appendStringInfo(es->str, ": calls=%.0f\n", instr->ntuples); ++ appendStringInfo(es->str, ": calls=%.0f\n", ncalls); + } + else + { +@@ -1028,9 +1050,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + ExplainPropertyText("Constraint Name", conname, es); + ExplainPropertyText("Relation", relname, es); + if (es->timing) +- ExplainPropertyFloat("Time", "ms", 1000.0 * instr->total, 3, +- es); +- ExplainPropertyFloat("Calls", NULL, instr->ntuples, 0, es); ++ ExplainPropertyFloat("Time", "ms", 1000.0 * total, 3, es); ++ ExplainPropertyFloat("Calls", NULL, ncalls, 0, es); + } + + if (conname) +@@ -1600,8 +1621,11 @@ ExplainNode(PlanState *planstate, List *ancestors, + * instrumentation results the user didn't ask for. But we do the + * InstrEndLoop call anyway, if possible, to reduce the number of cases + * auto_explain has to contend with. ++ * ++ * If flag es->stateinfo is set, i.e. when printing the current execution ++ * state, this step of cleaning up is missed. + */ +- if (planstate->instrument) ++ if (planstate->instrument && !es->runtime) + InstrEndLoop(planstate->instrument); + + if (es->analyze && +@@ -1636,7 +1660,7 @@ ExplainNode(PlanState *planstate, List *ancestors, + ExplainPropertyFloat("Actual Loops", NULL, nloops, 0, es); + } + } +- else if (es->analyze) ++ else if (es->analyze && !es->runtime) + { + if (es->format == EXPLAIN_FORMAT_TEXT) + appendStringInfoString(es->str, " (never executed)"); +@@ -1652,6 +1676,75 @@ ExplainNode(PlanState *planstate, List *ancestors, + } + } + ++ /* ++ * Print the progress of node execution at current loop. 
++ */ ++ if (planstate->instrument && es->analyze && es->runtime) ++ { ++ instr_time starttimespan; ++ double startup_sec; ++ double total_sec; ++ double rows; ++ double loop_num; ++ bool finished; ++ ++ if (!INSTR_TIME_IS_ZERO(planstate->instrument->starttime)) ++ { ++ INSTR_TIME_SET_CURRENT(starttimespan); ++ INSTR_TIME_SUBTRACT(starttimespan, planstate->instrument->starttime); ++ } ++ else ++ INSTR_TIME_SET_ZERO(starttimespan); ++ startup_sec = 1000.0 * planstate->instrument->firsttuple; ++ total_sec = 1000.0 * (INSTR_TIME_GET_DOUBLE(planstate->instrument->counter) ++ + INSTR_TIME_GET_DOUBLE(starttimespan)); ++ rows = planstate->instrument->tuplecount; ++ loop_num = planstate->instrument->nloops + 1; ++ ++ finished = planstate->instrument->nloops > 0 ++ && !planstate->instrument->running ++ && INSTR_TIME_IS_ZERO(starttimespan); ++ ++ if (!finished) ++ { ++ ExplainOpenGroup("Current loop", "Current loop", true, es); ++ if (es->format == EXPLAIN_FORMAT_TEXT) ++ { ++ if (es->timing) ++ { ++ if (planstate->instrument->running) ++ appendStringInfo(es->str, ++ " (Current loop: actual time=%.3f..%.3f rows=%.0f, loop number=%.0f)", ++ startup_sec, total_sec, rows, loop_num); ++ else ++ appendStringInfo(es->str, ++ " (Current loop: running time=%.3f actual rows=0, loop number=%.0f)", ++ total_sec, loop_num); ++ } ++ else ++ appendStringInfo(es->str, ++ " (Current loop: actual rows=%.0f, loop number=%.0f)", ++ rows, loop_num); ++ } ++ else ++ { ++ ExplainPropertyFloat("Actual Loop Number", NULL, loop_num, 0, es); ++ if (es->timing) ++ { ++ if (planstate->instrument->running) ++ { ++ ExplainPropertyFloat("Actual Startup Time", NULL, startup_sec, 3, es); ++ ExplainPropertyFloat("Actual Total Time", NULL, total_sec, 3, es); ++ } ++ else ++ ExplainPropertyFloat("Running Time", NULL, total_sec, 3, es); ++ } ++ ExplainPropertyFloat("Actual Rows", NULL, rows, 0, es); ++ } ++ ExplainCloseGroup("Current loop", "Current loop", true, es); ++ } ++ } ++ + /* in text format, first line ends here */ + if (es->format == EXPLAIN_FORMAT_TEXT) + appendStringInfoChar(es->str, '\n'); +@@ -2051,6 +2144,9 @@ ExplainNode(PlanState *planstate, List *ancestors, + + /* Prepare per-worker buffer/WAL usage */ + if (es->workers_state && (es->buffers || es->wal) && es->verbose) ++ /* Show worker detail after query execution */ ++ if (es->analyze && es->verbose && planstate->worker_instrument ++ && !es->runtime) + { + WorkerInstrumentation *w = planstate->worker_instrument; + +@@ -3015,6 +3111,11 @@ show_hash_info(HashState *hashstate, ExplainState *es) + memcpy(&hinstrument, hashstate->hinstrument, + sizeof(HashInstrumentation)); + ++ if (hashstate->hashtable) ++ { ++ ExecHashAccumInstrumentation(&hinstrument, hashstate->hashtable); ++ } ++ + /* + * Merge results from workers. In the parallel-oblivious case, the + * results from all participants should be identical, except where +@@ -3392,20 +3493,16 @@ show_instrumentation_count(const char *qlabel, int which, + if (!es->analyze || !planstate->instrument) + return; + ++ nloops = planstate->instrument->nloops; + if (which == 2) +- nfiltered = planstate->instrument->nfiltered2; ++ nfiltered = ((nloops > 0) ? planstate->instrument->nfiltered2 / nloops : 0); + else +- nfiltered = planstate->instrument->nfiltered1; ++ nfiltered = ((nloops > 0) ? 
planstate->instrument->nfiltered1 / nloops : 0); + nloops = planstate->instrument->nloops; + + /* In text mode, suppress zero counts; they're not interesting enough */ + if (nfiltered > 0 || es->format != EXPLAIN_FORMAT_TEXT) +- { +- if (nloops > 0) +- ExplainPropertyFloat(qlabel, NULL, nfiltered / nloops, 0, es); +- else +- ExplainPropertyFloat(qlabel, NULL, 0.0, 0, es); +- } ++ ExplainPropertyFloat(qlabel, NULL, nfiltered, 0, es); + } + + /* +@@ -3977,15 +4074,27 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, + double insert_path; + double other_path; + +- InstrEndLoop(outerPlanState(mtstate)->instrument); ++ if (!es->runtime) ++ InstrEndLoop(outerPlanState(mtstate)->instrument); + + /* count the number of source rows */ +- total = outerPlanState(mtstate)->instrument->ntuples; +- other_path = mtstate->ps.instrument->ntuples2; +- insert_path = total - other_path; ++ other_path = mtstate->ps.instrument->nfiltered2; ++ ++ /* ++ * Insert occurs after extracting row from subplan and in runtime mode ++ * we can appear between these two operations - situation when ++ * total > insert_path + other_path. Therefore we don't know exactly ++ * whether last row from subplan is inserted. ++ * We don't print inserted tuples in runtime mode in order to not print ++ * inconsistent data ++ */ ++ if (!es->runtime) ++ { ++ total = outerPlanState(mtstate)->instrument->ntuples; ++ insert_path = total - other_path; ++ ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); ++ } + +- ExplainPropertyFloat("Tuples Inserted", NULL, +- insert_path, 0, es); + ExplainPropertyFloat("Conflicting Tuples", NULL, + other_path, 0, es); + } +diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h +index e94d9e49cf..6a157b8bc0 100644 +--- a/src/include/commands/explain.h ++++ b/src/include/commands/explain.h +@@ -47,6 +47,8 @@ typedef struct ExplainState + bool summary; /* print total planning and execution timing */ + bool settings; /* print modified settings */ + ExplainFormat format; /* output format */ ++ bool runtime; /* print intermediate state of query execution, ++ not after completion */ + /* state for output formatting --- not reset for each new plan tree */ + int indent; /* current indentation level */ + List *grouping_stack; /* format-specific grouping state */ +-- +2.25.1 + diff --git a/pg_query_state.c b/pg_query_state.c index a4a7d1b..21a406b 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -891,7 +891,11 @@ SendBgWorkerPids(void) msg->pids[i++] = current_pid; } +#if PG_VERSION_NUM <= 140000 shm_mq_send(mqh, msg_len, msg, false); +#else + shm_mq_send(mqh, msg_len, msg, false, true); +#endif UnlockShmem(&tag); } diff --git a/signal_handler.c b/signal_handler.c index 2af69fd..20325e7 100644 --- a/signal_handler.c +++ b/signal_handler.c @@ -168,7 +168,11 @@ shm_mq_send_nonblocking(shm_mq_handle *mqh, Size nbytes, const void *data, Size for(i = 0; i < attempts; i++) { +#if PG_VERSION_NUM <= 140000 res = shm_mq_send(mqh, nbytes, data, true); +#else + res = shm_mq_send(mqh, nbytes, data, true, true); +#endif if(res == SHM_MQ_SUCCESS) break; From 8ecd905fcd31a42a3e310eb591fc6fba8edd07fb Mon Sep 17 00:00:00 2001 From: Maxim Orlov Date: Mon, 8 Nov 2021 14:02:02 +0300 Subject: [PATCH 10/37] Issue #36: fix script output varying due to timing. 
tags: pg_query_state --- expected/corner_cases.out | 15 ++++++++++----- specs/corner_cases.spec | 10 +++++----- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/expected/corner_cases.out b/expected/corner_cases.out index b475e33..725addc 100644 --- a/expected/corner_cases.out +++ b/expected/corner_cases.out @@ -18,8 +18,9 @@ save_own_pid (1 row) +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: state of backend is idle -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state -------------- (0 rows) @@ -33,8 +34,9 @@ save_own_pid (1 row) step s1_disable_pg_qs: set pg_query_state.enable to off; +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: query execution statistics disabled -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state -------------- (0 rows) @@ -49,8 +51,9 @@ save_own_pid (1 row) +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: state of backend is idle -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state -------------- (0 rows) @@ -65,8 +68,9 @@ save_own_pid (1 row) +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: state of backend is idle -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state -------------- (0 rows) @@ -81,5 +85,6 @@ save_own_pid (1 row) -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> ERROR: permission denied diff --git a/specs/corner_cases.spec b/specs/corner_cases.spec index 292b39d..c9f3fde 100644 --- a/specs/corner_cases.spec +++ b/specs/corner_cases.spec @@ -64,12 +64,12 @@ permutation "s1_pg_qs_1" permutation "s1_pg_qs_2" # Check idle -permutation "s1_save_pid" "s2_pg_qs_counterpart" +permutation "s1_save_pid" "s2_pg_qs_counterpart"(*) # Check module disable -permutation "s1_save_pid" "s1_disable_pg_qs" "s2_pg_qs_counterpart" +permutation "s1_save_pid" "s1_disable_pg_qs" "s2_pg_qs_counterpart"(*) # Check roles correspondence -permutation "s1_set_bob" "s2_set_bob" "s1_save_pid" "s2_pg_qs_counterpart" -permutation "s1_set_bob" "s2_set_su" "s1_save_pid" "s2_pg_qs_counterpart" -permutation "s1_set_bob" "s2_set_alice" "s1_save_pid" "s2_pg_qs_counterpart" +permutation "s1_set_bob" "s2_set_bob" "s1_save_pid" "s2_pg_qs_counterpart"(*) +permutation "s1_set_bob" "s2_set_su" "s1_save_pid" "s2_pg_qs_counterpart"(*) +permutation "s1_set_bob" "s2_set_alice" "s1_save_pid" "s2_pg_qs_counterpart"(*) From 4b53e0370ffcc01ed7e5187666f3385d2de1b547 Mon Sep 17 00:00:00 2001 From: Maxim Orlov Date: Mon, 8 Nov 2021 14:35:46 +0300 Subject: [PATCH 11/37] Issue #36: fix script output varying due to timing. Also fix alternative output. 
tags: pg_query_state --- expected/corner_cases_2.out | 15 ++++++++++----- expected/corner_cases_3.out | 15 ++++++++++----- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/expected/corner_cases_2.out b/expected/corner_cases_2.out index da624f7..df7495f 100644 --- a/expected/corner_cases_2.out +++ b/expected/corner_cases_2.out @@ -13,8 +13,9 @@ step s1_save_pid: select save_own_pid(0); save_own_pid +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: state of backend is idle -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state @@ -24,8 +25,9 @@ save_own_pid step s1_disable_pg_qs: set pg_query_state.enable to off; +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: query execution statistics disabled -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state @@ -36,8 +38,9 @@ step s1_save_pid: select save_own_pid(0); save_own_pid +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: state of backend is idle -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state @@ -48,8 +51,9 @@ step s1_save_pid: select save_own_pid(0); save_own_pid +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: state of backend is idle -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state @@ -60,5 +64,6 @@ step s1_save_pid: select save_own_pid(0); save_own_pid -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> ERROR: permission denied diff --git a/expected/corner_cases_3.out b/expected/corner_cases_3.out index 845db75..8f6a8ef 100644 --- a/expected/corner_cases_3.out +++ b/expected/corner_cases_3.out @@ -13,8 +13,9 @@ step s1_save_pid: select save_own_pid(0); save_own_pid +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: state of backend is idle -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state @@ -24,8 +25,9 @@ save_own_pid step s1_disable_pg_qs: set pg_query_state.enable to off; +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: query execution statistics disabled -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state @@ -36,8 +38,9 @@ step s1_save_pid: select save_own_pid(0); save_own_pid +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: state of backend is idle -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> pg_query_state @@ -48,8 +51,9 @@ step s1_save_pid: select save_own_pid(0); save_own_pid +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); s2: INFO: state of backend is idle -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... 
completed> pg_query_state @@ -60,7 +64,8 @@ step s1_save_pid: select save_own_pid(0); save_own_pid -step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: select pg_query_state(counterpart_pid(0)); +step s2_pg_qs_counterpart: <... completed> ERROR: permission denied unused step name: s1_enable_pg_qs unused step name: s1_pg_qs_counterpart From 2aeee15863615174a28bca525020dc48a1f36e22 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 10 Nov 2021 08:56:26 +0300 Subject: [PATCH 12/37] PGPRO-5826: fix build for PostgreSQL 14.1 The previous code did not work for PostgreSQL 14.1 because the signature of the function shm_mq_send was changed only in PostgreSQL 15devel which has PG_VERSION_NUM = 150000. --- pg_query_state.c | 2 +- signal_handler.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index f495f1d..f5a9a4d 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -895,7 +895,7 @@ SendBgWorkerPids(void) msg->pids[i++] = current_pid; } -#if PG_VERSION_NUM <= 140000 +#if PG_VERSION_NUM < 150000 shm_mq_send(mqh, msg_len, msg, false); #else shm_mq_send(mqh, msg_len, msg, false, true); diff --git a/signal_handler.c b/signal_handler.c index 20325e7..d4f1099 100644 --- a/signal_handler.c +++ b/signal_handler.c @@ -168,7 +168,7 @@ shm_mq_send_nonblocking(shm_mq_handle *mqh, Size nbytes, const void *data, Size for(i = 0; i < attempts; i++) { -#if PG_VERSION_NUM <= 140000 +#if PG_VERSION_NUM < 150000 res = shm_mq_send(mqh, nbytes, data, true); #else res = shm_mq_send(mqh, nbytes, data, true, true); From 3445b458376783ec5f52d3175aa657adfa8b46eb Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Mon, 8 Nov 2021 23:09:22 +0300 Subject: [PATCH 13/37] [PGPRO-4561] Blocking custom signals while handling one Without HOLD_INTERRUPTS()/RESUME_INTERRUPTS() there is a risk of accepting a signal while executing another signal handler which leads to a hcs memory context corruption. 
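In essence, the fix brackets the custom-signal dispatch loop with HOLD_INTERRUPTS()/RESUME_INTERRUPTS(). A condensed sketch of the guarded dispatcher is shown below; it paraphrases the hunks added to the custom_signals_*.patch files in this commit (the arrays CustomSignalPendings, CustomSignalProcessing and CustomInterruptHandlers are the ones those patches define), and is meant as an illustration rather than a drop-in replacement:

```
void
CheckAndHandleCustomSignals(void)
{
	int		i;

	/*
	 * Handlers may reach CHECK_FOR_INTERRUPTS(); hold interrupts so a signal
	 * arriving mid-handler cannot re-enter this loop and clobber its state.
	 */
	HOLD_INTERRUPTS();

	for (i = 0; i < NUM_CUSTOM_PROCSIGNALS; i++)
	{
		if (!CustomSignalProcessing[i] && CustomSignalPendings[i])
		{
			ProcSignalHandler_type handler = CustomInterruptHandlers[i];

			CustomSignalPendings[i] = false;
			if (handler != NULL)
			{
				CustomSignalProcessing[i] = true;
				handler();
				CustomSignalProcessing[i] = false;
			}
		}
	}

	RESUME_INTERRUPTS();
}
```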
tags: pg_query_state --- patches/custom_signals_13.0.patch | 134 +++++++++++++++++++----------- patches/custom_signals_14.0.patch | 86 +++++++++++++++---- patches/custom_signals_15.0.patch | 10 +++ 3 files changed, 165 insertions(+), 65 deletions(-) diff --git a/patches/custom_signals_13.0.patch b/patches/custom_signals_13.0.patch index add965c..266cba8 100644 --- a/patches/custom_signals_13.0.patch +++ b/patches/custom_signals_13.0.patch @@ -1,10 +1,10 @@ diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c -index 4fa385b0ece..fc1637a2e28 100644 +index 4fa385b0ece..60854eee386 100644 --- a/src/backend/storage/ipc/procsignal.c +++ b/src/backend/storage/ipc/procsignal.c @@ -88,12 +88,21 @@ typedef struct - (((flags) & (((uint32) 1) << (uint32) (type))) != 0) - + (((flags) & (((uint32) 1) << (uint32) (type))) != 0) + static ProcSignalHeader *ProcSignal = NULL; +#define IsCustomProcSignalReason(reason) \ + ((reason) >= PROCSIG_CUSTOM_1 && (reason) <= PROCSIG_CUSTOM_N) @@ -14,20 +14,20 @@ index 4fa385b0ece..fc1637a2e28 100644 +static ProcSignalHandler_type CustomInterruptHandlers[NUM_CUSTOM_PROCSIGNALS]; + static volatile ProcSignalSlot *MyProcSignalSlot = NULL; - + static bool CheckProcSignal(ProcSignalReason reason); static void CleanupProcSignalState(int status, Datum arg); static void ProcessBarrierPlaceholder(void); - + +static void CheckAndSetCustomSignalInterrupts(void); + /* * ProcSignalShmemSize * Compute space needed for procsignal's shared memory @@ -235,6 +244,36 @@ CleanupProcSignalState(int status, Datum arg) - slot->pss_pid = 0; + slot->pss_pid = 0; } - + +/* + * RegisterCustomProcSignalHandler + * Assign specific handler of custom process signal with new @@ -61,17 +61,17 @@ index 4fa385b0ece..fc1637a2e28 100644 /* * SendProcSignal * Send a signal to a Postgres process -@@ -585,9 +624,64 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) - if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN)) - RecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN); - +@@ -585,9 +624,71 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) + if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN)) + RecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN); + + CheckAndSetCustomSignalInterrupts(); + - SetLatch(MyLatch); - - latch_sigusr1_handler(); - - errno = save_errno; + SetLatch(MyLatch); + + latch_sigusr1_handler(); + + errno = save_errno; } + +/* @@ -108,9 +108,15 @@ index 4fa385b0ece..fc1637a2e28 100644 +{ + int i; + -+ /* Check on expiring of custom signals and call its handlers if exist */ ++ /* ++ * This is invoked from ProcessInterrupts(), and since some of the ++ * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential ++ * for recursive calls if more signals are received while this runs, so ++ * let's block interrupts until done. 
++ */ ++ HOLD_INTERRUPTS(); ++ + for (i = 0; i < NUM_CUSTOM_PROCSIGNALS; i++) -+ { + if (!CustomSignalProcessing[i] && CustomSignalPendings[i]) + { + ProcSignalHandler_type handler; @@ -124,29 +130,66 @@ index 4fa385b0ece..fc1637a2e28 100644 + CustomSignalProcessing[i] = false; + } + } -+ } ++ ++ RESUME_INTERRUPTS(); +} diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c -index 174c72a14bc..0e7366bd58f 100644 +index 7bc03ae4edc..3debd63bd7d 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c -@@ -3221,6 +3221,8 @@ ProcessInterrupts(void) - - if (ParallelMessagePending) - HandleParallelMessages(); +@@ -5,6 +5,7 @@ + * + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California ++ * Portions Copyright (c) 2020-2021, Postgres Professional + * + * + * IDENTIFICATION +@@ -74,6 +75,7 @@ + #include "tcop/pquery.h" + #include "tcop/tcopprot.h" + #include "tcop/utility.h" ++#include "utils/builtins.h" + #include "utils/lsyscache.h" + #include "utils/memutils.h" + #include "utils/ps_status.h" +@@ -3231,6 +3233,8 @@ ProcessInterrupts(void) + + if (ParallelMessagePending) + HandleParallelMessages(); + + CheckAndHandleCustomSignals(); } - - + + +@@ -3576,7 +3580,7 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx, + * postmaster/postmaster.c (the option sets should not conflict) and with + * the common help() function in main/main.c. + */ +- while ((flag = getopt(argc, argv, "B:bc:C:D:d:EeFf:h:ijk:lN:nOo:Pp:r:S:sTt:v:W:-:")) != -1) ++ while ((flag = getopt(argc, argv, "B:bc:C:D:d:EeFf:h:ijk:lN:nOo:Pp:r:S:sTt:v:W:Z-:")) != -1) + { + switch (flag) + { +@@ -3712,6 +3716,10 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx, + SetConfigOption("post_auth_delay", optarg, ctx, gucsource); + break; + ++ case 'Z': ++ /* ignored for consistency with the postmaster */ ++ break; ++ + case 'c': + case '-': + { diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h index 5cb39697f38..c05f60fa719 100644 --- a/src/include/storage/procsignal.h +++ b/src/include/storage/procsignal.h @@ -17,6 +17,8 @@ #include "storage/backendid.h" - - + + +#define NUM_CUSTOM_PROCSIGNALS 64 + /* @@ -158,13 +201,13 @@ index 5cb39697f38..c05f60fa719 100644 { + INVALID_PROCSIGNAL = -1, /* Must be first */ + - PROCSIG_CATCHUP_INTERRUPT, /* sinval catchup interrupt */ - PROCSIG_NOTIFY_INTERRUPT, /* listen/notify interrupt */ - PROCSIG_PARALLEL_MESSAGE, /* message from cooperating parallel backend */ + PROCSIG_CATCHUP_INTERRUPT, /* sinval catchup interrupt */ + PROCSIG_NOTIFY_INTERRUPT, /* listen/notify interrupt */ + PROCSIG_PARALLEL_MESSAGE, /* message from cooperating parallel backend */ @@ -43,6 +47,14 @@ typedef enum - PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, - PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK, - + PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, + PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK, + + PROCSIG_CUSTOM_1, + /* + * PROCSIG_CUSTOM_2, @@ -173,34 +216,31 @@ index 5cb39697f38..c05f60fa719 100644 + */ + PROCSIG_CUSTOM_N = PROCSIG_CUSTOM_1 + NUM_CUSTOM_PROCSIGNALS - 1, + - NUM_PROCSIGNALS /* Must be last! */ + NUM_PROCSIGNALS /* Must be last! 
*/ } ProcSignalReason; - + @@ -55,6 +67,8 @@ typedef enum - */ - PROCSIGNAL_BARRIER_PLACEHOLDER = 0 + */ + PROCSIGNAL_BARRIER_PLACEHOLDER = 0 } ProcSignalBarrierType; +/* Handler of custom process signal */ +typedef void (*ProcSignalHandler_type) (void); - + /* * prototypes for functions in procsignal.c @@ -63,12 +77,15 @@ extern Size ProcSignalShmemSize(void); extern void ProcSignalShmemInit(void); - + extern void ProcSignalInit(int pss_idx); +extern ProcSignalReason + RegisterCustomProcSignalHandler(ProcSignalHandler_type handler); extern int SendProcSignal(pid_t pid, ProcSignalReason reason, - BackendId backendId); - + BackendId backendId); + extern uint64 EmitProcSignalBarrier(ProcSignalBarrierType type); extern void WaitForProcSignalBarrier(uint64 generation); extern void ProcessProcSignalBarrier(void); +extern void CheckAndHandleCustomSignals(void); - + extern void procsignal_sigusr1_handler(SIGNAL_ARGS); - --- -2.25.1 - + diff --git a/patches/custom_signals_14.0.patch b/patches/custom_signals_14.0.patch index 9d640cb..d02f2b5 100644 --- a/patches/custom_signals_14.0.patch +++ b/patches/custom_signals_14.0.patch @@ -1,11 +1,19 @@ diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c -index defb75a..4245d28 100644 +index defb75aa26a..cd7d44977ca 100644 --- a/src/backend/storage/ipc/procsignal.c +++ b/src/backend/storage/ipc/procsignal.c -@@ -95,6 +95,13 @@ typedef struct - #define BARRIER_CLEAR_BIT(flags, type) \ +@@ -6,6 +6,7 @@ + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California ++ * Portions Copyright (c) 2021, Postgres Professional + * + * IDENTIFICATION + * src/backend/storage/ipc/procsignal.c +@@ -96,6 +97,13 @@ typedef struct ((flags) &= ~(((uint32) 1) << (uint32) (type))) + static ProcSignalHeader *ProcSignal = NULL; +#define IsCustomProcSignalReason(reason) \ + ((reason) >= PROCSIG_CUSTOM_1 && (reason) <= PROCSIG_CUSTOM_N) + @@ -13,10 +21,10 @@ index defb75a..4245d28 100644 +static bool CustomSignalProcessing[NUM_CUSTOM_PROCSIGNALS]; +static ProcSignalHandler_type CustomInterruptHandlers[NUM_CUSTOM_PROCSIGNALS]; + - static ProcSignalHeader *ProcSignal = NULL; static ProcSignalSlot *MyProcSignalSlot = NULL; -@@ -103,6 +110,8 @@ static void CleanupProcSignalState(int status, Datum arg); + static bool CheckProcSignal(ProcSignalReason reason); +@@ -103,6 +111,8 @@ static void CleanupProcSignalState(int status, Datum arg); static void ResetProcSignalBarrierBits(uint32 flags); static bool ProcessBarrierPlaceholder(void); @@ -25,7 +33,7 @@ index defb75a..4245d28 100644 /* * ProcSignalShmemSize * Compute space needed for procsignal's shared memory -@@ -246,6 +255,36 @@ CleanupProcSignalState(int status, Datum arg) +@@ -246,6 +256,36 @@ CleanupProcSignalState(int status, Datum arg) slot->pss_pid = 0; } @@ -62,7 +70,7 @@ index defb75a..4245d28 100644 /* * SendProcSignal * Send a signal to a Postgres process -@@ -679,7 +718,62 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) +@@ -679,7 +719,72 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN)) RecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN); @@ -107,6 +115,14 @@ index defb75a..4245d28 100644 +{ + int i; + ++ /* ++ * This is invoked from ProcessInterrupts(), and since some of the ++ * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential ++ * for recursive calls if more signals are received while this runs, so ++ * let's block 
interrupts until done. ++ */ ++ HOLD_INTERRUPTS(); ++ + /* Check on expiring of custom signals and call its handlers if exist */ + for (i = 0; i < NUM_CUSTOM_PROCSIGNALS; i++) + { @@ -124,23 +140,60 @@ index defb75a..4245d28 100644 + } + } + } ++ ++ RESUME_INTERRUPTS(); +} -\ No newline at end of file diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c -index 8cea10c..dd77c98 100644 +index 171f3a95006..e6fe26fb19a 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c -@@ -3364,6 +3364,8 @@ ProcessInterrupts(void) - if (ParallelMessagePending) - HandleParallelMessages(); +@@ -5,6 +5,7 @@ + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California ++ * Portions Copyright (c) 2021, Postgres Professional + * + * + * IDENTIFICATION +@@ -75,6 +76,7 @@ + #include "tcop/pquery.h" + #include "tcop/tcopprot.h" + #include "tcop/utility.h" ++#include "utils/builtins.h" + #include "utils/lsyscache.h" + #include "utils/memutils.h" + #include "utils/ps_status.h" +@@ -3366,6 +3368,8 @@ ProcessInterrupts(void) -+ CheckAndHandleCustomSignals(); -+ if (LogMemoryContextPending) ProcessLogMemoryContextInterrupt(); ++ ++ CheckAndHandleCustomSignals(); } + + +@@ -3711,7 +3715,7 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx, + * postmaster/postmaster.c (the option sets should not conflict) and with + * the common help() function in main/main.c. + */ +- while ((flag = getopt(argc, argv, "B:bc:C:D:d:EeFf:h:ijk:lN:nOPp:r:S:sTt:v:W:-:")) != -1) ++ while ((flag = getopt(argc, argv, "B:bc:C:D:d:EeFf:h:ijk:lN:nOPp:r:S:sTt:v:W:Z-:")) != -1) + { + switch (flag) + { +@@ -3843,6 +3847,10 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx, + SetConfigOption("post_auth_delay", optarg, ctx, gucsource); + break; + ++ case 'Z': ++ /* ignored for consistency with the postmaster */ ++ break; ++ + case 'c': + case '-': + { diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h -index eec186b..74af186 100644 +index eec186be2ee..74af186bf53 100644 --- a/src/include/storage/procsignal.h +++ b/src/include/storage/procsignal.h @@ -17,6 +17,8 @@ @@ -201,6 +254,3 @@ index eec186b..74af186 100644 extern void procsignal_sigusr1_handler(SIGNAL_ARGS); --- -2.25.1 - diff --git a/patches/custom_signals_15.0.patch b/patches/custom_signals_15.0.patch index 9d640cb..7678dbe 100644 --- a/patches/custom_signals_15.0.patch +++ b/patches/custom_signals_15.0.patch @@ -107,6 +107,14 @@ index defb75a..4245d28 100644 +{ + int i; + ++ /* ++ * This is invoked from ProcessInterrupts(), and since some of the ++ * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential ++ * for recursive calls if more signals are received while this runs, so ++ * let's block interrupts until done. ++ */ ++ HOLD_INTERRUPTS(); ++ + /* Check on expiring of custom signals and call its handlers if exist */ + for (i = 0; i < NUM_CUSTOM_PROCSIGNALS; i++) + { @@ -124,6 +132,8 @@ index defb75a..4245d28 100644 + } + } + } ++ ++ RESUME_INTERRUPTS(); +} \ No newline at end of file diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c From 45f8c63a12b41907b326592c30c583ce7ec38301 Mon Sep 17 00:00:00 2001 From: Anton Voloshin Date: Wed, 20 Jul 2022 13:25:22 +0300 Subject: [PATCH 14/37] adapt pg_query_state for upcoming PostgreSQL 15 1. 
Only call RequestAddinShmemSpace from within our implementation of shmem_request_hook (as required after commit 4f2400cb3 in PostgreSQL 15). 2. While we are here, remove _PG_fini, as it is now officially dead after commit ab02d702e in PostgreSQL 15. --- pg_query_state.c | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index f5a9a4d..d499902 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -48,7 +48,6 @@ static ExecutorFinish_hook_type prev_ExecutorFinish = NULL; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; void _PG_init(void); -void _PG_fini(void); /* hooks defined in this module */ static void qs_ExecutorStart(QueryDesc *queryDesc, int eflags); @@ -179,6 +178,11 @@ pg_qs_shmem_startup(void) module_initialized = true; } +#if PG_VERSION_NUM >= 150000 +static shmem_request_hook_type prev_shmem_request_hook = NULL; +static void pg_qs_shmem_request(void); +#endif + /* * Module load callback */ @@ -188,12 +192,12 @@ _PG_init(void) if (!process_shared_preload_libraries_in_progress) return; - /* - * Request additional shared resources. (These are no-ops if we're not in - * the postmaster process.) We'll allocate or attach to the shared - * resources in qs_shmem_startup(). - */ +#if PG_VERSION_NUM >= 150000 + prev_shmem_request_hook = shmem_request_hook; + shmem_request_hook = pg_qs_shmem_request; +#else RequestAddinShmemSpace(pg_qs_shmem_size()); +#endif /* Register interrupt on custom signal of polling query state */ UserIdPollReason = RegisterCustomProcSignalHandler(SendCurrentUserId); @@ -252,22 +256,13 @@ _PG_init(void) shmem_startup_hook = pg_qs_shmem_startup; } -/* - * Module unload callback - */ -void -_PG_fini(void) +static void +pg_qs_shmem_request(void) { - module_initialized = false; - - /* clear global state */ - list_free(QueryDescStack); + if (prev_shmem_request_hook) + prev_shmem_request_hook(); - /* Uninstall hooks. */ - ExecutorStart_hook = prev_ExecutorStart; - ExecutorRun_hook = prev_ExecutorRun; - ExecutorFinish_hook = prev_ExecutorFinish; - shmem_startup_hook = prev_shmem_startup_hook; + RequestAddinShmemSpace(pg_qs_shmem_size()); } /* From 410cf8f5c872087bf96b47d2383fda2b5094e54d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 8 Aug 2022 12:50:55 +0300 Subject: [PATCH 15/37] fix compilation on pre-15 --- pg_query_state.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pg_query_state.c b/pg_query_state.c index d499902..dabaa71 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -256,6 +256,7 @@ _PG_init(void) shmem_startup_hook = pg_qs_shmem_startup; } +#if PG_VERSION_NUM >= 150000 static void pg_qs_shmem_request(void) { @@ -264,6 +265,7 @@ pg_qs_shmem_request(void) RequestAddinShmemSpace(pg_qs_shmem_size()); } +#endif /* * ExecutorStart hook: From 6fa1219fa6b7f21031a3e92dc9c565a549272336 Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Mon, 31 Oct 2022 16:05:45 +0300 Subject: [PATCH 16/37] [PGPRO-6693] Checking the result of shm_mq_send (according to Svace). Tags: pg_query_state. 
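For reference, shm_mq_send() reports one of three shm_mq_result codes, and the diff below only needs to distinguish SHM_MQ_DETACHED. A minimal sketch of the checked send follows, restating the pattern the commit introduces; the helper name is hypothetical, while the WARNING text and the PG_VERSION_NUM guard are taken from the patch itself:

```
#include "postgres.h"
#include "storage/shm_mq.h"

/* Hypothetical helper, not part of the patch: send one message, warn if lost. */
static void
send_checked(shm_mq_handle *mqh, Size msg_len, const void *msg)
{
	shm_mq_result result;

#if PG_VERSION_NUM < 150000
	result = shm_mq_send(mqh, msg_len, msg, false);
#else
	/* the extra argument asks for the write to be flushed to the receiver immediately */
	result = shm_mq_send(mqh, msg_len, msg, false, true);
#endif

	/*
	 * With nowait = false the only other outcomes are SHM_MQ_SUCCESS and,
	 * when the receiver has gone away, SHM_MQ_DETACHED; in the latter case
	 * the message is lost, so at least report it.
	 */
	if (result == SHM_MQ_DETACHED)
		elog(WARNING, "could not send message queue to shared-memory queue: receiver has been detached");
}
```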
--- pg_query_state.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index dabaa71..ba79050 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -864,6 +864,7 @@ SendBgWorkerPids(void) int i; shm_mq_handle *mqh; LOCKTAG tag; + shm_mq_result result; LockShmem(&tag, PG_QS_SND_KEY); @@ -893,10 +894,15 @@ SendBgWorkerPids(void) } #if PG_VERSION_NUM < 150000 - shm_mq_send(mqh, msg_len, msg, false); + result = shm_mq_send(mqh, msg_len, msg, false); #else - shm_mq_send(mqh, msg_len, msg, false, true); + result = shm_mq_send(mqh, msg_len, msg, false, true); #endif + + /* Check for failure. */ + if(result == SHM_MQ_DETACHED) + elog(WARNING, "could not send message queue to shared-memory queue: receiver has been detached"); + UnlockShmem(&tag); } From 484600c9073d4fec4d24d829655d4c792be23121 Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Fri, 11 Nov 2022 09:54:51 +0300 Subject: [PATCH 17/37] [6693] Refactoring source code of pg_query_state. Tags: pg_query_state. --- pg_query_state.c | 40 ++++++++++++++++++++-------------------- signal_handler.c | 12 ++++++------ 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index ba79050..2c8a917 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -101,10 +101,10 @@ static List *GetRemoteBackendQueryStates(PGPROC *leader, ExplainFormat format); /* Shared memory variables */ -shm_toc *toc = NULL; +shm_toc *toc = NULL; RemoteUserIdResult *counterpart_userid = NULL; -pg_qs_params *params = NULL; -shm_mq *mq = NULL; +pg_qs_params *params = NULL; +shm_mq *mq = NULL; /* * Estimate amount of shared memory needed. @@ -208,7 +208,7 @@ _PG_init(void) || UserIdPollReason == INVALID_PROCSIGNAL) { ereport(WARNING, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("pg_query_state isn't loaded: insufficient custom ProcSignal slots"))); + errmsg("pg_query_state isn't loaded: insufficient custom ProcSignal slots"))); return; } @@ -435,7 +435,7 @@ deserialize_stack(char *src, int stack_depth) { List *result = NIL; char *curr_ptr = src; - int i; + int i; for (i = 0; i < stack_depth; i++) { @@ -599,10 +599,10 @@ pg_query_state(PG_FUNCTION_ARGS) /* print warnings if exist */ if (msg->warnings & TIMINIG_OFF_WARNING) ereport(WARNING, (errcode(ERRCODE_WARNING), - errmsg("timing statistics disabled"))); + errmsg("timing statistics disabled"))); if (msg->warnings & BUFFERS_OFF_WARNING) ereport(WARNING, (errcode(ERRCODE_WARNING), - errmsg("buffers statistics disabled"))); + errmsg("buffers statistics disabled"))); oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); @@ -864,7 +864,7 @@ SendBgWorkerPids(void) int i; shm_mq_handle *mqh; LOCKTAG tag; - shm_mq_result result; + shm_mq_result result; LockShmem(&tag, PG_QS_SND_KEY); @@ -959,10 +959,10 @@ GetRemoteBackendWorkers(PGPROC *proc) signal_error: ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("invalid send signal"))); + errmsg("invalid send signal"))); mq_error: ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("error in message queue data transmitting"))); + errmsg("error in message queue data transmitting"))); return NIL; } @@ -980,12 +980,12 @@ static shm_mq_result receive_msg_by_parts(shm_mq_handle *mqh, Size *total, void **datap, int64 timeout, int *rc, bool nowait) { - shm_mq_result mq_receive_result; - shm_mq_msg *buff; - int offset; - Size *expected; - Size expected_data; - Size len; + shm_mq_result mq_receive_result; + shm_mq_msg *buff; + int offset; + Size 
*expected; + Size expected_data; + Size len; /* Get the expected number of bytes in message */ mq_receive_result = shm_mq_receive(mqh, &len, (void **) &expected, nowait); @@ -1113,7 +1113,7 @@ GetRemoteBackendQueryStates(PGPROC *leader, mqh = shm_mq_attach(mq, NULL, NULL); elog(DEBUG1, "Wait response from leader %d", leader->pid); mq_receive_result = receive_msg_by_parts(mqh, &len, (void **) &msg, - 0, NULL, false); + 0, NULL, false); if (mq_receive_result != SHM_MQ_SUCCESS) goto mq_error; if (msg->reqid != reqid) @@ -1132,7 +1132,7 @@ GetRemoteBackendQueryStates(PGPROC *leader, */ foreach(iter, alive_procs) { - PGPROC *proc = (PGPROC *) lfirst(iter); + PGPROC *proc = (PGPROC *) lfirst(iter); /* prepare message queue to transfer data */ elog(DEBUG1, "Wait response from worker %d", proc->pid); @@ -1172,7 +1172,7 @@ GetRemoteBackendQueryStates(PGPROC *leader, signal_error: ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("invalid send signal"))); + errmsg("invalid send signal"))); mq_error: #if PG_VERSION_NUM < 100000 shm_mq_detach(mq); @@ -1180,7 +1180,7 @@ GetRemoteBackendQueryStates(PGPROC *leader, shm_mq_detach(mqh); #endif ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("error in message queue data transmitting"))); + errmsg("error in message queue data transmitting"))); return NIL; } diff --git a/signal_handler.c b/signal_handler.c index d4f1099..c8f2950 100644 --- a/signal_handler.c +++ b/signal_handler.c @@ -224,12 +224,12 @@ send_msg_by_parts(shm_mq_handle *mqh, Size nbytes, const void *data) void SendQueryState(void) { - shm_mq_handle *mqh; - instr_time start_time; - instr_time cur_time; - int64 delay = MAX_SND_TIMEOUT; - int reqid = params->reqid; - LOCKTAG tag; + shm_mq_handle *mqh; + instr_time start_time; + instr_time cur_time; + int64 delay = MAX_SND_TIMEOUT; + int reqid = params->reqid; + LOCKTAG tag; INSTR_TIME_SET_CURRENT(start_time); From 5ba877945f712a6bcdec29b905effef0867ba46b Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Mon, 21 Nov 2022 16:20:39 +0300 Subject: [PATCH 18/37] Fix compiler warnings due to new checks in PostgreSQL 16 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See the commit 0fe954c28584169938e5c0738cfaa9930ce77577 (Add -Wshadow=compatible-local to the standard compilation flags) in PostgreSQL 16. 
pg_query_state.c: In function ‘pg_query_state’: pg_query_state.c:615:66: warning: declaration of ‘msg’ shadows a previous local [-Wshadow=compatible-local] 615 | shm_mq_msg *msg = (shm_mq_msg *) lfirst(i); | ^~~ pg_query_state.c:489:42: note: shadowed declaration is here 489 | shm_mq_msg *msg; | ^~~ pg_query_state.c: In function ‘GetRemoteBackendWorkers’: pg_query_state.c:946:25: warning: declaration of ‘proc’ shadows a parameter [-Wshadow=compatible-local] 946 | PGPROC *proc = BackendPidGetProc(pid); | ^~~~ pg_query_state.c:913:33: note: shadowed declaration is here 913 | GetRemoteBackendWorkers(PGPROC *proc) | ~~~~~~~~^~~~ --- pg_query_state.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index 2c8a917..3419d64 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -612,17 +612,18 @@ pg_query_state(PG_FUNCTION_ARGS) foreach(i, msgs) { List *qs_stack; - shm_mq_msg *msg = (shm_mq_msg *) lfirst(i); + shm_mq_msg *current_msg = (shm_mq_msg *) lfirst(i); proc_state *p_state = (proc_state *) palloc(sizeof(proc_state)); - if (msg->result_code != QS_RETURNED) + if (current_msg->result_code != QS_RETURNED) continue; - AssertState(msg->result_code == QS_RETURNED); + AssertState(current_msg->result_code == QS_RETURNED); - qs_stack = deserialize_stack(msg->stack, msg->stack_depth); + qs_stack = deserialize_stack(current_msg->stack, + current_msg->stack_depth); - p_state->proc = msg->proc; + p_state->proc = current_msg->proc; p_state->stack = qs_stack; p_state->frame_index = 0; p_state->frame_cursor = list_head(qs_stack); @@ -943,10 +944,10 @@ GetRemoteBackendWorkers(PGPROC *proc) for (i = 0; i < msg->number; i++) { pid_t pid = msg->pids[i]; - PGPROC *proc = BackendPidGetProc(pid); - if (!proc || !proc->pid) + PGPROC *current_proc = BackendPidGetProc(pid); + if (!current_proc || !current_proc->pid) continue; - result = lcons(proc, result); + result = lcons(current_proc, result); } #if PG_VERSION_NUM < 100000 From 3b2942dc4b601d1f5bee54243a12021464f3e8b2 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 14 Dec 2022 09:49:12 +0300 Subject: [PATCH 19/37] Remove AssertState See the commit b1099eca8f38ff5cfaf0901bb91cb6a22f909bc6 (Remove AssertArg and AssertState) in PostgreSQL 16. 
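The fix below simply switches to plain Assert(), which is available in every supported branch. An alternative (not what this patch does) would be a small compatibility shim kept inside the extension, sketched here under that assumption:

```
/* Hypothetical shim, not part of the patch: keep the old spelling available
 * when building against PostgreSQL 16+, where AssertState was removed. */
#if PG_VERSION_NUM >= 160000 && !defined(AssertState)
#define AssertState(condition) Assert(condition)
#endif
```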
--- pg_query_state.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index 3419d64..a5a3f47 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -618,7 +618,7 @@ pg_query_state(PG_FUNCTION_ARGS) if (current_msg->result_code != QS_RETURNED) continue; - AssertState(current_msg->result_code == QS_RETURNED); + Assert(current_msg->result_code == QS_RETURNED); qs_stack = deserialize_stack(current_msg->stack, current_msg->stack_depth); @@ -890,7 +890,7 @@ SendBgWorkerPids(void) { pid_t current_pid = lfirst_int(iter); - AssertState(current_pid > 0); + Assert(current_pid > 0); msg->pids[i++] = current_pid; } From 93afc916dfccdb85b2d8f6604690f2348f2fd0c7 Mon Sep 17 00:00:00 2001 From: Vyacheslav Makarov Date: Thu, 26 Jan 2023 11:36:30 +0300 Subject: [PATCH 20/37] [PGPRO-7010] - Revision of docker tests for pg_query_state under version 15 tags: pg_query_state --- .travis.yml | 2 ++ patches/custom_signals_15.0.patch | 31 +++++++++++++++--------------- patches/runtime_explain_15.0.patch | 20 +++++++++---------- tests/test_cases.py | 28 +++++++++++++++++---------- 4 files changed, 46 insertions(+), 35 deletions(-) diff --git a/.travis.yml b/.travis.yml index 697aba2..e311dbc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,6 +22,8 @@ notifications: on_failure: always env: + - PG_VERSION=15 LEVEL=hardcore USE_TPCDS=0 + - PG_VERSION=15 - PG_VERSION=14 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=14 - PG_VERSION=13 LEVEL=hardcore USE_TPCDS=0 diff --git a/patches/custom_signals_15.0.patch b/patches/custom_signals_15.0.patch index 7678dbe..4e99c69 100644 --- a/patches/custom_signals_15.0.patch +++ b/patches/custom_signals_15.0.patch @@ -2,10 +2,10 @@ diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/proc index defb75a..4245d28 100644 --- a/src/backend/storage/ipc/procsignal.c +++ b/src/backend/storage/ipc/procsignal.c -@@ -95,6 +95,13 @@ typedef struct +@@ -96,6 +96,13 @@ typedef struct #define BARRIER_CLEAR_BIT(flags, type) \ ((flags) &= ~(((uint32) 1) << (uint32) (type))) - + +#define IsCustomProcSignalReason(reason) \ + ((reason) >= PROCSIG_CUSTOM_1 && (reason) <= PROCSIG_CUSTOM_N) + @@ -15,20 +15,20 @@ index defb75a..4245d28 100644 + static ProcSignalHeader *ProcSignal = NULL; static ProcSignalSlot *MyProcSignalSlot = NULL; - -@@ -103,6 +110,8 @@ static void CleanupProcSignalState(int status, Datum arg); + +@@ -103,6 +110,8 @@ static bool CheckProcSignal(ProcSignalReason reason); + static void CleanupProcSignalState(int status, Datum arg); static void ResetProcSignalBarrierBits(uint32 flags); - static bool ProcessBarrierPlaceholder(void); - + +static void CheckAndSetCustomSignalInterrupts(void); + /* * ProcSignalShmemSize - * Compute space needed for procsignal's shared memory + * Compute space needed for ProcSignal's shared memory @@ -246,6 +255,36 @@ CleanupProcSignalState(int status, Datum arg) slot->pss_pid = 0; } - + +/* + * RegisterCustomProcSignalHandler + * Assign specific handler of custom process signal with new @@ -62,7 +62,7 @@ index defb75a..4245d28 100644 /* * SendProcSignal * Send a signal to a Postgres process -@@ -679,7 +718,62 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) +@@ -675,7 +714,72 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN)) RecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN); @@ -135,12 +135,12 @@ index defb75a..4245d28 100644 + + RESUME_INTERRUPTS(); +} -\ No newline at end of file ++ diff --git a/src/backend/tcop/postgres.c 
b/src/backend/tcop/postgres.c index 8cea10c..dd77c98 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c -@@ -3364,6 +3364,8 @@ ProcessInterrupts(void) +@@ -3402,6 +3402,8 @@ ProcessInterrupts(void) if (ParallelMessagePending) HandleParallelMessages(); @@ -186,16 +186,17 @@ index eec186b..74af186 100644 NUM_PROCSIGNALS /* Must be last! */ } ProcSignalReason; -@@ -56,6 +68,8 @@ typedef enum - */ - PROCSIGNAL_BARRIER_PLACEHOLDER = 0 +@@ -51,6 +63,9 @@ typedef enum + { + PROCSIGNAL_BARRIER_SMGRRELEASE /* ask smgr to close files */ } ProcSignalBarrierType; ++ +/* Handler of custom process signal */ +typedef void (*ProcSignalHandler_type) (void); /* * prototypes for functions in procsignal.c -@@ -64,12 +78,15 @@ extern Size ProcSignalShmemSize(void); +@@ -59,12 +74,15 @@ extern Size ProcSignalShmemSize(void); extern void ProcSignalShmemInit(void); extern void ProcSignalInit(int pss_idx); diff --git a/patches/runtime_explain_15.0.patch b/patches/runtime_explain_15.0.patch index 7904cc2..adab6dc 100644 --- a/patches/runtime_explain_15.0.patch +++ b/patches/runtime_explain_15.0.patch @@ -2,7 +2,7 @@ diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 10644dfac4..7106ed4257 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c -@@ -984,14 +984,36 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) +@@ -990,14 +990,36 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) char *relname; char *conname = NULL; @@ -40,7 +40,7 @@ index 10644dfac4..7106ed4257 100644 continue; ExplainOpenGroup("Trigger", NULL, true, es); -@@ -1017,9 +1039,9 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) +@@ -1023,9 +1045,9 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) appendStringInfo(es->str, " on %s", relname); if (es->timing) appendStringInfo(es->str, ": time=%.3f calls=%.0f\n", @@ -52,7 +52,7 @@ index 10644dfac4..7106ed4257 100644 } else { -@@ -1028,9 +1050,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) +@@ -1034,9 +1056,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) ExplainPropertyText("Constraint Name", conname, es); ExplainPropertyText("Relation", relname, es); if (es->timing) @@ -64,7 +64,7 @@ index 10644dfac4..7106ed4257 100644 } if (conname) -@@ -1600,8 +1621,11 @@ ExplainNode(PlanState *planstate, List *ancestors, +@@ -1609,8 +1630,11 @@ ExplainNode(PlanState *planstate, List *ancestors, * instrumentation results the user didn't ask for. But we do the * InstrEndLoop call anyway, if possible, to reduce the number of cases * auto_explain has to contend with. 
@@ -77,7 +77,7 @@ index 10644dfac4..7106ed4257 100644 InstrEndLoop(planstate->instrument); if (es->analyze && -@@ -1636,7 +1660,7 @@ ExplainNode(PlanState *planstate, List *ancestors, +@@ -1645,7 +1669,7 @@ ExplainNode(PlanState *planstate, List *ancestors, ExplainPropertyFloat("Actual Loops", NULL, nloops, 0, es); } } @@ -86,7 +86,7 @@ index 10644dfac4..7106ed4257 100644 { if (es->format == EXPLAIN_FORMAT_TEXT) appendStringInfoString(es->str, " (never executed)"); -@@ -1652,6 +1676,75 @@ ExplainNode(PlanState *planstate, List *ancestors, +@@ -1661,6 +1685,75 @@ ExplainNode(PlanState *planstate, List *ancestors, } } @@ -162,7 +162,7 @@ index 10644dfac4..7106ed4257 100644 /* in text format, first line ends here */ if (es->format == EXPLAIN_FORMAT_TEXT) appendStringInfoChar(es->str, '\n'); -@@ -2051,6 +2144,9 @@ ExplainNode(PlanState *planstate, List *ancestors, +@@ -2068,6 +2161,9 @@ ExplainNode(PlanState *planstate, List *ancestors, /* Prepare per-worker buffer/WAL usage */ if (es->workers_state && (es->buffers || es->wal) && es->verbose) @@ -172,7 +172,7 @@ index 10644dfac4..7106ed4257 100644 { WorkerInstrumentation *w = planstate->worker_instrument; -@@ -3015,6 +3111,11 @@ show_hash_info(HashState *hashstate, ExplainState *es) +@@ -3032,6 +3128,11 @@ show_hash_info(HashState *hashstate, ExplainState *es) memcpy(&hinstrument, hashstate->hinstrument, sizeof(HashInstrumentation)); @@ -184,7 +184,7 @@ index 10644dfac4..7106ed4257 100644 /* * Merge results from workers. In the parallel-oblivious case, the * results from all participants should be identical, except where -@@ -3392,20 +3493,16 @@ show_instrumentation_count(const char *qlabel, int which, +@@ -3412,20 +3513,16 @@ show_instrumentation_count(const char *qlabel, int which, if (!es->analyze || !planstate->instrument) return; @@ -209,7 +209,7 @@ index 10644dfac4..7106ed4257 100644 } /* -@@ -3977,15 +4074,27 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, +@@ -4028,15 +4125,27 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, double insert_path; double other_path; diff --git a/tests/test_cases.py b/tests/test_cases.py index 1750bb1..b4bbbb3 100644 --- a/tests/test_cases.py +++ b/tests/test_cases.py @@ -45,7 +45,7 @@ def test_simple_query(config): acon1, acon2 = common.n_async_connect(config, 2) query = 'select count(*) from foo join bar on foo.c1=bar.c1 and unlock_if_eq_1(foo.c1)=bar.c1' expected = r"""Aggregate \(Current loop: actual rows=\d+, loop number=1\) - -> Hash Join \(Current loop: actual rows=62473, loop number=1\) + -> Hash Join \(Current loop: actual rows=\d+, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) -> Seq Scan on foo \(Current loop: actual rows=\d+, loop number=1\) @@ -111,10 +111,10 @@ def test_nested_call(config): expected_nested = r"""Result \(Current loop: actual rows=0, loop number=1\) InitPlan 1 \(returns \$0\) -> Aggregate \(Current loop: actual rows=0, loop number=1\) - -> Hash Join \(Current loop: actual rows=62473, loop number=1\) + -> Hash Join \(Current loop: actual rows=\d+, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) - -> Seq Scan on foo \(Current loop: actual rows=1000000, loop number=1\) + -> Seq Scan on foo \(Current loop: actual rows=\d+, loop number=1\) -> Hash \(Current loop: actual rows=500000, loop number=1\) Buckets: \d+ Batches: \d+ Memory Usage: \d+kB -> Seq Scan on bar \(Current loop: actual rows=\d+, loop number=1\)""" @@ -232,7 +232,7 @@ def 
test_costs(config): -> Hash Join \(cost=\d+.\d+..\d+.\d+ rows=\d+ width=0\) \(Current loop: actual rows=\d+, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) - -> Seq Scan on foo \(cost=0.00..\d+.\d+ rows=\d+ width=4\) \(Current loop: actual rows=1000000, loop number=1\) + -> Seq Scan on foo \(cost=0.00..\d+.\d+ rows=\d+ width=4\) \(Current loop: actual rows=\d+, loop number=1\) -> Hash \(cost=\d+.\d+..\d+.\d+ rows=\d+ width=4\) \(Current loop: actual rows=500000, loop number=1\) Buckets: \d+ Batches: \d+ Memory Usage: \d+kB -> Seq Scan on bar \(cost=0.00..\d+.\d+ rows=\d+ width=4\) \(Current loop: actual rows=\d+, loop number=1\)""" @@ -249,25 +249,33 @@ def test_buffers(config): acon1, acon2 = common.n_async_connect(config, 2) query = 'select count(*) from foo join bar on foo.c1=bar.c1 and unlock_if_eq_1(foo.c1)=bar.c1' - expected = r"""Aggregate \(Current loop: actual rows=0, loop number=1\) + temporary = r"""Aggregate \(Current loop: actual rows=0, loop number=1\) -> Hash Join \(Current loop: actual rows=\d+, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) - Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) - Buffers: shared hit=\d+, temp read=\d+ written=\d+ - -> Seq Scan on foo \(Current loop: actual rows=1000000, loop number=1\) + Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\)""" + expected = temporary + expected_15 = temporary + expected += r""" + Buffers: shared hit=\d+, temp read=\d+ written=\d+""" + expected_15 += r""" + Buffers: shared hit=\d+, temp written=\d+""" + temporary = r""" + -> Seq Scan on foo \(Current loop: actual rows=\d+, loop number=1\) Buffers: [^\n]* -> Hash \(Current loop: actual rows=500000, loop number=1\) Buckets: \d+ Batches: \d+ Memory Usage: \d+kB Buffers: shared hit=\d+, temp written=\d+ -> Seq Scan on bar \(Current loop: actual rows=\d+, loop number=1\) Buffers: .*""" + expected += temporary + expected_15 += temporary common.set_guc(acon1, 'pg_query_state.enable_buffers', 'on') qs, notices = common.onetime_query_state_locks(config, acon1, acon2, query, {'buffers': True}) assert len(qs) == 2 - assert re.match(expected, qs[0][3]) + assert (re.match(expected, qs[0][3]) or re.match(expected_15, qs[0][3])) assert len(notices) == 0 common.n_close((acon1, acon2)) @@ -282,7 +290,7 @@ def test_timing(config): -> Hash Join \(Current loop: actual time=\d+.\d+..\d+.\d+ rows=\d+, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) - -> Seq Scan on foo \(Current loop: actual time=\d+.\d+..\d+.\d+ rows=1000000, loop number=1\) + -> Seq Scan on foo \(Current loop: actual time=\d+.\d+..\d+.\d+ rows=\d+, loop number=1\) -> Hash \(Current loop: actual time=\d+.\d+..\d+.\d+ rows=500000, loop number=1\) Buckets: \d+ Batches: \d+ Memory Usage: \d+kB -> Seq Scan on bar \(Current loop: actual time=\d+.\d+..\d+.\d+ rows=\d+, loop number=1\)""" From c9f90cb47d0a5ed884cd7f1943493777524750eb Mon Sep 17 00:00:00 2001 From: Maxim Orlov Date: Mon, 15 May 2023 11:31:53 +0300 Subject: [PATCH 21/37] fixes in Makefile In order to be ready to the meson build and be able to support parallel installcheck do the following: - fix use of USE_MODULE_DB - isolation target should be included from the gloabl makefile - reorder vars to make it easy to read + fix out of source check target tags: pg_query_state --- Makefile | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ca9faab..4468c51 100644 --- a/Makefile +++ b/Makefile @@ -8,10 +8,20 
@@ EXTVERSION = 1.1 DATA = pg_query_state--1.0--1.1.sql DATA_built = $(EXTENSION)--$(EXTVERSION).sql PGFILEDESC = "pg_query_state - facility to track progress of plan execution" -EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/test.conf + EXTRA_CLEAN = ./isolation_output $(EXTENSION)--$(EXTVERSION).sql \ Dockerfile ./tests/*.pyc ./tmp_stress +ISOLATION = corner_cases +# +# PG11 doesn't support ISOLATION_OPTS variable. We have to use +# "CREATE/DROP EXTENTION" command in spec. +# +# One day, when we'll get rid of PG11, it will be possible to uncomment this +# variable and remove "CREATE EXTENTION" from spec. +# +# ISOLATION_OPTS = --load-extension=pg_query_state + ifdef USE_PGXS PG_CONFIG ?= pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) @@ -21,11 +31,17 @@ subdir = contrib/pg_query_state top_builddir = ../.. include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk +# need this to provide make check in case of "in source" build +EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/test.conf endif $(EXTENSION)--$(EXTVERSION).sql: init.sql cat $^ > $@ +# +# Make conditional targets to save backward compatibility with PG11. +# +ifeq ($(MAJORVERSION),11) ISOLATIONCHECKS = corner_cases check: isolationcheck @@ -46,3 +62,4 @@ submake-isolation: $(MAKE) -C $(top_builddir)/src/test/isolation all temp-install: EXTRA_INSTALL=contrib/pg_query_state +endif From 26948428d741caf292097e236750bf5e679816de Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Tue, 20 Jun 2023 14:36:14 +0300 Subject: [PATCH 22/37] Fix Copyrights. Only for basic files, not patches. --- LICENSE | 2 +- pg_query_state.c | 2 +- pg_query_state.h | 2 +- run_tests.sh | 2 +- signal_handler.c | 2 +- tests/common.py | 2 +- tests/pg_qs_test_runner.py | 2 +- tests/test_cases.py | 2 +- tests/tpcds.py | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/LICENSE b/LICENSE index 1bafabf..5d50c25 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ pg_query_state is released under the PostgreSQL License, a liberal Open Source license, similar to the BSD or MIT licenses. -Copyright (c) 2016-2019, Postgres Professional +Copyright (c) 2016-2023, Postgres Professional Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California diff --git a/pg_query_state.c b/pg_query_state.c index a5a3f47..a52e051 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -2,7 +2,7 @@ * pg_query_state.c * Extract information about query state from other backend * - * Copyright (c) 2016-2016, Postgres Professional + * Copyright (c) 2016-2023, Postgres Professional * * contrib/pg_query_state/pg_query_state.c * IDENTIFICATION diff --git a/pg_query_state.h b/pg_query_state.h index 2b13234..9152560 100644 --- a/pg_query_state.h +++ b/pg_query_state.h @@ -2,7 +2,7 @@ * pg_query_state.h * Headers for pg_query_state extension. 
* - * Copyright (c) 2016-2016, Postgres Professional + * Copyright (c) 2016-2023, Postgres Professional * * IDENTIFICATION * contrib/pg_query_state/pg_query_state.h diff --git a/run_tests.sh b/run_tests.sh index fbf2da1..1c43847 100644 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # -# Copyright (c) 2019, Postgres Professional +# Copyright (c) 2019-2023, Postgres Professional # # supported levels: # * standard diff --git a/signal_handler.c b/signal_handler.c index c8f2950..7e4b602 100644 --- a/signal_handler.c +++ b/signal_handler.c @@ -2,7 +2,7 @@ * signal_handler.c * Collect current query state and send it to requestor in custom signal handler * - * Copyright (c) 2016-2016, Postgres Professional + * Copyright (c) 2016-2023, Postgres Professional * * IDENTIFICATION * contrib/pg_query_state/signal_handler.c diff --git a/tests/common.py b/tests/common.py index ac24e76..c83abb1 100644 --- a/tests/common.py +++ b/tests/common.py @@ -1,6 +1,6 @@ ''' common.py -Copyright (c) 2016-2020, Postgres Professional +Copyright (c) 2016-2023, Postgres Professional ''' import psycopg2 diff --git a/tests/pg_qs_test_runner.py b/tests/pg_qs_test_runner.py index a6e02e9..a0df6a9 100644 --- a/tests/pg_qs_test_runner.py +++ b/tests/pg_qs_test_runner.py @@ -1,6 +1,6 @@ ''' pg_qs_test_runner.py -Copyright (c) 2016-2021, Postgres Professional +Copyright (c) 2016-2023, Postgres Professional ''' import argparse diff --git a/tests/test_cases.py b/tests/test_cases.py index b4bbbb3..c6b0fa2 100644 --- a/tests/test_cases.py +++ b/tests/test_cases.py @@ -1,6 +1,6 @@ ''' test_cases.py -Copyright (c) 2016-2021, Postgres Professional +Copyright (c) 2016-2023, Postgres Professional ''' import json diff --git a/tests/tpcds.py b/tests/tpcds.py index 8ac7183..944b799 100644 --- a/tests/tpcds.py +++ b/tests/tpcds.py @@ -1,6 +1,6 @@ ''' test_cases.py -Copyright (c) 2016-2020, Postgres Professional +Copyright (c) 2016-2023, Postgres Professional ''' import os From e8cde164f9dce99e6783c01dedbad20c6a8f75b2 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Mon, 4 Sep 2023 19:35:56 +0300 Subject: [PATCH 23/37] Rename some support functions for pgstat* views. See the commit 8dfa37b797843a83a5756ea3309055e8953e1a86 (Rename some support functions for pgstat* views.) in PostgreSQL 16. --- pg_query_state.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pg_query_state.c b/pg_query_state.c index a52e051..ab76f7f 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -365,7 +365,11 @@ search_be_status(int pid) for (beid = 1; beid <= pgstat_fetch_stat_numbackends(); beid++) { +#if PG_VERSION_NUM >= 160000 + PgBackendStatus *be_status = pgstat_get_beentry_by_backend_id(beid); +#else PgBackendStatus *be_status = pgstat_fetch_stat_beentry(beid); +#endif if (be_status && be_status->st_procpid == pid) return be_status; From 9597aca40a73a7e3fcfb0983446eaa4fa8d71c97 Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Thu, 22 Feb 2024 20:22:28 +0300 Subject: [PATCH 24/37] Module update for PostgreSQL 16. 
--- .travis.yml | 2 + LICENSE | 2 +- README.md | 15 +- patches/custom_signals_16.0.patch | 229 +++++++++++++++++++++++++ patches/runtime_explain_16.0.patch | 258 +++++++++++++++++++++++++++++ pg_query_state.c | 2 +- pg_query_state.h | 2 +- run_tests.sh | 2 +- signal_handler.c | 2 +- tests/common.py | 2 +- tests/pg_qs_test_runner.py | 2 +- tests/test_cases.py | 2 +- tests/tpcds.py | 2 +- 13 files changed, 512 insertions(+), 10 deletions(-) create mode 100644 patches/custom_signals_16.0.patch create mode 100644 patches/runtime_explain_16.0.patch diff --git a/.travis.yml b/.travis.yml index e311dbc..0983e07 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,6 +22,8 @@ notifications: on_failure: always env: + - PG_VERSION=16 LEVEL=hardcore USE_TPCDS=0 + - PG_VERSION=16 - PG_VERSION=15 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=15 - PG_VERSION=14 LEVEL=hardcore USE_TPCDS=0 diff --git a/LICENSE b/LICENSE index 5d50c25..7c10525 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ pg_query_state is released under the PostgreSQL License, a liberal Open Source license, similar to the BSD or MIT licenses. -Copyright (c) 2016-2023, Postgres Professional +Copyright (c) 2016-2024, Postgres Professional Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California diff --git a/README.md b/README.md index 3ba5eb2..fba15cd 100644 --- a/README.md +++ b/README.md @@ -15,12 +15,25 @@ Using this module there can help in the following things: - overwatch the query execution ## Installation -To install `pg_query_state`, please apply corresponding patches `custom_signal_(PG_VERSION).patch` and `runtime_explain_(PG_VERSION).patch` (or `runtime_explain.patch` for PG version <= 10.0) in `patches/` directory to reqired stable version of PostgreSQL and rebuild PostgreSQL. +To install `pg_query_state`, please apply corresponding patches `custom_signal_(PG_VERSION).patch` and `runtime_explain_(PG_VERSION).patch` (or `runtime_explain.patch` for PG version <= 10.0) from the `patches/` directory to reqired stable version of PostgreSQL and rebuild PostgreSQL. + +To do this, run the following commands from the postgresql directory: +``` +patch -p1 < path_to_pg_query_state_folder/patches/runtime_explain_(PG_VERSION).patch +patch -p1 < path_to_pg_query_state_folder/patches/custom_signal_(PG_VERSION).patch +``` Then execute this in the module's directory: ``` make install USE_PGXS=1 ``` +To execute the command correctly, make sure you have the PATH or PG_CONFIG variable set. 
+``` +export PATH=path_to_your_bin_folder:$PATH +# or +export PG_CONFIG=path_to_your_bin_folder/pg_config +``` + Add module name to the `shared_preload_libraries` parameter in `postgresql.conf`: ``` shared_preload_libraries = 'pg_query_state' diff --git a/patches/custom_signals_16.0.patch b/patches/custom_signals_16.0.patch new file mode 100644 index 0000000..3a2183f --- /dev/null +++ b/patches/custom_signals_16.0.patch @@ -0,0 +1,229 @@ +diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c +index c85cb5cc18..37ae4b3759 100644 +--- a/src/backend/storage/ipc/procsignal.c ++++ b/src/backend/storage/ipc/procsignal.c +@@ -6,6 +6,7 @@ + * + * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California ++ * Portions Copyright (c) 2024, Postgres Professional + * + * IDENTIFICATION + * src/backend/storage/ipc/procsignal.c +@@ -97,6 +98,13 @@ typedef struct + #define BARRIER_CLEAR_BIT(flags, type) \ + ((flags) &= ~(((uint32) 1) << (uint32) (type))) + ++#define IsCustomProcSignalReason(reason) \ ++ ((reason) >= PROCSIG_CUSTOM_1 && (reason) <= PROCSIG_CUSTOM_N) ++ ++static bool CustomSignalPendings[NUM_CUSTOM_PROCSIGNALS]; ++static bool CustomSignalProcessing[NUM_CUSTOM_PROCSIGNALS]; ++static ProcSignalHandler_type CustomInterruptHandlers[NUM_CUSTOM_PROCSIGNALS]; ++ + static ProcSignalHeader *ProcSignal = NULL; + static ProcSignalSlot *MyProcSignalSlot = NULL; + +@@ -104,6 +112,8 @@ static bool CheckProcSignal(ProcSignalReason reason); + static void CleanupProcSignalState(int status, Datum arg); + static void ResetProcSignalBarrierBits(uint32 flags); + ++static void CheckAndSetCustomSignalInterrupts(void); ++ + /* + * ProcSignalShmemSize + * Compute space needed for ProcSignal's shared memory +@@ -247,6 +257,36 @@ CleanupProcSignalState(int status, Datum arg) + slot->pss_pid = 0; + } + ++/* ++ * RegisterCustomProcSignalHandler ++ * Assign specific handler of custom process signal with new ++ * ProcSignalReason key. ++ * ++ * This function has to be called in _PG_init function of extensions at the ++ * stage of loading shared preloaded libraries. Otherwise it throws fatal error. ++ * ++ * Return INVALID_PROCSIGNAL if all slots for custom signals are occupied. ++ */ ++ProcSignalReason ++RegisterCustomProcSignalHandler(ProcSignalHandler_type handler) ++{ ++ ProcSignalReason reason; ++ ++ if (!process_shared_preload_libraries_in_progress) ++ ereport(FATAL, (errcode(ERRCODE_INTERNAL_ERROR), ++ errmsg("cannot register custom signal after startup"))); ++ ++ /* Iterate through custom signal slots to find a free one */ ++ for (reason = PROCSIG_CUSTOM_1; reason <= PROCSIG_CUSTOM_N; reason++) ++ if (!CustomInterruptHandlers[reason - PROCSIG_CUSTOM_1]) ++ { ++ CustomInterruptHandlers[reason - PROCSIG_CUSTOM_1] = handler; ++ return reason; ++ } ++ ++ return INVALID_PROCSIGNAL; ++} ++ + /* + * SendProcSignal + * Send a signal to a Postgres process +@@ -682,7 +722,72 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) + if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN)) + RecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN); + ++ CheckAndSetCustomSignalInterrupts(); ++ + SetLatch(MyLatch); + + errno = save_errno; + } ++ ++/* ++ * Handle receipt of an interrupt indicating any of custom process signals. 
++ */ ++static void ++CheckAndSetCustomSignalInterrupts() ++{ ++ ProcSignalReason reason; ++ ++ for (reason = PROCSIG_CUSTOM_1; reason <= PROCSIG_CUSTOM_N; reason++) ++ { ++ if (CheckProcSignal(reason)) ++ { ++ ++ /* set interrupt flags */ ++ InterruptPending = true; ++ CustomSignalPendings[reason - PROCSIG_CUSTOM_1] = true; ++ } ++ } ++ ++ SetLatch(MyLatch); ++} ++ ++/* ++ * CheckAndHandleCustomSignals ++ * Check custom signal flags and call handler assigned to that signal ++ * if it is not NULL ++ * ++ * This function is called within CHECK_FOR_INTERRUPTS if interrupt occurred. ++ */ ++void ++CheckAndHandleCustomSignals(void) ++{ ++ int i; ++ ++ /* ++ * This is invoked from ProcessInterrupts(), and since some of the ++ * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential ++ * for recursive calls if more signals are received while this runs, so ++ * let's block interrupts until done. ++ */ ++ HOLD_INTERRUPTS(); ++ ++ /* Check on expiring of custom signals and call its handlers if exist */ ++ for (i = 0; i < NUM_CUSTOM_PROCSIGNALS; i++) ++ { ++ if (!CustomSignalProcessing[i] && CustomSignalPendings[i]) ++ { ++ ProcSignalHandler_type handler; ++ ++ CustomSignalPendings[i] = false; ++ handler = CustomInterruptHandlers[i]; ++ if (handler != NULL) ++ { ++ CustomSignalProcessing[i] = true; ++ handler(); ++ CustomSignalProcessing[i] = false; ++ } ++ } ++ } ++ ++ RESUME_INTERRUPTS(); ++} +diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c +index 36cc99ec9c..a3acce427a 100644 +--- a/src/backend/tcop/postgres.c ++++ b/src/backend/tcop/postgres.c +@@ -3442,6 +3442,8 @@ ProcessInterrupts(void) + if (ParallelMessagePending) + HandleParallelMessages(); + ++ CheckAndHandleCustomSignals(); ++ + if (LogMemoryContextPending) + ProcessLogMemoryContextInterrupt(); + +diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h +index 2f52100b00..0e31a5771e 100644 +--- a/src/include/storage/procsignal.h ++++ b/src/include/storage/procsignal.h +@@ -6,6 +6,7 @@ + * + * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California ++ * Portions Copyright (c) 2024, Postgres Professional + * + * src/include/storage/procsignal.h + * +@@ -17,6 +18,8 @@ + #include "storage/backendid.h" + + ++#define NUM_CUSTOM_PROCSIGNALS 64 ++ + /* + * Reasons for signaling a Postgres child process (a backend or an auxiliary + * process, like checkpointer). We can cope with concurrent signals for different +@@ -29,6 +32,8 @@ + */ + typedef enum + { ++ INVALID_PROCSIGNAL = -1, /* Must be first */ ++ + PROCSIG_CATCHUP_INTERRUPT, /* sinval catchup interrupt */ + PROCSIG_NOTIFY_INTERRUPT, /* listen/notify interrupt */ + PROCSIG_PARALLEL_MESSAGE, /* message from cooperating parallel backend */ +@@ -46,6 +51,14 @@ typedef enum + PROCSIG_RECOVERY_CONFLICT_BUFFERPIN, + PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK, + ++ PROCSIG_CUSTOM_1, ++ /* ++ * PROCSIG_CUSTOM_2, ++ * ..., ++ * PROCSIG_CUSTOM_N-1, ++ */ ++ PROCSIG_CUSTOM_N = PROCSIG_CUSTOM_1 + NUM_CUSTOM_PROCSIGNALS - 1, ++ + NUM_PROCSIGNALS /* Must be last! 
*/ + } ProcSignalReason; + +@@ -54,6 +67,9 @@ typedef enum + PROCSIGNAL_BARRIER_SMGRRELEASE /* ask smgr to close files */ + } ProcSignalBarrierType; + ++/* Handler of custom process signal */ ++typedef void (*ProcSignalHandler_type) (void); ++ + /* + * prototypes for functions in procsignal.c + */ +@@ -61,12 +77,15 @@ extern Size ProcSignalShmemSize(void); + extern void ProcSignalShmemInit(void); + + extern void ProcSignalInit(int pss_idx); ++extern ProcSignalReason ++ RegisterCustomProcSignalHandler(ProcSignalHandler_type handler); + extern int SendProcSignal(pid_t pid, ProcSignalReason reason, + BackendId backendId); + + extern uint64 EmitProcSignalBarrier(ProcSignalBarrierType type); + extern void WaitForProcSignalBarrier(uint64 generation); + extern void ProcessProcSignalBarrier(void); ++extern void CheckAndHandleCustomSignals(void); + + extern void procsignal_sigusr1_handler(SIGNAL_ARGS); + diff --git a/patches/runtime_explain_16.0.patch b/patches/runtime_explain_16.0.patch new file mode 100644 index 0000000..3d132ca --- /dev/null +++ b/patches/runtime_explain_16.0.patch @@ -0,0 +1,258 @@ +diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c +index 6c2e5c8a4f..74be3944d1 100644 +--- a/src/backend/commands/explain.c ++++ b/src/backend/commands/explain.c +@@ -1023,14 +1023,36 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + char *relname; + char *conname = NULL; + ++ instr_time starttimespan; ++ double total; ++ double ntuples; ++ double ncalls; ++ ++ if (!es->runtime) ++ { + /* Must clean up instrumentation state */ + InstrEndLoop(instr); ++ } ++ ++ /* Collect statistic variables */ ++ if (!INSTR_TIME_IS_ZERO(instr->starttime)) ++ { ++ INSTR_TIME_SET_CURRENT(starttimespan); ++ INSTR_TIME_SUBTRACT(starttimespan, instr->starttime); ++ } ++ else ++ INSTR_TIME_SET_ZERO(starttimespan); ++ ++ total = instr->total + INSTR_TIME_GET_DOUBLE(instr->counter) ++ + INSTR_TIME_GET_DOUBLE(starttimespan); ++ ntuples = instr->ntuples + instr->tuplecount; ++ ncalls = ntuples + !INSTR_TIME_IS_ZERO(starttimespan); + + /* + * We ignore triggers that were never invoked; they likely aren't + * relevant to the current query type. + */ +- if (instr->ntuples == 0) ++ if (ncalls == 0) + continue; + + ExplainOpenGroup("Trigger", NULL, true, es); +@@ -1056,9 +1078,9 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + appendStringInfo(es->str, " on %s", relname); + if (es->timing) + appendStringInfo(es->str, ": time=%.3f calls=%.0f\n", +- 1000.0 * instr->total, instr->ntuples); ++ 1000.0 * total, ncalls); + else +- appendStringInfo(es->str, ": calls=%.0f\n", instr->ntuples); ++ appendStringInfo(es->str, ": calls=%.0f\n", ncalls); + } + else + { +@@ -1067,9 +1089,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + ExplainPropertyText("Constraint Name", conname, es); + ExplainPropertyText("Relation", relname, es); + if (es->timing) +- ExplainPropertyFloat("Time", "ms", 1000.0 * instr->total, 3, +- es); +- ExplainPropertyFloat("Calls", NULL, instr->ntuples, 0, es); ++ ExplainPropertyFloat("Time", "ms", 1000.0 * total, 3, es); ++ ExplainPropertyFloat("Calls", NULL, ncalls, 0, es); + } + + if (conname) +@@ -1645,8 +1666,11 @@ ExplainNode(PlanState *planstate, List *ancestors, + * instrumentation results the user didn't ask for. But we do the + * InstrEndLoop call anyway, if possible, to reduce the number of cases + * auto_explain has to contend with. ++ * ++ * If flag es->stateinfo is set, i.e. 
when printing the current execution ++ * state, this step of cleaning up is missed. + */ +- if (planstate->instrument) ++ if (planstate->instrument && !es->runtime) + InstrEndLoop(planstate->instrument); + + if (es->analyze && +@@ -1681,7 +1705,7 @@ ExplainNode(PlanState *planstate, List *ancestors, + ExplainPropertyFloat("Actual Loops", NULL, nloops, 0, es); + } + } +- else if (es->analyze) ++ else if (es->analyze && !es->runtime) + { + if (es->format == EXPLAIN_FORMAT_TEXT) + appendStringInfoString(es->str, " (never executed)"); +@@ -1697,6 +1721,75 @@ ExplainNode(PlanState *planstate, List *ancestors, + } + } + ++ /* ++ * Print the progress of node execution at current loop. ++ */ ++ if (planstate->instrument && es->analyze && es->runtime) ++ { ++ instr_time starttimespan; ++ double startup_sec; ++ double total_sec; ++ double rows; ++ double loop_num; ++ bool finished; ++ ++ if (!INSTR_TIME_IS_ZERO(planstate->instrument->starttime)) ++ { ++ INSTR_TIME_SET_CURRENT(starttimespan); ++ INSTR_TIME_SUBTRACT(starttimespan, planstate->instrument->starttime); ++ } ++ else ++ INSTR_TIME_SET_ZERO(starttimespan); ++ startup_sec = 1000.0 * planstate->instrument->firsttuple; ++ total_sec = 1000.0 * (INSTR_TIME_GET_DOUBLE(planstate->instrument->counter) ++ + INSTR_TIME_GET_DOUBLE(starttimespan)); ++ rows = planstate->instrument->tuplecount; ++ loop_num = planstate->instrument->nloops + 1; ++ ++ finished = planstate->instrument->nloops > 0 ++ && !planstate->instrument->running ++ && INSTR_TIME_IS_ZERO(starttimespan); ++ ++ if (!finished) ++ { ++ ExplainOpenGroup("Current loop", "Current loop", true, es); ++ if (es->format == EXPLAIN_FORMAT_TEXT) ++ { ++ if (es->timing) ++ { ++ if (planstate->instrument->running) ++ appendStringInfo(es->str, ++ " (Current loop: actual time=%.3f..%.3f rows=%.0f, loop number=%.0f)", ++ startup_sec, total_sec, rows, loop_num); ++ else ++ appendStringInfo(es->str, ++ " (Current loop: running time=%.3f actual rows=0, loop number=%.0f)", ++ total_sec, loop_num); ++ } ++ else ++ appendStringInfo(es->str, ++ " (Current loop: actual rows=%.0f, loop number=%.0f)", ++ rows, loop_num); ++ } ++ else ++ { ++ ExplainPropertyFloat("Actual Loop Number", NULL, loop_num, 0, es); ++ if (es->timing) ++ { ++ if (planstate->instrument->running) ++ { ++ ExplainPropertyFloat("Actual Startup Time", NULL, startup_sec, 3, es); ++ ExplainPropertyFloat("Actual Total Time", NULL, total_sec, 3, es); ++ } ++ else ++ ExplainPropertyFloat("Running Time", NULL, total_sec, 3, es); ++ } ++ ExplainPropertyFloat("Actual Rows", NULL, rows, 0, es); ++ } ++ ExplainCloseGroup("Current loop", "Current loop", true, es); ++ } ++ } ++ + /* in text format, first line ends here */ + if (es->format == EXPLAIN_FORMAT_TEXT) + appendStringInfoChar(es->str, '\n'); +@@ -2104,6 +2197,9 @@ ExplainNode(PlanState *planstate, List *ancestors, + + /* Prepare per-worker buffer/WAL usage */ + if (es->workers_state && (es->buffers || es->wal) && es->verbose) ++ /* Show worker detail after query execution */ ++ if (es->analyze && es->verbose && planstate->worker_instrument ++ && !es->runtime) + { + WorkerInstrumentation *w = planstate->worker_instrument; + +@@ -3068,6 +3164,11 @@ show_hash_info(HashState *hashstate, ExplainState *es) + memcpy(&hinstrument, hashstate->hinstrument, + sizeof(HashInstrumentation)); + ++ if (hashstate->hashtable) ++ { ++ ExecHashAccumInstrumentation(&hinstrument, hashstate->hashtable); ++ } ++ + /* + * Merge results from workers. 
In the parallel-oblivious case, the + * results from all participants should be identical, except where +@@ -3447,20 +3548,16 @@ show_instrumentation_count(const char *qlabel, int which, + if (!es->analyze || !planstate->instrument) + return; + ++ nloops = planstate->instrument->nloops; + if (which == 2) +- nfiltered = planstate->instrument->nfiltered2; ++ nfiltered = ((nloops > 0) ? planstate->instrument->nfiltered2 / nloops : 0); + else +- nfiltered = planstate->instrument->nfiltered1; ++ nfiltered = ((nloops > 0) ? planstate->instrument->nfiltered1 / nloops : 0); + nloops = planstate->instrument->nloops; + + /* In text mode, suppress zero counts; they're not interesting enough */ + if (nfiltered > 0 || es->format != EXPLAIN_FORMAT_TEXT) +- { +- if (nloops > 0) +- ExplainPropertyFloat(qlabel, NULL, nfiltered / nloops, 0, es); +- else +- ExplainPropertyFloat(qlabel, NULL, 0.0, 0, es); +- } ++ ExplainPropertyFloat(qlabel, NULL, nfiltered, 0, es); + } + + /* +@@ -4060,15 +4157,27 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, + double insert_path; + double other_path; + +- InstrEndLoop(outerPlanState(mtstate)->instrument); ++ if (!es->runtime) ++ InstrEndLoop(outerPlanState(mtstate)->instrument); + + /* count the number of source rows */ +- total = outerPlanState(mtstate)->instrument->ntuples; +- other_path = mtstate->ps.instrument->ntuples2; +- insert_path = total - other_path; ++ other_path = mtstate->ps.instrument->nfiltered2; ++ ++ /* ++ * Insert occurs after extracting row from subplan and in runtime mode ++ * we can appear between these two operations - situation when ++ * total > insert_path + other_path. Therefore we don't know exactly ++ * whether last row from subplan is inserted. ++ * We don't print inserted tuples in runtime mode in order to not print ++ * inconsistent data ++ */ ++ if (!es->runtime) ++ { ++ total = outerPlanState(mtstate)->instrument->ntuples; ++ insert_path = total - other_path; ++ ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); ++ } + +- ExplainPropertyFloat("Tuples Inserted", NULL, +- insert_path, 0, es); + ExplainPropertyFloat("Conflicting Tuples", NULL, + other_path, 0, es); + } +diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h +index 3d3e632a0c..3eb7bf345d 100644 +--- a/src/include/commands/explain.h ++++ b/src/include/commands/explain.h +@@ -48,6 +48,8 @@ typedef struct ExplainState + bool settings; /* print modified settings */ + bool generic; /* generate a generic plan */ + ExplainFormat format; /* output format */ ++ bool runtime; /* print intermediate state of query execution, ++ not after completion */ + /* state for output formatting --- not reset for each new plan tree */ + int indent; /* current indentation level */ + List *grouping_stack; /* format-specific grouping state */ diff --git a/pg_query_state.c b/pg_query_state.c index ab76f7f..7d03c22 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -2,7 +2,7 @@ * pg_query_state.c * Extract information about query state from other backend * - * Copyright (c) 2016-2023, Postgres Professional + * Copyright (c) 2016-2024, Postgres Professional * * contrib/pg_query_state/pg_query_state.c * IDENTIFICATION diff --git a/pg_query_state.h b/pg_query_state.h index 9152560..f632008 100644 --- a/pg_query_state.h +++ b/pg_query_state.h @@ -2,7 +2,7 @@ * pg_query_state.h * Headers for pg_query_state extension. 
* - * Copyright (c) 2016-2023, Postgres Professional + * Copyright (c) 2016-2024, Postgres Professional * * IDENTIFICATION * contrib/pg_query_state/pg_query_state.h diff --git a/run_tests.sh b/run_tests.sh index 1c43847..7e3cf79 100644 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2023, Postgres Professional +# Copyright (c) 2019-2024, Postgres Professional # # supported levels: # * standard diff --git a/signal_handler.c b/signal_handler.c index 7e4b602..dfe8780 100644 --- a/signal_handler.c +++ b/signal_handler.c @@ -2,7 +2,7 @@ * signal_handler.c * Collect current query state and send it to requestor in custom signal handler * - * Copyright (c) 2016-2023, Postgres Professional + * Copyright (c) 2016-2024, Postgres Professional * * IDENTIFICATION * contrib/pg_query_state/signal_handler.c diff --git a/tests/common.py b/tests/common.py index c83abb1..6dab69a 100644 --- a/tests/common.py +++ b/tests/common.py @@ -1,6 +1,6 @@ ''' common.py -Copyright (c) 2016-2023, Postgres Professional +Copyright (c) 2016-2024, Postgres Professional ''' import psycopg2 diff --git a/tests/pg_qs_test_runner.py b/tests/pg_qs_test_runner.py index a0df6a9..f4088a9 100644 --- a/tests/pg_qs_test_runner.py +++ b/tests/pg_qs_test_runner.py @@ -1,6 +1,6 @@ ''' pg_qs_test_runner.py -Copyright (c) 2016-2023, Postgres Professional +Copyright (c) 2016-2024, Postgres Professional ''' import argparse diff --git a/tests/test_cases.py b/tests/test_cases.py index c6b0fa2..f86641d 100644 --- a/tests/test_cases.py +++ b/tests/test_cases.py @@ -1,6 +1,6 @@ ''' test_cases.py -Copyright (c) 2016-2023, Postgres Professional +Copyright (c) 2016-2024, Postgres Professional ''' import json diff --git a/tests/tpcds.py b/tests/tpcds.py index 944b799..1f2b6da 100644 --- a/tests/tpcds.py +++ b/tests/tpcds.py @@ -1,6 +1,6 @@ ''' test_cases.py -Copyright (c) 2016-2023, Postgres Professional +Copyright (c) 2016-2024, Postgres Professional ''' import os From d099f6834bf166dc993e4858736760de972b6b76 Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Tue, 27 Feb 2024 22:16:15 +0300 Subject: [PATCH 25/37] Fix Dockerfile for Travis-CI. --- Dockerfile.tmpl | 2 +- docker-compose.yml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl index 1e512bc..93b9833 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile.tmpl @@ -6,7 +6,7 @@ RUN apk add --no-cache \ perl perl-ipc-run \ make musl-dev gcc bison flex coreutils \ zlib-dev libedit-dev \ - clang clang-analyzer linux-headers \ + icu-dev clang clang-analyzer linux-headers \ python3 python3-dev py3-virtualenv; diff --git a/docker-compose.yml b/docker-compose.yml index 67f1cee..550e2be 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,2 +1,3 @@ -tests: +services: + tests: build: . 
\ No newline at end of file From 25bd499d95517dd05ac6af72e497a65c9965ee6a Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 3 Apr 2024 10:59:35 +0300 Subject: [PATCH 26/37] Fix build with PostgreSQL 17devel at 7eb9a8201890f3b208fd4c109a5b08bf139b692a See the following commits in PostgreSQL 17devel: - ab355e3a88de745607f6dd4c21f0119b5c68f2ad Redefine backend ID to be an index into the proc array - 024c521117579a6d356050ad3d78fdc95e44eefa Replace BackendIds with 0-based ProcNumbers --- pg_query_state.c | 47 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index 7d03c22..adea7db 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -365,7 +365,9 @@ search_be_status(int pid) for (beid = 1; beid <= pgstat_fetch_stat_numbackends(); beid++) { -#if PG_VERSION_NUM >= 160000 +#if PG_VERSION_NUM >= 170000 + PgBackendStatus *be_status = pgstat_get_beentry_by_proc_number(beid); +#elif PG_VERSION_NUM >= 160000 PgBackendStatus *be_status = pgstat_get_beentry_by_backend_id(beid); #else PgBackendStatus *be_status = pgstat_fetch_stat_beentry(beid); @@ -505,7 +507,14 @@ pg_query_state(PG_FUNCTION_ARGS) errmsg("attempt to extract state of current process"))); proc = BackendPidGetProc(pid); - if (!proc || proc->backendId == InvalidBackendId || proc->databaseId == InvalidOid || proc->roleId == InvalidOid) + if (!proc || +#if PG_VERSION_NUM >= 170000 + proc->vxid.procNumber == INVALID_PROC_NUMBER || +#else + proc->backendId == InvalidBackendId || +#endif + proc->databaseId == InvalidOid || + proc->roleId == InvalidOid) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("backend with pid=%d not found", pid))); @@ -730,7 +739,12 @@ GetRemoteBackendUserId(PGPROC *proc) { Oid result; +#if PG_VERSION_NUM >= 170000 + Assert(proc && proc->vxid.procNumber != INVALID_PROC_NUMBER); +#else Assert(proc && proc->backendId != InvalidBackendId); +#endif + Assert(UserIdPollReason != INVALID_PROCSIGNAL); Assert(counterpart_userid); @@ -738,7 +752,12 @@ GetRemoteBackendUserId(PGPROC *proc) counterpart_userid->caller = MyLatch; pg_write_barrier(); +#if PG_VERSION_NUM >= 170000 + SendProcSignal(proc->pid, UserIdPollReason, proc->vxid.procNumber); +#else SendProcSignal(proc->pid, UserIdPollReason, proc->backendId); +#endif + for (;;) { SpinLockAcquire(&counterpart_userid->mutex); @@ -926,7 +945,12 @@ GetRemoteBackendWorkers(PGPROC *proc) List *result = NIL; LOCKTAG tag; +#if PG_VERSION_NUM >= 170000 + Assert(proc && proc->vxid.procNumber != INVALID_PROC_NUMBER); +#else Assert(proc && proc->backendId != InvalidBackendId); +#endif + Assert(WorkerPollReason != INVALID_PROCSIGNAL); Assert(mq); @@ -936,7 +960,12 @@ GetRemoteBackendWorkers(PGPROC *proc) shm_mq_set_receiver(mq, MyProc); UnlockShmem(&tag); +#if PG_VERSION_NUM >= 170000 + sig_result = SendProcSignal(proc->pid, WorkerPollReason, proc->vxid.procNumber); +#else sig_result = SendProcSignal(proc->pid, WorkerPollReason, proc->backendId); +#endif + if (sig_result == -1) goto signal_error; @@ -1088,9 +1117,16 @@ GetRemoteBackendQueryStates(PGPROC *leader, * send signal `QueryStatePollReason` to all processes and define all alive * ones */ +#if PG_VERSION_NUM >= 170000 + sig_result = SendProcSignal(leader->pid, + QueryStatePollReason, + leader->vxid.procNumber); +#else sig_result = SendProcSignal(leader->pid, QueryStatePollReason, leader->backendId); +#endif + if (sig_result == -1) goto signal_error; foreach(iter, pworkers) @@ -1101,9 +1137,16 @@ 
GetRemoteBackendQueryStates(PGPROC *leader, pg_atomic_add_fetch_u32(&counterpart_userid->n_peers, 1); +#if PG_VERSION_NUM >= 170000 + sig_result = SendProcSignal(proc->pid, + QueryStatePollReason, + proc->vxid.procNumber); +#else sig_result = SendProcSignal(proc->pid, QueryStatePollReason, proc->backendId); +#endif + if (sig_result == -1) { if (errno != ESRCH) From 1547e213cf366c7ee399d0e9a3bf1b9c5e6764c2 Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Fri, 12 Apr 2024 13:02:05 +0300 Subject: [PATCH 27/37] [PGPRO-10083] Set guc to false in python test. pg_query_state.enable_timing and enable_buffers default to false. But we should forcefully set guc variables to false in case of its true values in the configuration file. Tags: pg_query_state. --- tests/test_cases.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_cases.py b/tests/test_cases.py index f86641d..c866f58 100644 --- a/tests/test_cases.py +++ b/tests/test_cases.py @@ -379,6 +379,9 @@ def test_timing_buffers_conflicts(config): timing_pattern = '(?:running time=\d+.\d+)|(?:actual time=\d+.\d+..\d+.\d+)' buffers_pattern = 'Buffers:' + common.set_guc(acon, 'pg_query_state.enable_timing', 'off') + common.set_guc(acon, 'pg_query_state.enable_buffers', 'off') + qs, notices = common.onetime_query_state(config, acon, query, {'timing': True, 'buffers': False}) assert len(qs) == 1 and not re.search(timing_pattern, qs[0][3]) assert notices == ['WARNING: timing statistics disabled\n'] From aeabc16b73b6699360e653075afefbeacaf002a8 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Mon, 15 Apr 2024 18:38:51 +0300 Subject: [PATCH 28/37] When searching for backend status iterate over the index in the LocalPgBackendStatus array, not over BackendId for v16 or ProcNumber for 16+. BackendId may not be equal to the index +1 in the LocalPgBackendStatus array as well as ProcNumber may not be equal the index. 
--- pg_query_state.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index adea7db..1949643 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -365,10 +365,16 @@ search_be_status(int pid) for (beid = 1; beid <= pgstat_fetch_stat_numbackends(); beid++) { -#if PG_VERSION_NUM >= 170000 - PgBackendStatus *be_status = pgstat_get_beentry_by_proc_number(beid); -#elif PG_VERSION_NUM >= 160000 - PgBackendStatus *be_status = pgstat_get_beentry_by_backend_id(beid); +#if PG_VERSION_NUM >= 160000 + LocalPgBackendStatus *lbe_status = pgstat_get_local_beentry_by_index(beid); + PgBackendStatus *be_status; + + Assert(lbe_status); + #ifndef PGPRO_STD + be_status = &lbe_status->backendStatus; + #else + be_status = lbe_status->backendStatus; + #endif #else PgBackendStatus *be_status = pgstat_fetch_stat_beentry(beid); #endif From a81edd4f7bd7eb063e58b579554f0698b51fdb42 Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Wed, 17 Apr 2024 14:09:48 +0300 Subject: [PATCH 29/37] Update TPC-DS tests 1) Enable TPC-DS tests 2) Added the ability to crash when calling function pg_query_state 3) Fix path to library in venv 4) Disable requiring Travis tests to be run for unsupported versions --- .travis.yml | 18 ++++++++---------- run_tests.sh | 15 ++++++++++----- tests/pg_qs_test_runner.py | 4 +++- tests/tpcds.py | 31 +++++++++++++++++++++++++++++-- 4 files changed, 50 insertions(+), 18 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0983e07..56b2783 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,24 +22,22 @@ notifications: on_failure: always env: - - PG_VERSION=16 LEVEL=hardcore USE_TPCDS=0 + - PG_VERSION=16 LEVEL=hardcore USE_TPCDS=1 - PG_VERSION=16 - - PG_VERSION=15 LEVEL=hardcore USE_TPCDS=0 + - PG_VERSION=15 LEVEL=hardcore USE_TPCDS=1 - PG_VERSION=15 - - PG_VERSION=14 LEVEL=hardcore USE_TPCDS=0 + - PG_VERSION=14 LEVEL=hardcore USE_TPCDS=1 - PG_VERSION=14 - - PG_VERSION=13 LEVEL=hardcore USE_TPCDS=0 + - PG_VERSION=13 LEVEL=hardcore USE_TPCDS=1 - PG_VERSION=13 - - PG_VERSION=12 LEVEL=hardcore USE_TPCDS=0 + - PG_VERSION=12 LEVEL=hardcore USE_TPCDS=1 - PG_VERSION=12 - - PG_VERSION=11 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=11 - - PG_VERSION=10 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=10 - - PG_VERSION=9.6 LEVEL=hardcore - PG_VERSION=9.6 matrix: allow_failures: - - env: PG_VERSION=10 LEVEL=nightmare - - env: PG_VERSION=9.6 LEVEL=nightmare + - env: PG_VERSION=11 + - env: PG_VERSION=10 + - env: PG_VERSION=9.6 diff --git a/run_tests.sh b/run_tests.sh index 7e3cf79..d330d1e 100644 --- a/run_tests.sh +++ b/run_tests.sh @@ -13,6 +13,9 @@ set -ux status=0 +venv_path=tmp/env +rm -rf "$venv_path" + # global exports export PGPORT=55435 export VIRTUAL_ENV_DISABLE_PROMPT=1 @@ -148,13 +151,14 @@ if [ -f regression.diffs ]; then cat regression.diffs; fi # run python tests set +x -e -python3 -m venv /tmp/env && source /tmp/env/bin/activate && -pip install -r ./tests/requirements.txt +python3 -m venv "$venv_path" && source "$venv_path/bin/activate" +pip3 install --upgrade -t "$venv_path" -r ./tests/requirements.txt +#pip3 install -e "./$venv_path" set -e #exit virtualenv with error code -python tests/pg_qs_test_runner.py --port $PGPORT +python3 tests/pg_qs_test_runner.py --port $PGPORT if [[ "$USE_TPCDS" == "1" ]]; then - python tests/pg_qs_test_runner.py --port $PGPORT --tpc-ds-setup - python tests/pg_qs_test_runner.py --port $PGPORT --tpc-ds-run + python3 tests/pg_qs_test_runner.py --port $PGPORT --tpc-ds-setup + python3 
tests/pg_qs_test_runner.py --port $PGPORT --tpc-ds-run fi deactivate set -x @@ -179,4 +183,5 @@ gcov $CUSTOM_PG_SRC/contrib/pg_query_state/*.c $CUSTOM_PG_SRC/contrib/pg_query_s set +ux # send coverage stats to Codecov +export CODECOV_TOKEN=55ab7421-9277-45af-a329-d8b40db96b2a bash <(curl -s https://codecov.io/bash) diff --git a/tests/pg_qs_test_runner.py b/tests/pg_qs_test_runner.py index f4088a9..944f77f 100644 --- a/tests/pg_qs_test_runner.py +++ b/tests/pg_qs_test_runner.py @@ -8,9 +8,11 @@ import os import sys +sys.path.append(os.path.dirname(os.path.abspath(__file__))) +sys.path.append(os.path.abspath('tmp/env')) + import psycopg2 -sys.path.append(os.path.dirname(os.path.abspath(__file__))) from test_cases import * import tpcds diff --git a/tests/tpcds.py b/tests/tpcds.py index 1f2b6da..4e38002 100644 --- a/tests/tpcds.py +++ b/tests/tpcds.py @@ -8,6 +8,10 @@ import time import progressbar +# This actually imports progressbar2 but `import progressbar2' itself doesn't work. +# In case of problems with the progressbar/progressbar2, check that you have the +# progressbar2 installed and the path to it or venv is specified. + import psycopg2.extensions import common @@ -55,13 +59,13 @@ def run_tpcds(config): TPC_DS_STATEMENT_TIMEOUT = 20000 # statement_timeout in ms print('Preparing TPC-DS queries...') + err_count = 0 queries = [] for query_file in sorted(os.listdir('tmp_stress/tpcds-result-reproduction/query_qualification/')): with open('tmp_stress/tpcds-result-reproduction/query_qualification/%s' % query_file, 'r') as f: queries.append(f.read()) acon, = common.n_async_connect(config) - pid = acon.get_backend_pid() print('Starting TPC-DS queries...') timeout_list = [] @@ -84,8 +88,25 @@ def run_tpcds(config): PG_QS_DELAY, BEFORE_GETTING_QS_DELAY = 0.1, 0.1 BEFORE_GETTING_QS, GETTING_QS = range(2) state, n_first_getting_qs_retries = BEFORE_GETTING_QS, 0 + + pg_qs_args = { + 'config': config, + 'pid': acon.get_backend_pid() + } + while True: - result, notices = common.pg_query_state(config, pid) + try: + result, notices = common.pg_query_state(**pg_qs_args) + except Exception as e: + # do not consider the test failed if the "error in message + # queue data transmitting" is received, this may happen with + # some small probability, but if it happens too often it is + # a problem, we will handle this case after the loop + if "error in message queue data transmitting" in e.pgerror: + err_count += 1 + else: + raise e + # run state machine to determine the first getting of query state # and query finishing if state == BEFORE_GETTING_QS: @@ -109,6 +130,12 @@ def run_tpcds(config): except psycopg2.extensions.QueryCanceledError: timeout_list.append(i + 1) + if err_count > 2: + print("ERROR: error in message queue data transmitting") + raise + elif err_count > 0: + print(err_count, " times there was error in message queue data transmitting") + common.n_close((acon,)) if len(timeout_list) > 0: From 7801aa13e527bf1cc0b0bb7760e57127e7aba9a3 Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Fri, 14 Jun 2024 15:48:41 +0300 Subject: [PATCH 30/37] Improve error handling. 
--- tests/tpcds.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/tpcds.py b/tests/tpcds.py index 4e38002..bdeb408 100644 --- a/tests/tpcds.py +++ b/tests/tpcds.py @@ -26,7 +26,10 @@ def setup_tpcds(config): try: conn = psycopg2.connect(**config) cur = conn.cursor() + except Exception as e: + raise DataLoadException('Load failed: %s' % e) + try: # Create pg_query_state extension cur.execute('CREATE EXTENSION IF NOT EXISTS pg_query_state') @@ -131,8 +134,8 @@ def run_tpcds(config): timeout_list.append(i + 1) if err_count > 2: - print("ERROR: error in message queue data transmitting") - raise + print("\nERROR: error in message queue data transmitting") + raise Exception('error was received %d times'%err_count) elif err_count > 0: print(err_count, " times there was error in message queue data transmitting") From ec52e0ae8333b6d574d7a1449bc5d4c4781ecf2f Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Tue, 27 Aug 2024 12:12:45 +0300 Subject: [PATCH 31/37] Correcting confusion in variables. --- patches/runtime_explain_11.0.patch | 7 +++---- patches/runtime_explain_12.0.patch | 7 +++---- patches/runtime_explain_13.0.patch | 7 +++---- patches/runtime_explain_14.0.patch | 7 +++---- patches/runtime_explain_15.0.patch | 7 +++---- patches/runtime_explain_16.0.patch | 7 +++---- 6 files changed, 18 insertions(+), 24 deletions(-) diff --git a/patches/runtime_explain_11.0.patch b/patches/runtime_explain_11.0.patch index dddbcbe..9d12d5b 100644 --- a/patches/runtime_explain_11.0.patch +++ b/patches/runtime_explain_11.0.patch @@ -209,10 +209,9 @@ index 16a80a0ea1..b12906b005 100644 /* count the number of source rows */ - total = mtstate->mt_plans[0]->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -227,7 +226,7 @@ index 16a80a0ea1..b12906b005 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_12.0.patch b/patches/runtime_explain_12.0.patch index 1d105b2..9aa8397 100644 --- a/patches/runtime_explain_12.0.patch +++ b/patches/runtime_explain_12.0.patch @@ -222,10 +222,9 @@ index 92969636b75..fab4267a2c1 100644 /* count the number of source rows */ - total = mtstate->mt_plans[0]->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -240,7 +239,7 @@ index 92969636b75..fab4267a2c1 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_13.0.patch b/patches/runtime_explain_13.0.patch index 973ebd5..be29669 100644 --- a/patches/runtime_explain_13.0.patch +++ b/patches/runtime_explain_13.0.patch @@ -219,10 +219,9 @@ index 
20708db9f12..866948bd0c1 100644 /* count the number of source rows */ - total = mtstate->mt_plans[0]->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -237,7 +236,7 @@ index 20708db9f12..866948bd0c1 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_14.0.patch b/patches/runtime_explain_14.0.patch index 7904cc2..b266b15 100644 --- a/patches/runtime_explain_14.0.patch +++ b/patches/runtime_explain_14.0.patch @@ -219,10 +219,9 @@ index 10644dfac4..7106ed4257 100644 /* count the number of source rows */ - total = outerPlanState(mtstate)->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -237,7 +236,7 @@ index 10644dfac4..7106ed4257 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_15.0.patch b/patches/runtime_explain_15.0.patch index adab6dc..d60cea8 100644 --- a/patches/runtime_explain_15.0.patch +++ b/patches/runtime_explain_15.0.patch @@ -219,10 +219,9 @@ index 10644dfac4..7106ed4257 100644 /* count the number of source rows */ - total = outerPlanState(mtstate)->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -237,7 +236,7 @@ index 10644dfac4..7106ed4257 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_16.0.patch b/patches/runtime_explain_16.0.patch index 3d132ca..2b955e9 100644 --- a/patches/runtime_explain_16.0.patch +++ b/patches/runtime_explain_16.0.patch @@ -219,10 +219,9 @@ index 6c2e5c8a4f..74be3944d1 100644 /* count the number of source rows */ - total = outerPlanState(mtstate)->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -237,7 +236,7 @@ index 6c2e5c8a4f..74be3944d1 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - 
ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, From d01ea74a4b4c8f76f110badbcbc40b30308030f0 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Thu, 19 Sep 2024 22:05:49 +0300 Subject: [PATCH 32/37] PGPRO-10866: Add static decoration to avoid error: "no previous extern declaration for non-static variable [-Wmissing-variable-declarations]". Tags: pg_query_state --- pg_query_state.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pg_query_state.c b/pg_query_state.c index 1949643..635b967 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -101,8 +101,8 @@ static List *GetRemoteBackendQueryStates(PGPROC *leader, ExplainFormat format); /* Shared memory variables */ -shm_toc *toc = NULL; -RemoteUserIdResult *counterpart_userid = NULL; +static shm_toc *toc = NULL; +static RemoteUserIdResult *counterpart_userid = NULL; pg_qs_params *params = NULL; shm_mq *mq = NULL; From 1230ab03f36615c064c3adc6de0936f70b28e884 Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Tue, 15 Oct 2024 11:06:00 +0300 Subject: [PATCH 33/37] Updates for PostgreSQL 17. 1) Add patches for PostgreSQL 17; 2) Add alternative output to python test due to EXPLAIN output change. See fd0398fcb099. --- .travis.yml | 2 + patches/custom_signals_17.0.patch | 227 ++++++++++++++++++++++++ patches/runtime_explain_17.0.patch | 265 +++++++++++++++++++++++++++++ tests/test_cases.py | 13 +- 4 files changed, 506 insertions(+), 1 deletion(-) create mode 100644 patches/custom_signals_17.0.patch create mode 100644 patches/runtime_explain_17.0.patch diff --git a/.travis.yml b/.travis.yml index 56b2783..a9ae4c6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,6 +22,8 @@ notifications: on_failure: always env: + - PG_VERSION=17 LEVEL=hardcore USE_TPCDS=1 + - PG_VERSION=17 - PG_VERSION=16 LEVEL=hardcore USE_TPCDS=1 - PG_VERSION=16 - PG_VERSION=15 LEVEL=hardcore USE_TPCDS=1 diff --git a/patches/custom_signals_17.0.patch b/patches/custom_signals_17.0.patch new file mode 100644 index 0000000..d227104 --- /dev/null +++ b/patches/custom_signals_17.0.patch @@ -0,0 +1,227 @@ +diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c +index 4ed9ced..6e70892 100644 +--- a/src/backend/storage/ipc/procsignal.c ++++ b/src/backend/storage/ipc/procsignal.c +@@ -6,6 +6,7 @@ + * + * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California ++ * Portions Copyright (c) 2024, Postgres Professional + * + * IDENTIFICATION + * src/backend/storage/ipc/procsignal.c +@@ -96,6 +97,13 @@ typedef struct + #define BARRIER_CLEAR_BIT(flags, type) \ + ((flags) &= ~(((uint32) 1) << (uint32) (type))) + ++#define IsCustomProcSignalReason(reason) \ ++ ((reason) >= PROCSIG_CUSTOM_1 && (reason) <= PROCSIG_CUSTOM_N) ++ ++static bool CustomSignalPendings[NUM_CUSTOM_PROCSIGNALS]; ++static bool CustomSignalProcessing[NUM_CUSTOM_PROCSIGNALS]; ++static ProcSignalHandler_type CustomInterruptHandlers[NUM_CUSTOM_PROCSIGNALS]; ++ + static ProcSignalHeader *ProcSignal = NULL; + static ProcSignalSlot *MyProcSignalSlot = NULL; + +@@ -103,6 +111,8 @@ static bool CheckProcSignal(ProcSignalReason reason); + static void CleanupProcSignalState(int status, Datum arg); + static void ResetProcSignalBarrierBits(uint32 flags); + ++static void CheckAndSetCustomSignalInterrupts(void); ++ + /* + * ProcSignalShmemSize + * Compute space needed for ProcSignal's shared memory +@@ -242,6 +252,36 @@ 
CleanupProcSignalState(int status, Datum arg) + slot->pss_pid = 0; + } + ++/* ++ * RegisterCustomProcSignalHandler ++ * Assign specific handler of custom process signal with new ++ * ProcSignalReason key. ++ * ++ * This function has to be called in _PG_init function of extensions at the ++ * stage of loading shared preloaded libraries. Otherwise it throws fatal error. ++ * ++ * Return INVALID_PROCSIGNAL if all slots for custom signals are occupied. ++ */ ++ProcSignalReason ++RegisterCustomProcSignalHandler(ProcSignalHandler_type handler) ++{ ++ ProcSignalReason reason; ++ ++ if (!process_shared_preload_libraries_in_progress) ++ ereport(FATAL, (errcode(ERRCODE_INTERNAL_ERROR), ++ errmsg("cannot register custom signal after startup"))); ++ ++ /* Iterate through custom signal slots to find a free one */ ++ for (reason = PROCSIG_CUSTOM_1; reason <= PROCSIG_CUSTOM_N; reason++) ++ if (!CustomInterruptHandlers[reason - PROCSIG_CUSTOM_1]) ++ { ++ CustomInterruptHandlers[reason - PROCSIG_CUSTOM_1] = handler; ++ return reason; ++ } ++ ++ return INVALID_PROCSIGNAL; ++} ++ + /* + * SendProcSignal + * Send a signal to a Postgres process +@@ -676,5 +716,70 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) + if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN)) + HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN); + ++ CheckAndSetCustomSignalInterrupts(); ++ + SetLatch(MyLatch); + } ++ ++/* ++ * Handle receipt of an interrupt indicating any of custom process signals. ++ */ ++static void ++CheckAndSetCustomSignalInterrupts() ++{ ++ ProcSignalReason reason; ++ ++ for (reason = PROCSIG_CUSTOM_1; reason <= PROCSIG_CUSTOM_N; reason++) ++ { ++ if (CheckProcSignal(reason)) ++ { ++ ++ /* set interrupt flags */ ++ InterruptPending = true; ++ CustomSignalPendings[reason - PROCSIG_CUSTOM_1] = true; ++ } ++ } ++ ++ SetLatch(MyLatch); ++} ++ ++/* ++ * CheckAndHandleCustomSignals ++ * Check custom signal flags and call handler assigned to that signal ++ * if it is not NULL ++ * ++ * This function is called within CHECK_FOR_INTERRUPTS if interrupt occurred. ++ */ ++void ++CheckAndHandleCustomSignals(void) ++{ ++ int i; ++ ++ /* ++ * This is invoked from ProcessInterrupts(), and since some of the ++ * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential ++ * for recursive calls if more signals are received while this runs, so ++ * let's block interrupts until done. 
++ */ ++ HOLD_INTERRUPTS(); ++ ++ /* Check on expiring of custom signals and call its handlers if exist */ ++ for (i = 0; i < NUM_CUSTOM_PROCSIGNALS; i++) ++ { ++ if (!CustomSignalProcessing[i] && CustomSignalPendings[i]) ++ { ++ ProcSignalHandler_type handler; ++ ++ CustomSignalPendings[i] = false; ++ handler = CustomInterruptHandlers[i]; ++ if (handler != NULL) ++ { ++ CustomSignalProcessing[i] = true; ++ handler(); ++ CustomSignalProcessing[i] = false; ++ } ++ } ++ } ++ ++ RESUME_INTERRUPTS(); ++} +diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c +index a750dc8..e1b0be5 100644 +--- a/src/backend/tcop/postgres.c ++++ b/src/backend/tcop/postgres.c +@@ -3492,6 +3492,8 @@ ProcessInterrupts(void) + if (ParallelMessagePending) + HandleParallelMessages(); + ++ CheckAndHandleCustomSignals(); ++ + if (LogMemoryContextPending) + ProcessLogMemoryContextInterrupt(); + +diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h +index 7d290ea..f262f0c 100644 +--- a/src/include/storage/procsignal.h ++++ b/src/include/storage/procsignal.h +@@ -6,6 +6,7 @@ + * + * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California ++ * Portions Copyright (c) 2024, Postgres Professional + * + * src/include/storage/procsignal.h + * +@@ -17,6 +18,8 @@ + #include "storage/procnumber.h" + + ++#define NUM_CUSTOM_PROCSIGNALS 64 ++ + /* + * Reasons for signaling a Postgres child process (a backend or an auxiliary + * process, like checkpointer). We can cope with concurrent signals for different +@@ -29,6 +32,8 @@ + */ + typedef enum + { ++ INVALID_PROCSIGNAL = -1, /* Must be first */ ++ + PROCSIG_CATCHUP_INTERRUPT, /* sinval catchup interrupt */ + PROCSIG_NOTIFY_INTERRUPT, /* listen/notify interrupt */ + PROCSIG_PARALLEL_MESSAGE, /* message from cooperating parallel backend */ +@@ -37,6 +42,14 @@ typedef enum + PROCSIG_LOG_MEMORY_CONTEXT, /* ask backend to log the memory contexts */ + PROCSIG_PARALLEL_APPLY_MESSAGE, /* Message from parallel apply workers */ + ++ PROCSIG_CUSTOM_1, ++ /* ++ * PROCSIG_CUSTOM_2, ++ * ..., ++ * PROCSIG_CUSTOM_N-1, ++ */ ++ PROCSIG_CUSTOM_N = PROCSIG_CUSTOM_1 + NUM_CUSTOM_PROCSIGNALS - 1, ++ + /* Recovery conflict reasons */ + PROCSIG_RECOVERY_CONFLICT_FIRST, + PROCSIG_RECOVERY_CONFLICT_DATABASE = PROCSIG_RECOVERY_CONFLICT_FIRST, +@@ -56,6 +69,9 @@ typedef enum + PROCSIGNAL_BARRIER_SMGRRELEASE, /* ask smgr to close files */ + } ProcSignalBarrierType; + ++/* Handler of custom process signal */ ++typedef void (*ProcSignalHandler_type) (void); ++ + /* + * prototypes for functions in procsignal.c + */ +@@ -63,12 +79,15 @@ extern Size ProcSignalShmemSize(void); + extern void ProcSignalShmemInit(void); + + extern void ProcSignalInit(void); ++extern ProcSignalReason ++ RegisterCustomProcSignalHandler(ProcSignalHandler_type handler); + extern int SendProcSignal(pid_t pid, ProcSignalReason reason, + ProcNumber procNumber); + + extern uint64 EmitProcSignalBarrier(ProcSignalBarrierType type); + extern void WaitForProcSignalBarrier(uint64 generation); + extern void ProcessProcSignalBarrier(void); ++extern void CheckAndHandleCustomSignals(void); + + extern void procsignal_sigusr1_handler(SIGNAL_ARGS); + diff --git a/patches/runtime_explain_17.0.patch b/patches/runtime_explain_17.0.patch new file mode 100644 index 0000000..65e22b8 --- /dev/null +++ b/patches/runtime_explain_17.0.patch @@ -0,0 +1,265 @@ +diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c 
+index 18a5af6b919..73d3d6171eb 100644 +--- a/src/backend/commands/explain.c ++++ b/src/backend/commands/explain.c +@@ -18,6 +18,7 @@ + #include "commands/createas.h" + #include "commands/defrem.h" + #include "commands/prepare.h" ++#include "executor/nodeHash.h" + #include "foreign/fdwapi.h" + #include "jit/jit.h" + #include "libpq/pqformat.h" +@@ -1233,14 +1234,36 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + char *relname; + char *conname = NULL; + ++ instr_time starttimespan; ++ double total; ++ double ntuples; ++ double ncalls; ++ ++ if (!es->runtime) ++ { + /* Must clean up instrumentation state */ + InstrEndLoop(instr); ++ } ++ ++ /* Collect statistic variables */ ++ if (!INSTR_TIME_IS_ZERO(instr->starttime)) ++ { ++ INSTR_TIME_SET_CURRENT(starttimespan); ++ INSTR_TIME_SUBTRACT(starttimespan, instr->starttime); ++ } ++ else ++ INSTR_TIME_SET_ZERO(starttimespan); ++ ++ total = instr->total + INSTR_TIME_GET_DOUBLE(instr->counter) ++ + INSTR_TIME_GET_DOUBLE(starttimespan); ++ ntuples = instr->ntuples + instr->tuplecount; ++ ncalls = ntuples + !INSTR_TIME_IS_ZERO(starttimespan); + + /* + * We ignore triggers that were never invoked; they likely aren't + * relevant to the current query type. + */ +- if (instr->ntuples == 0) ++ if (ncalls == 0) + continue; + + ExplainOpenGroup("Trigger", NULL, true, es); +@@ -1266,9 +1289,9 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + appendStringInfo(es->str, " on %s", relname); + if (es->timing) + appendStringInfo(es->str, ": time=%.3f calls=%.0f\n", +- 1000.0 * instr->total, instr->ntuples); ++ 1000.0 * total, ncalls); + else +- appendStringInfo(es->str, ": calls=%.0f\n", instr->ntuples); ++ appendStringInfo(es->str, ": calls=%.0f\n", ncalls); + } + else + { +@@ -1277,9 +1300,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + ExplainPropertyText("Constraint Name", conname, es); + ExplainPropertyText("Relation", relname, es); + if (es->timing) +- ExplainPropertyFloat("Time", "ms", 1000.0 * instr->total, 3, +- es); +- ExplainPropertyFloat("Calls", NULL, instr->ntuples, 0, es); ++ ExplainPropertyFloat("Time", "ms", 1000.0 * total, 3, es); ++ ExplainPropertyFloat("Calls", NULL, ncalls, 0, es); + } + + if (conname) +@@ -1949,8 +1971,11 @@ ExplainNode(PlanState *planstate, List *ancestors, + * instrumentation results the user didn't ask for. But we do the + * InstrEndLoop call anyway, if possible, to reduce the number of cases + * auto_explain has to contend with. ++ * ++ * If flag es->stateinfo is set, i.e. when printing the current execution ++ * state, this step of cleaning up is missed. + */ +- if (planstate->instrument) ++ if (planstate->instrument && !es->runtime) + InstrEndLoop(planstate->instrument); + + if (es->analyze && +@@ -1985,7 +2010,7 @@ ExplainNode(PlanState *planstate, List *ancestors, + ExplainPropertyFloat("Actual Loops", NULL, nloops, 0, es); + } + } +- else if (es->analyze) ++ else if (es->analyze && !es->runtime) + { + if (es->format == EXPLAIN_FORMAT_TEXT) + appendStringInfoString(es->str, " (never executed)"); +@@ -2001,6 +2026,75 @@ ExplainNode(PlanState *planstate, List *ancestors, + } + } + ++ /* ++ * Print the progress of node execution at current loop. 
++ */ ++ if (planstate->instrument && es->analyze && es->runtime) ++ { ++ instr_time starttimespan; ++ double startup_sec; ++ double total_sec; ++ double rows; ++ double loop_num; ++ bool finished; ++ ++ if (!INSTR_TIME_IS_ZERO(planstate->instrument->starttime)) ++ { ++ INSTR_TIME_SET_CURRENT(starttimespan); ++ INSTR_TIME_SUBTRACT(starttimespan, planstate->instrument->starttime); ++ } ++ else ++ INSTR_TIME_SET_ZERO(starttimespan); ++ startup_sec = 1000.0 * planstate->instrument->firsttuple; ++ total_sec = 1000.0 * (INSTR_TIME_GET_DOUBLE(planstate->instrument->counter) ++ + INSTR_TIME_GET_DOUBLE(starttimespan)); ++ rows = planstate->instrument->tuplecount; ++ loop_num = planstate->instrument->nloops + 1; ++ ++ finished = planstate->instrument->nloops > 0 ++ && !planstate->instrument->running ++ && INSTR_TIME_IS_ZERO(starttimespan); ++ ++ if (!finished) ++ { ++ ExplainOpenGroup("Current loop", "Current loop", true, es); ++ if (es->format == EXPLAIN_FORMAT_TEXT) ++ { ++ if (es->timing) ++ { ++ if (planstate->instrument->running) ++ appendStringInfo(es->str, ++ " (Current loop: actual time=%.3f..%.3f rows=%.0f, loop number=%.0f)", ++ startup_sec, total_sec, rows, loop_num); ++ else ++ appendStringInfo(es->str, ++ " (Current loop: running time=%.3f actual rows=0, loop number=%.0f)", ++ total_sec, loop_num); ++ } ++ else ++ appendStringInfo(es->str, ++ " (Current loop: actual rows=%.0f, loop number=%.0f)", ++ rows, loop_num); ++ } ++ else ++ { ++ ExplainPropertyFloat("Actual Loop Number", NULL, loop_num, 0, es); ++ if (es->timing) ++ { ++ if (planstate->instrument->running) ++ { ++ ExplainPropertyFloat("Actual Startup Time", NULL, startup_sec, 3, es); ++ ExplainPropertyFloat("Actual Total Time", NULL, total_sec, 3, es); ++ } ++ else ++ ExplainPropertyFloat("Running Time", NULL, total_sec, 3, es); ++ } ++ ExplainPropertyFloat("Actual Rows", NULL, rows, 0, es); ++ } ++ ExplainCloseGroup("Current loop", "Current loop", true, es); ++ } ++ } ++ + /* in text format, first line ends here */ + if (es->format == EXPLAIN_FORMAT_TEXT) + appendStringInfoChar(es->str, '\n'); +@@ -2416,6 +2510,9 @@ ExplainNode(PlanState *planstate, List *ancestors, + + /* Prepare per-worker buffer/WAL usage */ + if (es->workers_state && (es->buffers || es->wal) && es->verbose) ++ /* Show worker detail after query execution */ ++ if (es->analyze && es->verbose && planstate->worker_instrument ++ && !es->runtime) + { + WorkerInstrumentation *w = planstate->worker_instrument; + +@@ -3403,6 +3500,11 @@ show_hash_info(HashState *hashstate, ExplainState *es) + memcpy(&hinstrument, hashstate->hinstrument, + sizeof(HashInstrumentation)); + ++ if (hashstate->hashtable) ++ { ++ ExecHashAccumInstrumentation(&hinstrument, hashstate->hashtable); ++ } ++ + /* + * Merge results from workers. In the parallel-oblivious case, the + * results from all participants should be identical, except where +@@ -3937,20 +4039,16 @@ show_instrumentation_count(const char *qlabel, int which, + if (!es->analyze || !planstate->instrument) + return; + ++ nloops = planstate->instrument->nloops; + if (which == 2) +- nfiltered = planstate->instrument->nfiltered2; ++ nfiltered = ((nloops > 0) ? planstate->instrument->nfiltered2 / nloops : 0); + else +- nfiltered = planstate->instrument->nfiltered1; ++ nfiltered = ((nloops > 0) ? 
planstate->instrument->nfiltered1 / nloops : 0); + nloops = planstate->instrument->nloops; + + /* In text mode, suppress zero counts; they're not interesting enough */ + if (nfiltered > 0 || es->format != EXPLAIN_FORMAT_TEXT) +- { +- if (nloops > 0) +- ExplainPropertyFloat(qlabel, NULL, nfiltered / nloops, 0, es); +- else +- ExplainPropertyFloat(qlabel, NULL, 0.0, 0, es); +- } ++ ExplainPropertyFloat(qlabel, NULL, nfiltered, 0, es); + } + + /* +@@ -4617,15 +4715,27 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, + double insert_path; + double other_path; + +- InstrEndLoop(outerPlanState(mtstate)->instrument); ++ if (!es->runtime) ++ InstrEndLoop(outerPlanState(mtstate)->instrument); + + /* count the number of source rows */ +- total = outerPlanState(mtstate)->instrument->ntuples; + other_path = mtstate->ps.instrument->ntuples2; +- insert_path = total - other_path; + +- ExplainPropertyFloat("Tuples Inserted", NULL, +- insert_path, 0, es); ++ /* ++ * Insert occurs after extracting row from subplan and in runtime mode ++ * we can appear between these two operations - situation when ++ * total > insert_path + other_path. Therefore we don't know exactly ++ * whether last row from subplan is inserted. ++ * We don't print inserted tuples in runtime mode in order to not print ++ * inconsistent data ++ */ ++ if (!es->runtime) ++ { ++ total = outerPlanState(mtstate)->instrument->ntuples; ++ insert_path = total - other_path; ++ ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); ++ } ++ + ExplainPropertyFloat("Conflicting Tuples", NULL, + other_path, 0, es); + } +diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h +index 3ab0aae78f7..3644c0db116 100644 +--- a/src/include/commands/explain.h ++++ b/src/include/commands/explain.h +@@ -57,6 +57,8 @@ typedef struct ExplainState + bool generic; /* generate a generic plan */ + ExplainSerializeOption serialize; /* serialize the query's output? 
*/ + ExplainFormat format; /* output format */ ++ bool runtime; /* print intermediate state of query execution, ++ not after completion */ + /* state for output formatting --- not reset for each new plan tree */ + int indent; /* current indentation level */ + List *grouping_stack; /* format-specific grouping state */ diff --git a/tests/test_cases.py b/tests/test_cases.py index c866f58..498484b 100644 --- a/tests/test_cases.py +++ b/tests/test_cases.py @@ -110,6 +110,17 @@ def test_nested_call(config): expected = 'Function Scan on n_join_foo_bar (Current loop: actual rows=0, loop number=1)' expected_nested = r"""Result \(Current loop: actual rows=0, loop number=1\) InitPlan 1 \(returns \$0\) + -> Aggregate \(Current loop: actual rows=0, loop number=1\) + -> Hash Join \(Current loop: actual rows=\d+, loop number=1\) + Hash Cond: \(foo.c1 = bar.c1\) + Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) + -> Seq Scan on foo \(Current loop: actual rows=\d+, loop number=1\) + -> Hash \(Current loop: actual rows=500000, loop number=1\) + Buckets: \d+ Batches: \d+ Memory Usage: \d+kB + -> Seq Scan on bar \(Current loop: actual rows=\d+, loop number=1\)""" + + expected_nested_2 = r"""Result \(Current loop: actual rows=0, loop number=1\) + InitPlan 1 -> Aggregate \(Current loop: actual rows=0, loop number=1\) -> Hash Join \(Current loop: actual rows=\d+, loop number=1\) Hash Cond: \(foo.c1 = bar.c1\) @@ -136,7 +147,7 @@ def test_nested_call(config): assert qs[0][2] == call_function assert qs[0][3] == expected assert qs[1][2] == nested_query1 or qs[1][2] == nested_query2 - assert re.match(expected_nested, qs[1][3]) + assert re.match(expected_nested, qs[1][3]) or re.match(expected_nested_2, qs[1][3]) assert qs[0][4] == qs[1][4] == None assert len(notices) == 0 From 0c6e36aec1bde12a74326de9bb03685a6772b6b7 Mon Sep 17 00:00:00 2001 From: Ekaterina Sokolova Date: Thu, 31 Oct 2024 17:43:49 +0300 Subject: [PATCH 34/37] Remove checks with timeout problems. --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index a9ae4c6..06bae7b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,7 +22,6 @@ notifications: on_failure: always env: - - PG_VERSION=17 LEVEL=hardcore USE_TPCDS=1 - PG_VERSION=17 - PG_VERSION=16 LEVEL=hardcore USE_TPCDS=1 - PG_VERSION=16 @@ -40,6 +39,7 @@ env: matrix: allow_failures: + - env: PG_VERSION=13 LEVEL=hardcore USE_TPCDS=1 - env: PG_VERSION=11 - env: PG_VERSION=10 - env: PG_VERSION=9.6 From 154e5f5f6ed5ddb2f14b208e84d7d2247ea4669d Mon Sep 17 00:00:00 2001 From: Arseny Kositsin Date: Mon, 11 Nov 2024 13:54:17 +0300 Subject: [PATCH 35/37] [PGPRO-11597] Redesigned the launch of isolation tests "CREATE/DROP EXTENSION" has been removed from spec. Instead, the "ISOLATION_OPTS" variable is used. 
Tags: pg_query_state --- .travis.yml | 2 -- Makefile | 10 ++-------- README.md | 2 +- specs/corner_cases.spec | 2 -- 4 files changed, 3 insertions(+), 13 deletions(-) diff --git a/.travis.yml b/.travis.yml index 06bae7b..c272005 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,13 +33,11 @@ env: - PG_VERSION=13 - PG_VERSION=12 LEVEL=hardcore USE_TPCDS=1 - PG_VERSION=12 - - PG_VERSION=11 - PG_VERSION=10 - PG_VERSION=9.6 matrix: allow_failures: - env: PG_VERSION=13 LEVEL=hardcore USE_TPCDS=1 - - env: PG_VERSION=11 - env: PG_VERSION=10 - env: PG_VERSION=9.6 diff --git a/Makefile b/Makefile index 4468c51..c96aae2 100644 --- a/Makefile +++ b/Makefile @@ -13,14 +13,8 @@ EXTRA_CLEAN = ./isolation_output $(EXTENSION)--$(EXTVERSION).sql \ Dockerfile ./tests/*.pyc ./tmp_stress ISOLATION = corner_cases -# -# PG11 doesn't support ISOLATION_OPTS variable. We have to use -# "CREATE/DROP EXTENTION" command in spec. -# -# One day, when we'll get rid of PG11, it will be possible to uncomment this -# variable and remove "CREATE EXTENTION" from spec. -# -# ISOLATION_OPTS = --load-extension=pg_query_state + +ISOLATION_OPTS = --load-extension=pg_query_state ifdef USE_PGXS PG_CONFIG ?= pg_config diff --git a/README.md b/README.md index fba15cd..6c983c1 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ To install `pg_query_state`, please apply corresponding patches `custom_signal_( To do this, run the following commands from the postgresql directory: ``` patch -p1 < path_to_pg_query_state_folder/patches/runtime_explain_(PG_VERSION).patch -patch -p1 < path_to_pg_query_state_folder/patches/custom_signal_(PG_VERSION).patch +patch -p1 < path_to_pg_query_state_folder/patches/custom_signals_(PG_VERSION).patch ``` Then execute this in the module's directory: diff --git a/specs/corner_cases.spec b/specs/corner_cases.spec index c9f3fde..315b676 100644 --- a/specs/corner_cases.spec +++ b/specs/corner_cases.spec @@ -1,6 +1,5 @@ setup { - CREATE EXTENSION pg_query_state; CREATE ROLE alice; CREATE ROLE bob; CREATE ROLE super SUPERUSER; @@ -31,7 +30,6 @@ teardown DROP ROLE super; DROP ROLE bob; DROP ROLE alice; - DROP EXTENSION pg_query_state; } session "s1" From 5a0f75b68774bd23a92fc5612d48e837d0daf08b Mon Sep 17 00:00:00 2001 From: Zharkov Roman Date: Tue, 21 Jan 2025 15:55:49 +0300 Subject: [PATCH 36/37] Add meson.build file to support building from the contrib source tree. --- meson.build | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 meson.build diff --git a/meson.build b/meson.build new file mode 100644 index 0000000..b2d4248 --- /dev/null +++ b/meson.build @@ -0,0 +1,53 @@ +# Copyright (c) 2025, Postgres Professional + +# Does not support the PGXS infrastructure at this time. Please, compile as part +# of the contrib source tree. 
+
+pg_query_state_sources = files(
+  'pg_query_state.c',
+  'signal_handler.c',
+)
+
+if host_system == 'windows'
+  pg_query_state_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
+    '--NAME', 'pg_query_state',
+    '--FILEDESC', 'pg_query_state - provides facility to know the current state of query execution on working backend.',])
+endif
+
+pg_query_state = shared_module('pg_query_state',
+  pg_query_state_sources,
+  kwargs: contrib_mod_args,
+)
+contrib_targets += pg_query_state
+
+extversion = '1.1'
+output_name = 'pg_query_state--' + extversion + '.sql'
+
+configure_file(
+  input: 'init.sql',
+  output: output_name,
+  copy: true,
+  install: true,
+  install_dir: contrib_data_args['install_dir'],
+)
+
+install_data(
+  'pg_query_state.control',
+  'pg_query_state--1.0--1.1.sql',
+  kwargs: contrib_data_args,
+)
+
+tests += {
+  'name': 'pg_query_state',
+  'sd': meson.current_source_dir(),
+  'bd': meson.current_build_dir(),
+  'isolation': {
+    'specs': [
+      'corner_cases',
+    ],
+    'regress_args': [
+      '--temp-config', files('test.conf'),
+      '--load-extension=pg_query_state',
+    ],
+  },
+}

From ee0d4a809821a2a32215aa9f8dcaaa14279c90d8 Mon Sep 17 00:00:00 2001
From: Ekaterina Sokolova
Date: Tue, 28 Jan 2025 15:48:16 +0300
Subject: [PATCH 37/37] Disable TPC-DS tests in Travis CI because of using an outdated third-party library.

---
 .travis.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index c272005..0812444 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -23,15 +23,15 @@ notifications:
 
 env:
   - PG_VERSION=17
-  - PG_VERSION=16 LEVEL=hardcore USE_TPCDS=1
+  - PG_VERSION=16 LEVEL=hardcore USE_TPCDS=0
   - PG_VERSION=16
-  - PG_VERSION=15 LEVEL=hardcore USE_TPCDS=1
+  - PG_VERSION=15 LEVEL=hardcore USE_TPCDS=0
   - PG_VERSION=15
-  - PG_VERSION=14 LEVEL=hardcore USE_TPCDS=1
+  - PG_VERSION=14 LEVEL=hardcore USE_TPCDS=0
   - PG_VERSION=14
-  - PG_VERSION=13 LEVEL=hardcore USE_TPCDS=1
+  - PG_VERSION=13 LEVEL=hardcore USE_TPCDS=0
   - PG_VERSION=13
-  - PG_VERSION=12 LEVEL=hardcore USE_TPCDS=1
+  - PG_VERSION=12 LEVEL=hardcore USE_TPCDS=0
   - PG_VERSION=12
   - PG_VERSION=10
   - PG_VERSION=9.6
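For reference, the "Current loop" progress annotations exercised by the expected-output patterns in the test changes above can be observed with a minimal two-session sketch like the following. It assumes the extension is built, preloaded and created in the test database; the backend PID, the foo/bar tables and all row counts in the comments are illustrative placeholders rather than captured output.

```
-- Session 1: note this backend's PID, then start a long-running query.
SELECT pg_backend_pid();    -- suppose it returns 12345
SELECT count(*) FROM foo JOIN bar ON foo.c1 = bar.c1;

-- Session 2: while the query above is still running, extract its state.
SELECT * FROM pg_query_state(12345);
-- The plan column is expected to show per-node progress, for example:
--   Aggregate (Current loop: actual rows=0, loop number=1)
--     ->  Hash Join (Current loop: actual rows=154289, loop number=1)
--           Hash Cond: (foo.c1 = bar.c1)
--           ->  Seq Scan on foo (Current loop: actual rows=250000, loop number=1)
--           ->  Hash (Current loop: actual rows=500000, loop number=1)
--                 Buckets: 524288  Batches: 1  Memory Usage: 21000kB
--                 ->  Seq Scan on bar (Current loop: actual rows=500000, loop number=1)
```

Setting the function's timing flag switches the text output to the "actual time=..." variant emitted by the ExplainNode() hunk above; nodes that have finished their current loop are suppressed by the `finished` check in the same hunk.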