diff --git a/.travis.yml b/.travis.yml index 0983e07..0812444 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,6 +22,7 @@ notifications: on_failure: always env: + - PG_VERSION=17 - PG_VERSION=16 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=16 - PG_VERSION=15 LEVEL=hardcore USE_TPCDS=0 @@ -32,14 +33,11 @@ env: - PG_VERSION=13 - PG_VERSION=12 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=12 - - PG_VERSION=11 LEVEL=hardcore USE_TPCDS=0 - - PG_VERSION=11 - - PG_VERSION=10 LEVEL=hardcore USE_TPCDS=0 - PG_VERSION=10 - - PG_VERSION=9.6 LEVEL=hardcore - PG_VERSION=9.6 matrix: allow_failures: - - env: PG_VERSION=10 LEVEL=nightmare - - env: PG_VERSION=9.6 LEVEL=nightmare + - env: PG_VERSION=13 LEVEL=hardcore USE_TPCDS=1 + - env: PG_VERSION=10 + - env: PG_VERSION=9.6 diff --git a/Makefile b/Makefile index 4468c51..c96aae2 100644 --- a/Makefile +++ b/Makefile @@ -13,14 +13,8 @@ EXTRA_CLEAN = ./isolation_output $(EXTENSION)--$(EXTVERSION).sql \ Dockerfile ./tests/*.pyc ./tmp_stress ISOLATION = corner_cases -# -# PG11 doesn't support ISOLATION_OPTS variable. We have to use -# "CREATE/DROP EXTENTION" command in spec. -# -# One day, when we'll get rid of PG11, it will be possible to uncomment this -# variable and remove "CREATE EXTENTION" from spec. 
-# -# ISOLATION_OPTS = --load-extension=pg_query_state + +ISOLATION_OPTS = --load-extension=pg_query_state ifdef USE_PGXS PG_CONFIG ?= pg_config diff --git a/README.md b/README.md index fba15cd..6c983c1 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ To install `pg_query_state`, please apply corresponding patches `custom_signal_( To do this, run the following commands from the postgresql directory: ``` patch -p1 < path_to_pg_query_state_folder/patches/runtime_explain_(PG_VERSION).patch -patch -p1 < path_to_pg_query_state_folder/patches/custom_signal_(PG_VERSION).patch +patch -p1 < path_to_pg_query_state_folder/patches/custom_signals_(PG_VERSION).patch ``` Then execute this in the module's directory: diff --git a/meson.build b/meson.build new file mode 100644 index 0000000..b2d4248 --- /dev/null +++ b/meson.build @@ -0,0 +1,53 @@ +# Copyright (c) 2025, Postgres Professional + +# Does not support the PGXS infrastructure at this time. Please, compile as part +# of the contrib source tree. 
+ +pg_query_state_sources = files( + 'pg_query_state.c', + 'signal_handler.c', +) + +if host_system == 'windows' + pg_query_state_sources += rc_lib_gen.process(win32ver_rc, extra_args: [ + '--NAME', 'pg_query_state', + '--FILEDESC', 'pg_query_state - provides facility to know the current state of query execution on working backend.',]) +endif + +pg_query_state = shared_module('pg_query_state', + pg_query_state_sources, + kwargs: contrib_mod_args, +) +contrib_targets += pg_query_state + +extversion = '1.1' +output_name = 'pg_query_state--' + extversion + '.sql' + +configure_file( + input: 'init.sql', + output: output_name, + copy: true, + install: true, + install_dir: contrib_data_args['install_dir'], +) + +install_data( + 'pg_query_state.control', + 'pg_query_state--1.0--1.1.sql', + kwargs: contrib_data_args, +) + +tests += { + 'name': 'pg_query_state', + 'sd': meson.current_source_dir(), + 'bd': meson.current_build_dir(), + 'isolation': { + 'specs': [ + 'corner_cases', + ], + 'regress_args': [ + '--temp-config', files('test.conf'), + '--load-extension=pg_query_state', + ], + }, +} diff --git a/patches/custom_signals_17.0.patch b/patches/custom_signals_17.0.patch new file mode 100644 index 0000000..d227104 --- /dev/null +++ b/patches/custom_signals_17.0.patch @@ -0,0 +1,227 @@ +diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c +index 4ed9ced..6e70892 100644 +--- a/src/backend/storage/ipc/procsignal.c ++++ b/src/backend/storage/ipc/procsignal.c +@@ -6,6 +6,7 @@ + * + * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California ++ * Portions Copyright (c) 2024, Postgres Professional + * + * IDENTIFICATION + * src/backend/storage/ipc/procsignal.c +@@ -96,6 +97,13 @@ typedef struct + #define BARRIER_CLEAR_BIT(flags, type) \ + ((flags) &= ~(((uint32) 1) << (uint32) (type))) + ++#define IsCustomProcSignalReason(reason) \ ++ ((reason) >= 
PROCSIG_CUSTOM_1 && (reason) <= PROCSIG_CUSTOM_N) ++ ++static bool CustomSignalPendings[NUM_CUSTOM_PROCSIGNALS]; ++static bool CustomSignalProcessing[NUM_CUSTOM_PROCSIGNALS]; ++static ProcSignalHandler_type CustomInterruptHandlers[NUM_CUSTOM_PROCSIGNALS]; ++ + static ProcSignalHeader *ProcSignal = NULL; + static ProcSignalSlot *MyProcSignalSlot = NULL; + +@@ -103,6 +111,8 @@ static bool CheckProcSignal(ProcSignalReason reason); + static void CleanupProcSignalState(int status, Datum arg); + static void ResetProcSignalBarrierBits(uint32 flags); + ++static void CheckAndSetCustomSignalInterrupts(void); ++ + /* + * ProcSignalShmemSize + * Compute space needed for ProcSignal's shared memory +@@ -242,6 +252,36 @@ CleanupProcSignalState(int status, Datum arg) + slot->pss_pid = 0; + } + ++/* ++ * RegisterCustomProcSignalHandler ++ * Assign specific handler of custom process signal with new ++ * ProcSignalReason key. ++ * ++ * This function has to be called in _PG_init function of extensions at the ++ * stage of loading shared preloaded libraries. Otherwise it throws fatal error. ++ * ++ * Return INVALID_PROCSIGNAL if all slots for custom signals are occupied. 
++ */ ++ProcSignalReason ++RegisterCustomProcSignalHandler(ProcSignalHandler_type handler) ++{ ++ ProcSignalReason reason; ++ ++ if (!process_shared_preload_libraries_in_progress) ++ ereport(FATAL, (errcode(ERRCODE_INTERNAL_ERROR), ++ errmsg("cannot register custom signal after startup"))); ++ ++ /* Iterate through custom signal slots to find a free one */ ++ for (reason = PROCSIG_CUSTOM_1; reason <= PROCSIG_CUSTOM_N; reason++) ++ if (!CustomInterruptHandlers[reason - PROCSIG_CUSTOM_1]) ++ { ++ CustomInterruptHandlers[reason - PROCSIG_CUSTOM_1] = handler; ++ return reason; ++ } ++ ++ return INVALID_PROCSIGNAL; ++} ++ + /* + * SendProcSignal + * Send a signal to a Postgres process +@@ -676,5 +716,70 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) + if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN)) + HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN); + ++ CheckAndSetCustomSignalInterrupts(); ++ + SetLatch(MyLatch); + } ++ ++/* ++ * Handle receipt of an interrupt indicating any of custom process signals. ++ */ ++static void ++CheckAndSetCustomSignalInterrupts() ++{ ++ ProcSignalReason reason; ++ ++ for (reason = PROCSIG_CUSTOM_1; reason <= PROCSIG_CUSTOM_N; reason++) ++ { ++ if (CheckProcSignal(reason)) ++ { ++ ++ /* set interrupt flags */ ++ InterruptPending = true; ++ CustomSignalPendings[reason - PROCSIG_CUSTOM_1] = true; ++ } ++ } ++ ++ SetLatch(MyLatch); ++} ++ ++/* ++ * CheckAndHandleCustomSignals ++ * Check custom signal flags and call handler assigned to that signal ++ * if it is not NULL ++ * ++ * This function is called within CHECK_FOR_INTERRUPTS if interrupt occurred. ++ */ ++void ++CheckAndHandleCustomSignals(void) ++{ ++ int i; ++ ++ /* ++ * This is invoked from ProcessInterrupts(), and since some of the ++ * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential ++ * for recursive calls if more signals are received while this runs, so ++ * let's block interrupts until done. 
++ */ ++ HOLD_INTERRUPTS(); ++ ++ /* Check on expiring of custom signals and call its handlers if exist */ ++ for (i = 0; i < NUM_CUSTOM_PROCSIGNALS; i++) ++ { ++ if (!CustomSignalProcessing[i] && CustomSignalPendings[i]) ++ { ++ ProcSignalHandler_type handler; ++ ++ CustomSignalPendings[i] = false; ++ handler = CustomInterruptHandlers[i]; ++ if (handler != NULL) ++ { ++ CustomSignalProcessing[i] = true; ++ handler(); ++ CustomSignalProcessing[i] = false; ++ } ++ } ++ } ++ ++ RESUME_INTERRUPTS(); ++} +diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c +index a750dc8..e1b0be5 100644 +--- a/src/backend/tcop/postgres.c ++++ b/src/backend/tcop/postgres.c +@@ -3492,6 +3492,8 @@ ProcessInterrupts(void) + if (ParallelMessagePending) + HandleParallelMessages(); + ++ CheckAndHandleCustomSignals(); ++ + if (LogMemoryContextPending) + ProcessLogMemoryContextInterrupt(); + +diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h +index 7d290ea..f262f0c 100644 +--- a/src/include/storage/procsignal.h ++++ b/src/include/storage/procsignal.h +@@ -6,6 +6,7 @@ + * + * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California ++ * Portions Copyright (c) 2024, Postgres Professional + * + * src/include/storage/procsignal.h + * +@@ -17,6 +18,8 @@ + #include "storage/procnumber.h" + + ++#define NUM_CUSTOM_PROCSIGNALS 64 ++ + /* + * Reasons for signaling a Postgres child process (a backend or an auxiliary + * process, like checkpointer). 
We can cope with concurrent signals for different +@@ -29,6 +32,8 @@ + */ + typedef enum + { ++ INVALID_PROCSIGNAL = -1, /* Must be first */ ++ + PROCSIG_CATCHUP_INTERRUPT, /* sinval catchup interrupt */ + PROCSIG_NOTIFY_INTERRUPT, /* listen/notify interrupt */ + PROCSIG_PARALLEL_MESSAGE, /* message from cooperating parallel backend */ +@@ -37,6 +42,14 @@ typedef enum + PROCSIG_LOG_MEMORY_CONTEXT, /* ask backend to log the memory contexts */ + PROCSIG_PARALLEL_APPLY_MESSAGE, /* Message from parallel apply workers */ + ++ PROCSIG_CUSTOM_1, ++ /* ++ * PROCSIG_CUSTOM_2, ++ * ..., ++ * PROCSIG_CUSTOM_N-1, ++ */ ++ PROCSIG_CUSTOM_N = PROCSIG_CUSTOM_1 + NUM_CUSTOM_PROCSIGNALS - 1, ++ + /* Recovery conflict reasons */ + PROCSIG_RECOVERY_CONFLICT_FIRST, + PROCSIG_RECOVERY_CONFLICT_DATABASE = PROCSIG_RECOVERY_CONFLICT_FIRST, +@@ -56,6 +69,9 @@ typedef enum + PROCSIGNAL_BARRIER_SMGRRELEASE, /* ask smgr to close files */ + } ProcSignalBarrierType; + ++/* Handler of custom process signal */ ++typedef void (*ProcSignalHandler_type) (void); ++ + /* + * prototypes for functions in procsignal.c + */ +@@ -63,12 +79,15 @@ extern Size ProcSignalShmemSize(void); + extern void ProcSignalShmemInit(void); + + extern void ProcSignalInit(void); ++extern ProcSignalReason ++ RegisterCustomProcSignalHandler(ProcSignalHandler_type handler); + extern int SendProcSignal(pid_t pid, ProcSignalReason reason, + ProcNumber procNumber); + + extern uint64 EmitProcSignalBarrier(ProcSignalBarrierType type); + extern void WaitForProcSignalBarrier(uint64 generation); + extern void ProcessProcSignalBarrier(void); ++extern void CheckAndHandleCustomSignals(void); + + extern void procsignal_sigusr1_handler(SIGNAL_ARGS); + diff --git a/patches/runtime_explain_11.0.patch b/patches/runtime_explain_11.0.patch index dddbcbe..9d12d5b 100644 --- a/patches/runtime_explain_11.0.patch +++ b/patches/runtime_explain_11.0.patch @@ -209,10 +209,9 @@ index 16a80a0ea1..b12906b005 100644 /* count the number of source rows */ - 
total = mtstate->mt_plans[0]->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -227,7 +226,7 @@ index 16a80a0ea1..b12906b005 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_12.0.patch b/patches/runtime_explain_12.0.patch index 1d105b2..9aa8397 100644 --- a/patches/runtime_explain_12.0.patch +++ b/patches/runtime_explain_12.0.patch @@ -222,10 +222,9 @@ index 92969636b75..fab4267a2c1 100644 /* count the number of source rows */ - total = mtstate->mt_plans[0]->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -240,7 +239,7 @@ index 92969636b75..fab4267a2c1 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_13.0.patch b/patches/runtime_explain_13.0.patch index 973ebd5..be29669 100644 --- a/patches/runtime_explain_13.0.patch +++ b/patches/runtime_explain_13.0.patch @@ -219,10 +219,9 @@ index 20708db9f12..866948bd0c1 100644 /* count the number of source rows */ - total = mtstate->mt_plans[0]->instrument->ntuples; -- 
other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -237,7 +236,7 @@ index 20708db9f12..866948bd0c1 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_14.0.patch b/patches/runtime_explain_14.0.patch index 7904cc2..b266b15 100644 --- a/patches/runtime_explain_14.0.patch +++ b/patches/runtime_explain_14.0.patch @@ -219,10 +219,9 @@ index 10644dfac4..7106ed4257 100644 /* count the number of source rows */ - total = outerPlanState(mtstate)->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -237,7 +236,7 @@ index 10644dfac4..7106ed4257 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_15.0.patch b/patches/runtime_explain_15.0.patch index adab6dc..d60cea8 100644 --- a/patches/runtime_explain_15.0.patch +++ b/patches/runtime_explain_15.0.patch @@ -219,10 +219,9 @@ index 10644dfac4..7106ed4257 100644 /* count the number of source rows */ - total = outerPlanState(mtstate)->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + 
other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -237,7 +236,7 @@ index 10644dfac4..7106ed4257 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_16.0.patch b/patches/runtime_explain_16.0.patch index 3d132ca..2b955e9 100644 --- a/patches/runtime_explain_16.0.patch +++ b/patches/runtime_explain_16.0.patch @@ -219,10 +219,9 @@ index 6c2e5c8a4f..74be3944d1 100644 /* count the number of source rows */ - total = outerPlanState(mtstate)->instrument->ntuples; -- other_path = mtstate->ps.instrument->ntuples2; + other_path = mtstate->ps.instrument->ntuples2; - insert_path = total - other_path; -+ other_path = mtstate->ps.instrument->nfiltered2; -+ + + /* + * Insert occurs after extracting row from subplan and in runtime mode + * we can appear between these two operations - situation when @@ -237,7 +236,7 @@ index 6c2e5c8a4f..74be3944d1 100644 + insert_path = total - other_path; + ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); + } - ++ - ExplainPropertyFloat("Tuples Inserted", NULL, - insert_path, 0, es); ExplainPropertyFloat("Conflicting Tuples", NULL, diff --git a/patches/runtime_explain_17.0.patch b/patches/runtime_explain_17.0.patch new file mode 100644 index 0000000..65e22b8 --- /dev/null +++ b/patches/runtime_explain_17.0.patch @@ -0,0 +1,265 @@ +diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c +index 18a5af6b919..73d3d6171eb 100644 +--- a/src/backend/commands/explain.c ++++ b/src/backend/commands/explain.c +@@ -18,6 +18,7 @@ + #include "commands/createas.h" + #include 
"commands/defrem.h" + #include "commands/prepare.h" ++#include "executor/nodeHash.h" + #include "foreign/fdwapi.h" + #include "jit/jit.h" + #include "libpq/pqformat.h" +@@ -1233,14 +1234,36 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + char *relname; + char *conname = NULL; + ++ instr_time starttimespan; ++ double total; ++ double ntuples; ++ double ncalls; ++ ++ if (!es->runtime) ++ { + /* Must clean up instrumentation state */ + InstrEndLoop(instr); ++ } ++ ++ /* Collect statistic variables */ ++ if (!INSTR_TIME_IS_ZERO(instr->starttime)) ++ { ++ INSTR_TIME_SET_CURRENT(starttimespan); ++ INSTR_TIME_SUBTRACT(starttimespan, instr->starttime); ++ } ++ else ++ INSTR_TIME_SET_ZERO(starttimespan); ++ ++ total = instr->total + INSTR_TIME_GET_DOUBLE(instr->counter) ++ + INSTR_TIME_GET_DOUBLE(starttimespan); ++ ntuples = instr->ntuples + instr->tuplecount; ++ ncalls = ntuples + !INSTR_TIME_IS_ZERO(starttimespan); + + /* + * We ignore triggers that were never invoked; they likely aren't + * relevant to the current query type. 
+ */ +- if (instr->ntuples == 0) ++ if (ncalls == 0) + continue; + + ExplainOpenGroup("Trigger", NULL, true, es); +@@ -1266,9 +1289,9 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + appendStringInfo(es->str, " on %s", relname); + if (es->timing) + appendStringInfo(es->str, ": time=%.3f calls=%.0f\n", +- 1000.0 * instr->total, instr->ntuples); ++ 1000.0 * total, ncalls); + else +- appendStringInfo(es->str, ": calls=%.0f\n", instr->ntuples); ++ appendStringInfo(es->str, ": calls=%.0f\n", ncalls); + } + else + { +@@ -1277,9 +1300,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) + ExplainPropertyText("Constraint Name", conname, es); + ExplainPropertyText("Relation", relname, es); + if (es->timing) +- ExplainPropertyFloat("Time", "ms", 1000.0 * instr->total, 3, +- es); +- ExplainPropertyFloat("Calls", NULL, instr->ntuples, 0, es); ++ ExplainPropertyFloat("Time", "ms", 1000.0 * total, 3, es); ++ ExplainPropertyFloat("Calls", NULL, ncalls, 0, es); + } + + if (conname) +@@ -1949,8 +1971,11 @@ ExplainNode(PlanState *planstate, List *ancestors, + * instrumentation results the user didn't ask for. But we do the + * InstrEndLoop call anyway, if possible, to reduce the number of cases + * auto_explain has to contend with. ++ * ++ * If flag es->stateinfo is set, i.e. when printing the current execution ++ * state, this step of cleaning up is missed. 
+ */ +- if (planstate->instrument) ++ if (planstate->instrument && !es->runtime) + InstrEndLoop(planstate->instrument); + + if (es->analyze && +@@ -1985,7 +2010,7 @@ ExplainNode(PlanState *planstate, List *ancestors, + ExplainPropertyFloat("Actual Loops", NULL, nloops, 0, es); + } + } +- else if (es->analyze) ++ else if (es->analyze && !es->runtime) + { + if (es->format == EXPLAIN_FORMAT_TEXT) + appendStringInfoString(es->str, " (never executed)"); +@@ -2001,6 +2026,75 @@ ExplainNode(PlanState *planstate, List *ancestors, + } + } + ++ /* ++ * Print the progress of node execution at current loop. ++ */ ++ if (planstate->instrument && es->analyze && es->runtime) ++ { ++ instr_time starttimespan; ++ double startup_sec; ++ double total_sec; ++ double rows; ++ double loop_num; ++ bool finished; ++ ++ if (!INSTR_TIME_IS_ZERO(planstate->instrument->starttime)) ++ { ++ INSTR_TIME_SET_CURRENT(starttimespan); ++ INSTR_TIME_SUBTRACT(starttimespan, planstate->instrument->starttime); ++ } ++ else ++ INSTR_TIME_SET_ZERO(starttimespan); ++ startup_sec = 1000.0 * planstate->instrument->firsttuple; ++ total_sec = 1000.0 * (INSTR_TIME_GET_DOUBLE(planstate->instrument->counter) ++ + INSTR_TIME_GET_DOUBLE(starttimespan)); ++ rows = planstate->instrument->tuplecount; ++ loop_num = planstate->instrument->nloops + 1; ++ ++ finished = planstate->instrument->nloops > 0 ++ && !planstate->instrument->running ++ && INSTR_TIME_IS_ZERO(starttimespan); ++ ++ if (!finished) ++ { ++ ExplainOpenGroup("Current loop", "Current loop", true, es); ++ if (es->format == EXPLAIN_FORMAT_TEXT) ++ { ++ if (es->timing) ++ { ++ if (planstate->instrument->running) ++ appendStringInfo(es->str, ++ " (Current loop: actual time=%.3f..%.3f rows=%.0f, loop number=%.0f)", ++ startup_sec, total_sec, rows, loop_num); ++ else ++ appendStringInfo(es->str, ++ " (Current loop: running time=%.3f actual rows=0, loop number=%.0f)", ++ total_sec, loop_num); ++ } ++ else ++ appendStringInfo(es->str, ++ " (Current loop: actual 
rows=%.0f, loop number=%.0f)", ++ rows, loop_num); ++ } ++ else ++ { ++ ExplainPropertyFloat("Actual Loop Number", NULL, loop_num, 0, es); ++ if (es->timing) ++ { ++ if (planstate->instrument->running) ++ { ++ ExplainPropertyFloat("Actual Startup Time", NULL, startup_sec, 3, es); ++ ExplainPropertyFloat("Actual Total Time", NULL, total_sec, 3, es); ++ } ++ else ++ ExplainPropertyFloat("Running Time", NULL, total_sec, 3, es); ++ } ++ ExplainPropertyFloat("Actual Rows", NULL, rows, 0, es); ++ } ++ ExplainCloseGroup("Current loop", "Current loop", true, es); ++ } ++ } ++ + /* in text format, first line ends here */ + if (es->format == EXPLAIN_FORMAT_TEXT) + appendStringInfoChar(es->str, '\n'); +@@ -2416,6 +2510,9 @@ ExplainNode(PlanState *planstate, List *ancestors, + + /* Prepare per-worker buffer/WAL usage */ + if (es->workers_state && (es->buffers || es->wal) && es->verbose) ++ /* Show worker detail after query execution */ ++ if (es->analyze && es->verbose && planstate->worker_instrument ++ && !es->runtime) + { + WorkerInstrumentation *w = planstate->worker_instrument; + +@@ -3403,6 +3500,11 @@ show_hash_info(HashState *hashstate, ExplainState *es) + memcpy(&hinstrument, hashstate->hinstrument, + sizeof(HashInstrumentation)); + ++ if (hashstate->hashtable) ++ { ++ ExecHashAccumInstrumentation(&hinstrument, hashstate->hashtable); ++ } ++ + /* + * Merge results from workers. In the parallel-oblivious case, the + * results from all participants should be identical, except where +@@ -3937,20 +4039,16 @@ show_instrumentation_count(const char *qlabel, int which, + if (!es->analyze || !planstate->instrument) + return; + ++ nloops = planstate->instrument->nloops; + if (which == 2) +- nfiltered = planstate->instrument->nfiltered2; ++ nfiltered = ((nloops > 0) ? planstate->instrument->nfiltered2 / nloops : 0); + else +- nfiltered = planstate->instrument->nfiltered1; ++ nfiltered = ((nloops > 0) ? 
planstate->instrument->nfiltered1 / nloops : 0); + nloops = planstate->instrument->nloops; + + /* In text mode, suppress zero counts; they're not interesting enough */ + if (nfiltered > 0 || es->format != EXPLAIN_FORMAT_TEXT) +- { +- if (nloops > 0) +- ExplainPropertyFloat(qlabel, NULL, nfiltered / nloops, 0, es); +- else +- ExplainPropertyFloat(qlabel, NULL, 0.0, 0, es); +- } ++ ExplainPropertyFloat(qlabel, NULL, nfiltered, 0, es); + } + + /* +@@ -4617,15 +4715,27 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, + double insert_path; + double other_path; + +- InstrEndLoop(outerPlanState(mtstate)->instrument); ++ if (!es->runtime) ++ InstrEndLoop(outerPlanState(mtstate)->instrument); + + /* count the number of source rows */ +- total = outerPlanState(mtstate)->instrument->ntuples; + other_path = mtstate->ps.instrument->ntuples2; +- insert_path = total - other_path; + +- ExplainPropertyFloat("Tuples Inserted", NULL, +- insert_path, 0, es); ++ /* ++ * Insert occurs after extracting row from subplan and in runtime mode ++ * we can appear between these two operations - situation when ++ * total > insert_path + other_path. Therefore we don't know exactly ++ * whether last row from subplan is inserted. ++ * We don't print inserted tuples in runtime mode in order to not print ++ * inconsistent data ++ */ ++ if (!es->runtime) ++ { ++ total = outerPlanState(mtstate)->instrument->ntuples; ++ insert_path = total - other_path; ++ ExplainPropertyFloat("Tuples Inserted", NULL, insert_path, 0, es); ++ } ++ + ExplainPropertyFloat("Conflicting Tuples", NULL, + other_path, 0, es); + } +diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h +index 3ab0aae78f7..3644c0db116 100644 +--- a/src/include/commands/explain.h ++++ b/src/include/commands/explain.h +@@ -57,6 +57,8 @@ typedef struct ExplainState + bool generic; /* generate a generic plan */ + ExplainSerializeOption serialize; /* serialize the query's output? 
*/ + ExplainFormat format; /* output format */ ++ bool runtime; /* print intermediate state of query execution, ++ not after completion */ + /* state for output formatting --- not reset for each new plan tree */ + int indent; /* current indentation level */ + List *grouping_stack; /* format-specific grouping state */ diff --git a/pg_query_state.c b/pg_query_state.c index 1949643..635b967 100644 --- a/pg_query_state.c +++ b/pg_query_state.c @@ -101,8 +101,8 @@ static List *GetRemoteBackendQueryStates(PGPROC *leader, ExplainFormat format); /* Shared memory variables */ -shm_toc *toc = NULL; -RemoteUserIdResult *counterpart_userid = NULL; +static shm_toc *toc = NULL; +static RemoteUserIdResult *counterpart_userid = NULL; pg_qs_params *params = NULL; shm_mq *mq = NULL; diff --git a/run_tests.sh b/run_tests.sh index 7e3cf79..d330d1e 100644 --- a/run_tests.sh +++ b/run_tests.sh @@ -13,6 +13,9 @@ set -ux status=0 +venv_path=tmp/env +rm -rf "$venv_path" + # global exports export PGPORT=55435 export VIRTUAL_ENV_DISABLE_PROMPT=1 @@ -148,13 +151,14 @@ if [ -f regression.diffs ]; then cat regression.diffs; fi # run python tests set +x -e -python3 -m venv /tmp/env && source /tmp/env/bin/activate && -pip install -r ./tests/requirements.txt +python3 -m venv "$venv_path" && source "$venv_path/bin/activate" +pip3 install --upgrade -t "$venv_path" -r ./tests/requirements.txt +#pip3 install -e "./$venv_path" set -e #exit virtualenv with error code -python tests/pg_qs_test_runner.py --port $PGPORT +python3 tests/pg_qs_test_runner.py --port $PGPORT if [[ "$USE_TPCDS" == "1" ]]; then - python tests/pg_qs_test_runner.py --port $PGPORT --tpc-ds-setup - python tests/pg_qs_test_runner.py --port $PGPORT --tpc-ds-run + python3 tests/pg_qs_test_runner.py --port $PGPORT --tpc-ds-setup + python3 tests/pg_qs_test_runner.py --port $PGPORT --tpc-ds-run fi deactivate set -x @@ -179,4 +183,5 @@ gcov $CUSTOM_PG_SRC/contrib/pg_query_state/*.c $CUSTOM_PG_SRC/contrib/pg_query_s set +ux # send coverage 
stats to Codecov +export CODECOV_TOKEN=55ab7421-9277-45af-a329-d8b40db96b2a bash <(curl -s https://codecov.io/bash) diff --git a/specs/corner_cases.spec b/specs/corner_cases.spec index c9f3fde..315b676 100644 --- a/specs/corner_cases.spec +++ b/specs/corner_cases.spec @@ -1,6 +1,5 @@ setup { - CREATE EXTENSION pg_query_state; CREATE ROLE alice; CREATE ROLE bob; CREATE ROLE super SUPERUSER; @@ -31,7 +30,6 @@ teardown DROP ROLE super; DROP ROLE bob; DROP ROLE alice; - DROP EXTENSION pg_query_state; } session "s1" diff --git a/tests/pg_qs_test_runner.py b/tests/pg_qs_test_runner.py index f4088a9..944f77f 100644 --- a/tests/pg_qs_test_runner.py +++ b/tests/pg_qs_test_runner.py @@ -8,9 +8,11 @@ import os import sys +sys.path.append(os.path.dirname(os.path.abspath(__file__))) +sys.path.append(os.path.abspath('tmp/env')) + import psycopg2 -sys.path.append(os.path.dirname(os.path.abspath(__file__))) from test_cases import * import tpcds diff --git a/tests/test_cases.py b/tests/test_cases.py index f86641d..498484b 100644 --- a/tests/test_cases.py +++ b/tests/test_cases.py @@ -110,6 +110,17 @@ def test_nested_call(config): expected = 'Function Scan on n_join_foo_bar (Current loop: actual rows=0, loop number=1)' expected_nested = r"""Result \(Current loop: actual rows=0, loop number=1\) InitPlan 1 \(returns \$0\) + -> Aggregate \(Current loop: actual rows=0, loop number=1\) + -> Hash Join \(Current loop: actual rows=\d+, loop number=1\) + Hash Cond: \(foo.c1 = bar.c1\) + Join Filter: \(unlock_if_eq_1\(foo.c1\) = bar.c1\) + -> Seq Scan on foo \(Current loop: actual rows=\d+, loop number=1\) + -> Hash \(Current loop: actual rows=500000, loop number=1\) + Buckets: \d+ Batches: \d+ Memory Usage: \d+kB + -> Seq Scan on bar \(Current loop: actual rows=\d+, loop number=1\)""" + + expected_nested_2 = r"""Result \(Current loop: actual rows=0, loop number=1\) + InitPlan 1 -> Aggregate \(Current loop: actual rows=0, loop number=1\) -> Hash Join \(Current loop: actual rows=\d+, loop 
number=1\) Hash Cond: \(foo.c1 = bar.c1\) @@ -136,7 +147,7 @@ def test_nested_call(config): assert qs[0][2] == call_function assert qs[0][3] == expected assert qs[1][2] == nested_query1 or qs[1][2] == nested_query2 - assert re.match(expected_nested, qs[1][3]) + assert re.match(expected_nested, qs[1][3]) or re.match(expected_nested_2, qs[1][3]) assert qs[0][4] == qs[1][4] == None assert len(notices) == 0 @@ -379,6 +390,9 @@ def test_timing_buffers_conflicts(config): timing_pattern = '(?:running time=\d+.\d+)|(?:actual time=\d+.\d+..\d+.\d+)' buffers_pattern = 'Buffers:' + common.set_guc(acon, 'pg_query_state.enable_timing', 'off') + common.set_guc(acon, 'pg_query_state.enable_buffers', 'off') + qs, notices = common.onetime_query_state(config, acon, query, {'timing': True, 'buffers': False}) assert len(qs) == 1 and not re.search(timing_pattern, qs[0][3]) assert notices == ['WARNING: timing statistics disabled\n'] diff --git a/tests/tpcds.py b/tests/tpcds.py index 1f2b6da..bdeb408 100644 --- a/tests/tpcds.py +++ b/tests/tpcds.py @@ -8,6 +8,10 @@ import time import progressbar +# This actually imports progressbar2 but `import progressbar2' itself doesn't work. +# In case of problems with the progressbar/progressbar2, check that you have the +# progressbar2 installed and the path to it or venv is specified. 
+ import psycopg2.extensions import common @@ -22,7 +26,10 @@ def setup_tpcds(config): try: conn = psycopg2.connect(**config) cur = conn.cursor() + except Exception as e: + raise DataLoadException('Load failed: %s' % e) + try: # Create pg_query_state extension cur.execute('CREATE EXTENSION IF NOT EXISTS pg_query_state') @@ -55,13 +62,13 @@ def run_tpcds(config): TPC_DS_STATEMENT_TIMEOUT = 20000 # statement_timeout in ms print('Preparing TPC-DS queries...') + err_count = 0 queries = [] for query_file in sorted(os.listdir('tmp_stress/tpcds-result-reproduction/query_qualification/')): with open('tmp_stress/tpcds-result-reproduction/query_qualification/%s' % query_file, 'r') as f: queries.append(f.read()) acon, = common.n_async_connect(config) - pid = acon.get_backend_pid() print('Starting TPC-DS queries...') timeout_list = [] @@ -84,8 +91,25 @@ def run_tpcds(config): PG_QS_DELAY, BEFORE_GETTING_QS_DELAY = 0.1, 0.1 BEFORE_GETTING_QS, GETTING_QS = range(2) state, n_first_getting_qs_retries = BEFORE_GETTING_QS, 0 + + pg_qs_args = { + 'config': config, + 'pid': acon.get_backend_pid() + } + while True: - result, notices = common.pg_query_state(config, pid) + try: + result, notices = common.pg_query_state(**pg_qs_args) + except Exception as e: + # do not consider the test failed if the "error in message + # queue data transmitting" is received, this may happen with + # some small probability, but if it happens too often it is + # a problem, we will handle this case after the loop + if "error in message queue data transmitting" in e.pgerror: + err_count += 1 + else: + raise e + # run state machine to determine the first getting of query state # and query finishing if state == BEFORE_GETTING_QS: @@ -109,6 +133,12 @@ def run_tpcds(config): except psycopg2.extensions.QueryCanceledError: timeout_list.append(i + 1) + if err_count > 2: + print("\nERROR: error in message queue data transmitting") + raise Exception('error was received %d times'%err_count) + elif err_count > 0: + 
print(err_count, " times there was error in message queue data transmitting") + common.n_close((acon,)) if len(timeout_list) > 0: