From 9e4a5e317b4e1110252b05806e041f58edd7b454 Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Tue, 5 May 2020 16:56:11 +0200 Subject: [PATCH] Revert "bpo-40513: Per-interpreter signals pending (GH-19924)" This reverts commit 4e01946cafca0cf49f796c3118e0d65237bcad69. --- Include/internal/pycore_interp.h | 2 - Include/internal/pycore_runtime.h | 5 +++ Python/ceval.c | 65 +++++++++++++++++-------------- Python/ceval_gil.h | 2 +- 4 files changed, 42 insertions(+), 32 deletions(-) diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h index 08291012365edc..5bf8998e673206 100644 --- a/Include/internal/pycore_interp.h +++ b/Include/internal/pycore_interp.h @@ -46,8 +46,6 @@ struct _ceval_state { /* Request for dropping the GIL */ _Py_atomic_int gil_drop_request; struct _pending_calls pending; - /* Request for checking signals. */ - _Py_atomic_int signals_pending; }; diff --git a/Include/internal/pycore_runtime.h b/Include/internal/pycore_runtime.h index 8ca1dfbb3f0a6b..34eb492b9f254f 100644 --- a/Include/internal/pycore_runtime.h +++ b/Include/internal/pycore_runtime.h @@ -14,6 +14,11 @@ extern "C" { /* ceval state */ struct _ceval_runtime_state { + /* Request for checking signals. It is shared by all interpreters (see + bpo-40513). Any thread of any interpreter can receive a signal, but only + the main thread of the main interpreter can handle signals: see + _Py_ThreadCanHandleSignals(). */ + _Py_atomic_int signals_pending; struct _gil_runtime_state gil; }; diff --git a/Python/ceval.c b/Python/ceval.c index 601e21a2fccd29..0c08a76f7d1130 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -143,70 +143,76 @@ is_tstate_valid(PyThreadState *tstate) the GIL eventually anyway. */ static inline void COMPUTE_EVAL_BREAKER(PyInterpreterState *interp, - struct _ceval_state *ceval) + struct _ceval_runtime_state *ceval, + struct _ceval_state *ceval2) { - _Py_atomic_store_relaxed(&ceval->eval_breaker, - _Py_atomic_load_relaxed(&ceval->gil_drop_request) + _Py_atomic_store_relaxed(&ceval2->eval_breaker, + _Py_atomic_load_relaxed(&ceval2->gil_drop_request) | (_Py_atomic_load_relaxed(&ceval->signals_pending) && _Py_ThreadCanHandleSignals(interp)) - | (_Py_atomic_load_relaxed(&ceval->pending.calls_to_do) + | (_Py_atomic_load_relaxed(&ceval2->pending.calls_to_do) && _Py_ThreadCanHandlePendingCalls()) - | ceval->pending.async_exc); + | ceval2->pending.async_exc); } static inline void SET_GIL_DROP_REQUEST(PyInterpreterState *interp) { - struct _ceval_state *ceval = &interp->ceval; - _Py_atomic_store_relaxed(&ceval->gil_drop_request, 1); - _Py_atomic_store_relaxed(&ceval->eval_breaker, 1); + struct _ceval_state *ceval2 = &interp->ceval; + _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1); + _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); } static inline void RESET_GIL_DROP_REQUEST(PyInterpreterState *interp) { - struct _ceval_state *ceval = &interp->ceval; - _Py_atomic_store_relaxed(&ceval->gil_drop_request, 0); - COMPUTE_EVAL_BREAKER(interp, ceval); + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; + _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0); + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); } static inline void SIGNAL_PENDING_CALLS(PyInterpreterState *interp) { - struct _ceval_state *ceval = &interp->ceval; - _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 1); - COMPUTE_EVAL_BREAKER(interp, ceval); + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = 
&interp->ceval; + _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 1); + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); } static inline void UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp) { - struct _ceval_state *ceval = &interp->ceval; - _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 0); - COMPUTE_EVAL_BREAKER(interp, ceval); + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; + _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0); + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); } static inline void SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp) { - struct _ceval_state *ceval = &interp->ceval; + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; _Py_atomic_store_relaxed(&ceval->signals_pending, 1); /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */ - COMPUTE_EVAL_BREAKER(interp, ceval); + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); } static inline void UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp) { - struct _ceval_state *ceval = &interp->ceval; + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; _Py_atomic_store_relaxed(&ceval->signals_pending, 0); - COMPUTE_EVAL_BREAKER(interp, ceval); + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); } @@ -222,9 +228,10 @@ SIGNAL_ASYNC_EXC(PyInterpreterState *interp) static inline void UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp) { - struct _ceval_state *ceval = &interp->ceval; - ceval->pending.async_exc = 0; - COMPUTE_EVAL_BREAKER(interp, ceval); + struct _ceval_runtime_state *ceval = &interp->runtime->ceval; + struct _ceval_state *ceval2 = &interp->ceval; + ceval2->pending.async_exc = 0; + COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); } @@ -349,11 +356,12 @@ PyEval_ReleaseLock(void) { _PyRuntimeState *runtime = &_PyRuntime; PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime); - struct _ceval_state *ceval2 = &tstate->interp->ceval; /* This function must succeed when the current thread state is NULL. We therefore avoid PyThreadState_Get() which dumps a fatal error in debug mode. */ - drop_gil(&runtime->ceval, ceval2, tstate); + struct _ceval_runtime_state *ceval = &runtime->ceval; + struct _ceval_state *ceval2 = &tstate->interp->ceval; + drop_gil(ceval, ceval2, tstate); } void @@ -435,7 +443,6 @@ PyThreadState * PyEval_SaveThread(void) { _PyRuntimeState *runtime = &_PyRuntime; - PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL); ensure_tstate_not_null(__func__, tstate); @@ -831,16 +838,16 @@ eval_frame_handle_pending(PyThreadState *tstate) { _PyRuntimeState * const runtime = &_PyRuntime; struct _ceval_runtime_state *ceval = &runtime->ceval; - struct _ceval_state *ceval2 = &tstate->interp->ceval; /* Pending signals */ - if (_Py_atomic_load_relaxed(&ceval2->signals_pending)) { + if (_Py_atomic_load_relaxed(&ceval->signals_pending)) { if (handle_signals(tstate) != 0) { return -1; } } /* Pending calls */ + struct _ceval_state *ceval2 = &tstate->interp->ceval; if (_Py_atomic_load_relaxed(&ceval2->pending.calls_to_do)) { if (make_pending_calls(tstate) != 0) { return -1; diff --git a/Python/ceval_gil.h b/Python/ceval_gil.h index db47077d5c1ce1..f25f8100732942 100644 --- a/Python/ceval_gil.h +++ b/Python/ceval_gil.h @@ -305,7 +305,7 @@ take_gil(PyThreadState *tstate) handle signals. Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). 
*/
-        COMPUTE_EVAL_BREAKER(interp, ceval2);
+        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
     }
 
     /* Don't access tstate if the thread must exit */
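
For context on what this revert restores: the hunks above move signals_pending out of the per-interpreter struct _ceval_state and back into the runtime-wide struct _ceval_runtime_state, so COMPUTE_EVAL_BREAKER() now combines one flag shared by all interpreters with per-interpreter state, and only a thread for which _Py_ThreadCanHandleSignals() is true lets a pending signal raise eval_breaker. The listing below is a minimal, standalone sketch of that logic under stated assumptions, not the real CPython internals: the demo_* structs and functions, the simplified _Py_ThreadCanHandleSignals() stand-in, and the use of C11 <stdatomic.h> in place of the _Py_atomic_* helpers are all illustrative.

/* Simplified, standalone model of the post-revert eval_breaker computation.
   The struct and function names are illustrative stand-ins, not CPython's
   actual internals. Compiles with any C11 compiler. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Runtime-wide state shared by all interpreters (cf. _ceval_runtime_state). */
struct demo_ceval_runtime_state {
    atomic_int signals_pending;   /* any thread of any interpreter may set this */
};

/* Per-interpreter state (cf. _ceval_state). */
struct demo_ceval_state {
    atomic_int gil_drop_request;
    atomic_int calls_to_do;
    int async_exc;
    atomic_int eval_breaker;
};

/* Stand-in for _Py_ThreadCanHandleSignals(): only the main thread of the
   main interpreter handles signals. */
static bool demo_thread_can_handle_signals(bool is_main_interp, bool is_main_thread)
{
    return is_main_interp && is_main_thread;
}

/* Mirrors COMPUTE_EVAL_BREAKER() after the revert: the runtime-wide
   signals_pending flag only raises eval_breaker in a thread that is
   allowed to handle signals. */
static void demo_compute_eval_breaker(struct demo_ceval_runtime_state *ceval,
                                      struct demo_ceval_state *ceval2,
                                      bool is_main_interp, bool is_main_thread)
{
    int breaker =
        atomic_load_explicit(&ceval2->gil_drop_request, memory_order_relaxed)
        | (atomic_load_explicit(&ceval->signals_pending, memory_order_relaxed)
           && demo_thread_can_handle_signals(is_main_interp, is_main_thread))
        | atomic_load_explicit(&ceval2->calls_to_do, memory_order_relaxed)
        | ceval2->async_exc;
    atomic_store_explicit(&ceval2->eval_breaker, breaker, memory_order_relaxed);
}

int main(void)
{
    struct demo_ceval_runtime_state runtime = { .signals_pending = 1 };
    struct demo_ceval_state sub = {0}, main_interp = {0};

    /* A pending signal must not interrupt a subinterpreter thread... */
    demo_compute_eval_breaker(&runtime, &sub, /*is_main_interp=*/false, true);
    /* ...but it does interrupt the main thread of the main interpreter. */
    demo_compute_eval_breaker(&runtime, &main_interp, true, true);

    printf("subinterpreter eval_breaker: %d\n",
           atomic_load(&sub.eval_breaker));          /* prints 0 */
    printf("main interpreter eval_breaker: %d\n",
           atomic_load(&main_interp.eval_breaker));  /* prints 1 */
    return 0;
}

Running the sketch prints eval_breaker 0 for the subinterpreter thread and 1 for the main interpreter's main thread, which is the behaviour described by the comment this patch adds to pycore_runtime.h.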
