diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h index 4914948c6ca744..ccd4e2e699c93a 100644 --- a/Include/internal/pycore_ceval.h +++ b/Include/internal/pycore_ceval.h @@ -28,9 +28,8 @@ struct _ceval_runtime_state; extern void _Py_FinishPendingCalls(PyThreadState *tstate); -extern void _PyEval_InitRuntimeState(struct _ceval_runtime_state *); -extern void _PyEval_InitState(struct _ceval_state *, PyThread_type_lock); -extern void _PyEval_FiniState(struct _ceval_state *ceval); +extern void _PyEval_InitState(PyInterpreterState *, PyThread_type_lock); +extern void _PyEval_FiniState(PyInterpreterState *); PyAPI_FUNC(void) _PyEval_SignalReceived(PyInterpreterState *interp); PyAPI_FUNC(int) _PyEval_AddPendingCall( PyInterpreterState *interp, @@ -103,7 +102,7 @@ _PyEval_Vector(PyThreadState *tstate, PyObject* const* args, size_t argcount, PyObject *kwnames); -extern int _PyEval_ThreadsInitialized(struct pyruntimestate *runtime); +extern int _PyEval_ThreadsInitialized(PyInterpreterState *interp); extern PyStatus _PyEval_InitGIL(PyThreadState *tstate); extern void _PyEval_FiniGIL(PyInterpreterState *interp); diff --git a/Include/internal/pycore_gil.h b/Include/internal/pycore_gil.h index 8ebad37b686cd4..56bcc1ca2c61ef 100644 --- a/Include/internal/pycore_gil.h +++ b/Include/internal/pycore_gil.h @@ -20,7 +20,8 @@ extern "C" { #undef FORCE_SWITCHING #define FORCE_SWITCHING -struct _gil_runtime_state { +/* ** The GIL ** */ +struct _gil_state { /* microseconds (the Python API uses seconds, though) */ unsigned long interval; /* Last PyThreadState holding / having held the GIL. This helps us diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h index e7f914ec2fe521..6b123b8d96d5a4 100644 --- a/Include/internal/pycore_interp.h +++ b/Include/internal/pycore_interp.h @@ -18,6 +18,7 @@ extern "C" { #include "pycore_exceptions.h" // struct _Py_exc_state #include "pycore_floatobject.h" // struct _Py_float_state #include "pycore_genobject.h" // struct _Py_async_gen_state +#include "pycore_gil.h" // struct _gil_state #include "pycore_gc.h" // struct _gc_runtime_state #include "pycore_list.h" // struct _Py_list_state #include "pycore_tuple.h" // struct _Py_tuple_state @@ -49,6 +50,9 @@ struct _ceval_state { _Py_atomic_int eval_breaker; /* Request for dropping the GIL */ _Py_atomic_int gil_drop_request; + /* The GIL */ + struct _gil_state gil; + /* Pending calls */ struct _pending_calls pending; }; diff --git a/Include/internal/pycore_runtime.h b/Include/internal/pycore_runtime.h index 2c04ead45869fc..d205f7afd0a46b 100644 --- a/Include/internal/pycore_runtime.h +++ b/Include/internal/pycore_runtime.h @@ -9,7 +9,6 @@ extern "C" { #endif #include "pycore_atomic.h" /* _Py_atomic_address */ -#include "pycore_gil.h" // struct _gil_runtime_state #include "pycore_global_objects.h" // struct _Py_global_objects #include "pycore_interp.h" // PyInterpreterState #include "pycore_unicodeobject.h" // struct _Py_unicode_runtime_ids @@ -26,7 +25,6 @@ struct _ceval_runtime_state { the main thread of the main interpreter can handle signals: see _Py_ThreadCanHandleSignals(). */ _Py_atomic_int signals_pending; - struct _gil_runtime_state gil; }; /* GIL state */ diff --git a/Python/ceval_gil.c b/Python/ceval_gil.c index a6790866766795..ab5b8f4cf50043 100644 --- a/Python/ceval_gil.c +++ b/Python/ceval_gil.c @@ -60,70 +60,70 @@ the GIL eventually anyway. 
*/ static inline void COMPUTE_EVAL_BREAKER(PyInterpreterState *interp, - struct _ceval_runtime_state *ceval, - struct _ceval_state *ceval2) + struct _ceval_runtime_state *shared_ceval, + struct _ceval_state *ceval) { - _Py_atomic_store_relaxed(&ceval2->eval_breaker, - _Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request) - | (_Py_atomic_load_relaxed_int32(&ceval->signals_pending) + _Py_atomic_store_relaxed(&ceval->eval_breaker, + _Py_atomic_load_relaxed_int32(&ceval->gil_drop_request) + | (_Py_atomic_load_relaxed_int32(&shared_ceval->signals_pending) && _Py_ThreadCanHandleSignals(interp)) - | (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do) + | (_Py_atomic_load_relaxed_int32(&ceval->pending.calls_to_do) && _Py_ThreadCanHandlePendingCalls()) - | ceval2->pending.async_exc); + | ceval->pending.async_exc); } static inline void SET_GIL_DROP_REQUEST(PyInterpreterState *interp) { - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1); - _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); + struct _ceval_state *ceval = &interp->ceval; + _Py_atomic_store_relaxed(&ceval->gil_drop_request, 1); + _Py_atomic_store_relaxed(&ceval->eval_breaker, 1); } static inline void RESET_GIL_DROP_REQUEST(PyInterpreterState *interp) { - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + struct _ceval_runtime_state *shared_ceval = &interp->runtime->ceval; + struct _ceval_state *ceval = &interp->ceval; + _Py_atomic_store_relaxed(&ceval->gil_drop_request, 0); + COMPUTE_EVAL_BREAKER(interp, shared_ceval, ceval); } static inline void SIGNAL_PENDING_CALLS(PyInterpreterState *interp) { - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 1); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + struct _ceval_runtime_state *shared_ceval = &interp->runtime->ceval; + struct _ceval_state *ceval = &interp->ceval; + _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 1); + COMPUTE_EVAL_BREAKER(interp, shared_ceval, ceval); } static inline void UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp) { - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + struct _ceval_runtime_state *shared_ceval = &interp->runtime->ceval; + struct _ceval_state *ceval = &interp->ceval; + _Py_atomic_store_relaxed(&ceval->pending.calls_to_do, 0); + COMPUTE_EVAL_BREAKER(interp, shared_ceval, ceval); } static inline void SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp, int force) { - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval->signals_pending, 1); + struct _ceval_runtime_state *shared_ceval = &interp->runtime->ceval; + struct _ceval_state *ceval = &interp->ceval; + _Py_atomic_store_relaxed(&shared_ceval->signals_pending, 1); if (force) { - _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); + _Py_atomic_store_relaxed(&ceval->eval_breaker, 1); } else { /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */ - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + COMPUTE_EVAL_BREAKER(interp, shared_ceval, ceval); } } @@ -131,29 +131,29 @@ 
SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp, int force) static inline void UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp) { - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - _Py_atomic_store_relaxed(&ceval->signals_pending, 0); - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + struct _ceval_runtime_state *shared_ceval = &interp->runtime->ceval; + struct _ceval_state *ceval = &interp->ceval; + _Py_atomic_store_relaxed(&shared_ceval->signals_pending, 0); + COMPUTE_EVAL_BREAKER(interp, shared_ceval, ceval); } static inline void SIGNAL_ASYNC_EXC(PyInterpreterState *interp) { - struct _ceval_state *ceval2 = &interp->ceval; - ceval2->pending.async_exc = 1; - _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1); + struct _ceval_state *ceval = &interp->ceval; + ceval->pending.async_exc = 1; + _Py_atomic_store_relaxed(&ceval->eval_breaker, 1); } static inline void UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp) { - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - ceval2->pending.async_exc = 0; - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + struct _ceval_runtime_state *shared_ceval = &interp->runtime->ceval; + struct _ceval_state *ceval = &interp->ceval; + ceval->pending.async_exc = 0; + COMPUTE_EVAL_BREAKER(interp, shared_ceval, ceval); } #ifndef NDEBUG @@ -216,21 +216,37 @@ is_tstate_valid(PyThreadState *tstate) } \ +/* Currently, the GIL is shared by all interpreters, + and only the main interpreter is responsible for creating + and destroying it. */ +#define _GET_OWN_GIL(interp) \ + (&(interp)->ceval.gil) + +static inline struct _gil_state * +_get_gil(PyInterpreterState *interp) +{ + if (interp->config._isolated_interpreter) { + return _GET_OWN_GIL(interp); + } + return &_PyRuntime.interpreters.main->ceval.gil; +} + + #define DEFAULT_INTERVAL 5000 -static void _gil_initialize(struct _gil_runtime_state *gil) +static void _gil_initialize(struct _gil_state *gil) { _Py_atomic_int uninitialized = {-1}; gil->locked = uninitialized; gil->interval = DEFAULT_INTERVAL; } -static int gil_created(struct _gil_runtime_state *gil) +static int gil_created(struct _gil_state *gil) { return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0); } -static void create_gil(struct _gil_runtime_state *gil) +static void create_gil(struct _gil_state *gil) { MUTEX_INIT(gil->mutex); #ifdef FORCE_SWITCHING @@ -245,7 +261,7 @@ static void create_gil(struct _gil_runtime_state *gil) _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release); } -static void destroy_gil(struct _gil_runtime_state *gil) +static void destroy_gil(struct _gil_state *gil) { /* some pthread-like implementations tie the mutex to the cond * and must have the cond destroyed first. @@ -262,7 +278,7 @@ static void destroy_gil(struct _gil_runtime_state *gil) } #ifdef HAVE_FORK -static void recreate_gil(struct _gil_runtime_state *gil) +static void recreate_gil(struct _gil_state *gil) { _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked); /* XXX should we destroy the old OS resources here?
*/ @@ -271,10 +287,9 @@ static void recreate_gil(struct _gil_runtime_state *gil) #endif static void -drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2, +drop_gil(struct _gil_state *gil, struct _ceval_state *ceval, PyThreadState *tstate) { - struct _gil_runtime_state *gil = &ceval->gil; if (!_Py_atomic_load_relaxed(&gil->locked)) { Py_FatalError("drop_gil: GIL is not locked"); } @@ -294,7 +309,7 @@ drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2, MUTEX_UNLOCK(gil->mutex); #ifdef FORCE_SWITCHING - if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) { + if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) { MUTEX_LOCK(gil->switch_mutex); /* Not switched yet => wait */ if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate) @@ -356,9 +371,9 @@ take_gil(PyThreadState *tstate) assert(is_tstate_valid(tstate)); PyInterpreterState *interp = tstate->interp; - struct _ceval_runtime_state *ceval = &interp->runtime->ceval; - struct _ceval_state *ceval2 = &interp->ceval; - struct _gil_runtime_state *gil = &ceval->gil; + struct _ceval_runtime_state *shared_ceval = &interp->runtime->ceval; + struct _ceval_state *ceval = &interp->ceval; + struct _gil_state *gil = _get_gil(interp); /* Check that _PyEval_InitThreads() was called to create the lock */ assert(gil_created(gil)); @@ -421,12 +436,12 @@ take_gil(PyThreadState *tstate) in take_gil() while the main thread called wait_for_thread_shutdown() from Py_Finalize(). */ MUTEX_UNLOCK(gil->mutex); - drop_gil(ceval, ceval2, tstate); + drop_gil(gil, ceval, tstate); PyThread_exit_thread(); } assert(is_tstate_valid(tstate)); - if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) { + if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) { RESET_GIL_DROP_REQUEST(interp); } else { @@ -435,7 +450,7 @@ take_gil(PyThreadState *tstate) handle signals. Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */ - COMPUTE_EVAL_BREAKER(interp, ceval, ceval2); + COMPUTE_EVAL_BREAKER(interp, shared_ceval, ceval); } /* Don't access tstate if the thread must exit */ @@ -450,41 +465,48 @@ take_gil(PyThreadState *tstate) void _PyEval_SetSwitchInterval(unsigned long microseconds) { - struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil; + PyInterpreterState *interp = _PyInterpreterState_GET(); + struct _gil_state *gil = _get_gil(interp); gil->interval = microseconds; } unsigned long _PyEval_GetSwitchInterval() { - struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil; + PyInterpreterState *interp = _PyInterpreterState_GET(); + struct _gil_state *gil = _get_gil(interp); return gil->interval; } int -_PyEval_ThreadsInitialized(_PyRuntimeState *runtime) +_PyEval_ThreadsInitialized(PyInterpreterState *interp) { - return gil_created(&runtime->ceval.gil); + if (interp == NULL) { + interp = _PyRuntime.interpreters.main; + if (interp == NULL) { + return 0; + } + } + struct _gil_state *gil = _get_gil(interp); + return gil_created(gil); } int PyEval_ThreadsInitialized(void) { - _PyRuntimeState *runtime = &_PyRuntime; - return _PyEval_ThreadsInitialized(runtime); + PyInterpreterState *interp = _PyInterpreterState_GET(); + return _PyEval_ThreadsInitialized(interp); } PyStatus _PyEval_InitGIL(PyThreadState *tstate) { - if (!_Py_IsMainInterpreter(tstate->interp)) { - /* Currently, the GIL is shared by all interpreters, - and only the main interpreter is responsible to create - and destroy it. 
*/ + struct _gil_state *gil = _get_gil(tstate->interp); + if (gil != _GET_OWN_GIL(tstate->interp)) { + /* It's a shared GIL. */ + assert(!_Py_IsMainInterpreter(tstate->interp)); return _PyStatus_OK(); } - - struct _gil_runtime_state *gil = &tstate->interp->runtime->ceval.gil; assert(!gil_created(gil)); PyThread_init_thread(); @@ -499,14 +521,12 @@ _PyEval_InitGIL(PyThreadState *tstate) void _PyEval_FiniGIL(PyInterpreterState *interp) { - if (!_Py_IsMainInterpreter(interp)) { - /* Currently, the GIL is shared by all interpreters, - and only the main interpreter is responsible to create - and destroy it. */ + struct _gil_state *gil = _get_gil(interp); + if (gil != _GET_OWN_GIL(interp)) { + /* It's a shared GIL. */ + assert(!_Py_IsMainInterpreter(interp)); return; } - - struct _gil_runtime_state *gil = &interp->runtime->ceval.gil; if (!gil_created(gil)) { /* First Py_InitializeFromConfig() call: the GIL doesn't exist yet: do nothing. */ @@ -548,17 +568,17 @@ PyEval_ReleaseLock(void) /* This function must succeed when the current thread state is NULL. We therefore avoid PyThreadState_Get() which dumps a fatal error in debug mode. */ - struct _ceval_runtime_state *ceval = &runtime->ceval; - struct _ceval_state *ceval2 = &tstate->interp->ceval; - drop_gil(ceval, ceval2, tstate); + struct _gil_state *gil = _get_gil(tstate->interp); + struct _ceval_state *ceval = &tstate->interp->ceval; + drop_gil(gil, ceval, tstate); } void _PyEval_ReleaseLock(PyThreadState *tstate) { - struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval; - struct _ceval_state *ceval2 = &tstate->interp->ceval; - drop_gil(ceval, ceval2, tstate); + struct _gil_state *gil = _get_gil(tstate->interp); + struct _ceval_state *ceval = &tstate->interp->ceval; + drop_gil(gil, ceval, tstate); } void @@ -584,9 +604,9 @@ PyEval_ReleaseThread(PyThreadState *tstate) if (new_tstate != tstate) { Py_FatalError("wrong thread state"); } - struct _ceval_runtime_state *ceval = &runtime->ceval; - struct _ceval_state *ceval2 = &tstate->interp->ceval; - drop_gil(ceval, ceval2, tstate); + struct _gil_state *gil = _get_gil(tstate->interp); + struct _ceval_state *ceval = &tstate->interp->ceval; + drop_gil(gil, ceval, tstate); } #ifdef HAVE_FORK @@ -598,7 +618,7 @@ _PyEval_ReInitThreads(PyThreadState *tstate) { _PyRuntimeState *runtime = tstate->interp->runtime; - struct _gil_runtime_state *gil = &runtime->ceval.gil; + struct _gil_state *gil = _get_gil(tstate->interp); if (!gil_created(gil)) { return _PyStatus_OK(); } @@ -633,10 +653,10 @@ PyEval_SaveThread(void) PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL); _Py_EnsureTstateNotNULL(tstate); - struct _ceval_runtime_state *ceval = &runtime->ceval; - struct _ceval_state *ceval2 = &tstate->interp->ceval; - assert(gil_created(&ceval->gil)); - drop_gil(ceval, ceval2, tstate); + struct _gil_state *gil = _get_gil(tstate->interp); + struct _ceval_state *ceval = &tstate->interp->ceval; + assert(gil_created(gil)); + drop_gil(gil, ceval, tstate); return tstate; } @@ -896,28 +916,31 @@ Py_MakePendingCalls(void) /* The interpreter's recursion limit */ void -_PyEval_InitRuntimeState(struct _ceval_runtime_state *ceval) +_PyEval_InitState(PyInterpreterState *interp, PyThread_type_lock pending_lock) { - _gil_initialize(&ceval->gil); -} + /* Each interpreter is responsible for creating and destroying + its own GIL. Interpreters that share a GIL skip this step.
*/ + struct _gil_state *gil = _get_gil(interp); + if (gil == _GET_OWN_GIL(interp)) { + _gil_initialize(gil); + /* Everything else GIL-related is initialized in _PyEval_InitGIL(). */ + } -void -_PyEval_InitState(struct _ceval_state *ceval, PyThread_type_lock pending_lock) -{ - struct _pending_calls *pending = &ceval->pending; + struct _pending_calls *pending = &interp->ceval.pending; assert(pending->lock == NULL); - pending->lock = pending_lock; } void -_PyEval_FiniState(struct _ceval_state *ceval) +_PyEval_FiniState(PyInterpreterState *interp) { - struct _pending_calls *pending = &ceval->pending; + struct _pending_calls *pending = &interp->ceval.pending; if (pending->lock != NULL) { PyThread_free_lock(pending->lock); pending->lock = NULL; } + + /* Everything GIL-related is finalized in _PyEval_FiniGIL(). */ } /* Handle signals, pending calls, GIL drop request @@ -926,30 +949,31 @@ int _Py_HandlePending(PyThreadState *tstate) { _PyRuntimeState * const runtime = &_PyRuntime; - struct _ceval_runtime_state *ceval = &runtime->ceval; + struct _ceval_runtime_state *shared_ceval = &runtime->ceval; /* Pending signals */ - if (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)) { + if (_Py_atomic_load_relaxed_int32(&shared_ceval->signals_pending)) { if (handle_signals(tstate) != 0) { return -1; } } /* Pending calls */ - struct _ceval_state *ceval2 = &tstate->interp->ceval; - if (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do)) { + struct _ceval_state *ceval = &tstate->interp->ceval; + if (_Py_atomic_load_relaxed_int32(&ceval->pending.calls_to_do)) { if (make_pending_calls(tstate->interp) != 0) { return -1; } } /* GIL drop request */ - if (_Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request)) { + if (_Py_atomic_load_relaxed_int32(&ceval->gil_drop_request)) { /* Give another thread a chance */ if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) { Py_FatalError("tstate mix-up"); } - drop_gil(ceval, ceval2, tstate); + struct _gil_state *gil = _get_gil(tstate->interp); + drop_gil(gil, ceval, tstate); /* Other threads may run now */ @@ -978,7 +1002,7 @@ _Py_HandlePending(PyThreadState *tstate) // value. It prevents to interrupt the eval loop at every instruction if // the current Python thread cannot handle signals (if // _Py_ThreadCanHandleSignals() is false). - COMPUTE_EVAL_BREAKER(tstate->interp, ceval, ceval2); + COMPUTE_EVAL_BREAKER(tstate->interp, shared_ceval, ceval); #endif return 0; diff --git a/Python/pystate.c b/Python/pystate.c index a0d61d7ebb3be9..a02eb20f03cb68 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -121,8 +121,6 @@ init_runtime(_PyRuntimeState *runtime, runtime->open_code_userdata = open_code_userdata; runtime->audit_hook_head = audit_hook_head; - _PyEval_InitRuntimeState(&runtime->ceval); - PyPreConfig_InitPythonConfig(&runtime->preconfig); runtime->interpreters.mutex = interpreters_mutex; @@ -305,7 +303,7 @@ init_interpreter(PyInterpreterState *interp, assert(next != NULL || (interp == runtime->interpreters.main)); interp->next = next; - _PyEval_InitState(&interp->ceval, pending_lock); + _PyEval_InitState(interp, pending_lock); _PyGC_InitState(&interp->gc); PyConfig_InitPythonConfig(&interp->config); _PyType_InitCache(interp); @@ -495,7 +493,7 @@ PyInterpreterState_Delete(PyInterpreterState *interp) struct pyinterpreters *interpreters = &runtime->interpreters; zapthreads(interp, 0); - _PyEval_FiniState(&interp->ceval); + _PyEval_FiniState(interp); /* Delete current thread. After this, many C API calls become crashy.
*/ _PyThreadState_Swap(&runtime->gilstate, NULL); @@ -1677,9 +1675,11 @@ PyGILState_Ensure(void) spells out other issues. Embedders are expected to have called Py_Initialize(). */ - /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been - called by Py_Initialize() */ - assert(_PyEval_ThreadsInitialized(runtime)); + /* Ensure that _PyEval_InitThreads() has been called by Py_Initialize() */ + // XXX Use the appropriate interpreter. + assert(runtime->interpreters.main && + _PyEval_ThreadsInitialized(runtime->interpreters.main)); + /* Ensure that _PyGILState_Init() has been called by Py_Initialize() */ assert(gilstate->autoInterpreterState); PyThreadState *tcur = (PyThreadState *)PyThread_tss_get(&gilstate->autoTSSkey);
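
For context beyond the patch itself: the new _get_gil() helper is what decides whose GIL a given interpreter uses. The standalone C sketch below illustrates just that selection logic under simplified assumptions; struct gil, struct interp, main_interp, and get_gil are hypothetical stand-ins, not CPython's actual types or API.

#include <stdio.h>

/* Hypothetical stand-ins for the patched CPython internals. */
struct gil {
    unsigned long interval;     /* like _gil_state.interval */
};

struct interp {
    int isolated;               /* like config._isolated_interpreter */
    struct gil gil;             /* like PyInterpreterState.ceval.gil */
};

/* Like _PyRuntime.interpreters.main. */
static struct interp main_interp;

/* Mirrors _get_gil(): an isolated interpreter uses the GIL embedded in
   its own state; every other interpreter shares the main one's GIL. */
static struct gil *
get_gil(struct interp *interp)
{
    if (interp->isolated) {
        return &interp->gil;
    }
    return &main_interp.gil;
}

int
main(void)
{
    struct interp shared = { .isolated = 0 };
    struct interp isolated = { .isolated = 1 };

    printf("shared resolves to main's GIL: %s\n",
           get_gil(&shared) == &main_interp.gil ? "yes" : "no");
    printf("isolated resolves to its own GIL: %s\n",
           get_gil(&isolated) == &isolated.gil ? "yes" : "no");
    return 0;
}

The design consequence visible throughout the patch: every interpreter embeds storage for a GIL (interp->ceval.gil), but only an interpreter for which _get_gil() returns its own embedded copy actually initializes, creates, and destroys it. That is why _PyEval_InitState() calls _gil_initialize() only when gil == _GET_OWN_GIL(interp), and why _PyEval_InitGIL() and _PyEval_FiniGIL() return early for interpreters running on a shared GIL.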