From e7a447705263415d03ee4aacbd8194723b4b0126 Mon Sep 17 00:00:00 2001 From: Alex Turner Date: Fri, 26 Apr 2024 02:39:50 -0700 Subject: [PATCH 1/7] TSAN: fix race between atomic and non-atomic mutation of gcstate->young.count --- Python/gc_free_threading.c | 3 ++- Tools/tsan/suppressions_free_threading.txt | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index 9cf0e989d0993f..a17d42cd93b6fe 100644 --- a/Python/gc_free_threading.c +++ b/Python/gc_free_threading.c @@ -1159,7 +1159,8 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason) if (generation+1 < NUM_GENERATIONS) { gcstate->old[generation].count += 1; } - gcstate->young.count = 0; + + _Py_atomic_store_int(&gcstate->young.count, 0); for (i = 1; i <= generation; i++) { gcstate->old[i-1].count = 0; } diff --git a/Tools/tsan/suppressions_free_threading.txt b/Tools/tsan/suppressions_free_threading.txt index 1408103ba80f96..a98e65fdfc8bd9 100644 --- a/Tools/tsan/suppressions_free_threading.txt +++ b/Tools/tsan/suppressions_free_threading.txt @@ -30,7 +30,6 @@ race:assign_version_tag race:compare_unicode_unicode race:delitem_common race:dictresize -race:gc_collect_main race:gc_restore_tid race:initialize_new_array race:insertdict From 5457972173853c96b15148f1d925b0af735a911d Mon Sep 17 00:00:00 2001 From: "blurb-it[bot]" <43283697+blurb-it[bot]@users.noreply.github.com> Date: Fri, 26 Apr 2024 12:08:34 +0000 Subject: [PATCH 2/7] =?UTF-8?q?=F0=9F=93=9C=F0=9F=A4=96=20Added=20by=20blu?= =?UTF-8?q?rb=5Fit.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../2024-04-26-12-08-33.gh-issue-117657.D6Zd73.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 Misc/NEWS.d/next/Core and Builtins/2024-04-26-12-08-33.gh-issue-117657.D6Zd73.rst diff --git a/Misc/NEWS.d/next/Core and Builtins/2024-04-26-12-08-33.gh-issue-117657.D6Zd73.rst b/Misc/NEWS.d/next/Core and Builtins/2024-04-26-12-08-33.gh-issue-117657.D6Zd73.rst new file mode 100644 index 00000000000000..03e7031e6174a5 --- /dev/null +++ b/Misc/NEWS.d/next/Core and Builtins/2024-04-26-12-08-33.gh-issue-117657.D6Zd73.rst @@ -0,0 +1 @@ +Continuing to whack-a-mole TSAN issues found in No-GIL tests.
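PATCH 1/7 above makes the reset of gcstate->young.count an atomic store so that it pairs with the atomic updates other threads perform on that field; mixing a single plain store with otherwise-atomic accesses to the same location is exactly the kind of race ThreadSanitizer reports, which is why the race:gc_collect_main suppression can then be dropped. Below is a minimal stand-alone sketch of the same pattern using portable C11 atomics rather than CPython's internal _Py_atomic_* helpers; the names young_count and mutator and the loop bound are illustrative assumptions, not code from this series.

/* Sketch only: models the race class fixed in PATCH 1/7 with C11 atomics.
 * In CPython the counter is gcstate->young.count, updated through the
 * internal _Py_atomic_* wrappers; here it is a C11 _Atomic int so both
 * sides of the sketch use explicit atomic operations.
 * Build: cc -std=c11 -fsanitize=thread -pthread tsan_sketch.c
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static _Atomic int young_count;   /* stands in for gcstate->young.count */

static void *mutator(void *arg)
{
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        /* other threads bump the counter atomically */
        atomic_fetch_add_explicit(&young_count, 1, memory_order_relaxed);
    }
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, mutator, NULL);

    /* The pre-patch reset was a plain store (gcstate->young.count = 0;),
     * which TSAN flags when it overlaps with the atomic increments above.
     * The patch makes the reset atomic as well, the equivalent of: */
    atomic_store_explicit(&young_count, 0, memory_order_relaxed);

    pthread_join(t, NULL);
    printf("final count: %d\n", atomic_load(&young_count));
    return 0;
}

Note that PATCH 4/7 later moves the counter reset inside the stop-the-world section of gc_collect_internal(), which removes the concurrent access entirely rather than relying on the atomic store.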
From d83833c8d36d9326dc857e4bbf9285e8592cc567 Mon Sep 17 00:00:00 2001 From: Alex Turner Date: Fri, 26 Apr 2024 09:38:50 -0700 Subject: [PATCH 3/7] merge with upstream --- .github/workflows/reusable-docs.yml | 2 +- Doc/conf.py | 4 +- Doc/requirements-oldest-sphinx.txt | 38 +-- Doc/tools/extensions/c_annotations.py | 11 - Doc/tools/extensions/peg_highlight.py | 2 + Doc/tools/extensions/pyspecific.py | 8 +- Include/cpython/dictobject.h | 4 + Include/internal/pycore_ceval.h | 6 +- Include/internal/pycore_ceval_state.h | 52 ++- Include/internal/pycore_object.h | 2 +- Include/internal/pycore_runtime_init.h | 8 + Include/internal/pycore_typeobject.h | 1 + Include/object.h | 3 +- Lib/ipaddress.py | 5 +- Lib/test/test_capi/test_mem.py | 2 +- Lib/test/test_capi/test_misc.py | 85 ++++- Lib/test/test_gdb/test_backtrace.py | 2 +- Lib/test/test_ipaddress.py | 16 + .../test_tkinter/test_geometry_managers.py | 4 - Lib/test/test_tkinter/test_widgets.py | 8 - Lib/test/test_ttk/test_widgets.py | 8 - ...-04-25-22-12-20.gh-issue-117928.LKdTno.rst | 1 + ...-04-05-15-51-01.gh-issue-117566.54nABf.rst | 3 + Modules/_decimal/_decimal.c | 8 +- Modules/_testcapimodule.c | 53 ++- Modules/_testinternalcapi.c | 63 ++-- Objects/dictobject.c | 302 ++++++++++-------- Objects/setobject.c | 7 + Objects/typeobject.c | 52 ++- Python/ceval_gil.c | 159 +++++---- Tools/tsan/suppressions_free_threading.txt | 9 +- 31 files changed, 601 insertions(+), 327 deletions(-) create mode 100644 Misc/NEWS.d/next/Documentation/2024-04-25-22-12-20.gh-issue-117928.LKdTno.rst create mode 100644 Misc/NEWS.d/next/Library/2024-04-05-15-51-01.gh-issue-117566.54nABf.rst diff --git a/.github/workflows/reusable-docs.yml b/.github/workflows/reusable-docs.yml index cea8f93d67b29c..9e26d7847d2bd3 100644 --- a/.github/workflows/reusable-docs.yml +++ b/.github/workflows/reusable-docs.yml @@ -74,7 +74,7 @@ jobs: - name: 'Set up Python' uses: actions/setup-python@v5 with: - python-version: '3.11' # known to work with Sphinx 4.2 + python-version: '3.12' # known to work with Sphinx 6.2.1 cache: 'pip' cache-dependency-path: 'Doc/requirements-oldest-sphinx.txt' - name: 'Install build dependencies' diff --git a/Doc/conf.py b/Doc/conf.py index e7b688e9e6e0a8..73abe8276f29fe 100644 --- a/Doc/conf.py +++ b/Doc/conf.py @@ -298,8 +298,8 @@ 'languages': ['ja', 'fr', 'zh_TW', 'zh_CN'], 'builders': ['man', 'text'], } -# Avoid a warning with Sphinx >= 2.0 -master_doc = 'contents' +# Avoid a warning with Sphinx >= 4.0 +root_doc = 'contents' # Allow translation of index directives gettext_additional_targets = [ diff --git a/Doc/requirements-oldest-sphinx.txt b/Doc/requirements-oldest-sphinx.txt index 597341d99ffeaa..9a5f4f3676e089 100644 --- a/Doc/requirements-oldest-sphinx.txt +++ b/Doc/requirements-oldest-sphinx.txt @@ -7,29 +7,29 @@ blurb python-docs-theme>=2022.1 # Generated from: -# pip install "Sphinx~=4.2.0" +# pip install "Sphinx~=6.2.1" # pip freeze # -# Sphinx 4.2 comes from ``needs_sphinx = '4.2'`` in ``Doc/conf.py``. +# Sphinx 6.2.1 comes from ``needs_sphinx = '6.2.1'`` in ``Doc/conf.py``. 
-alabaster==0.7.13 -Babel==2.13.0 -certifi==2023.7.22 -charset-normalizer==3.3.0 -docutils==0.17.1 -idna==3.4 +alabaster==0.7.16 +Babel==2.14.0 +certifi==2024.2.2 +charset-normalizer==3.3.2 +docutils==0.19 +idna==3.7 imagesize==1.4.1 -Jinja2==3.1.2 -MarkupSafe==2.1.3 -packaging==23.2 -Pygments==2.16.1 +Jinja2==3.1.3 +MarkupSafe==2.1.5 +packaging==24.0 +Pygments==2.17.2 requests==2.31.0 snowballstemmer==2.2.0 -Sphinx==4.2.0 -sphinxcontrib-applehelp==1.0.4 -sphinxcontrib-devhelp==1.0.2 -sphinxcontrib-htmlhelp==2.0.1 +Sphinx==6.2.1 +sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-htmlhelp==2.0.5 sphinxcontrib-jsmath==1.0.1 -sphinxcontrib-qthelp==1.0.3 -sphinxcontrib-serializinghtml==1.1.5 -urllib3==2.0.7 +sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-serializinghtml==1.1.10 +urllib3==2.2.1 diff --git a/Doc/tools/extensions/c_annotations.py b/Doc/tools/extensions/c_annotations.py index abd0a8c817f154..7916b178f1c0f1 100644 --- a/Doc/tools/extensions/c_annotations.py +++ b/Doc/tools/extensions/c_annotations.py @@ -19,7 +19,6 @@ """ from os import path -import docutils from docutils import nodes from docutils.parsers.rst import directives from docutils.parsers.rst import Directive @@ -40,16 +39,6 @@ } -# Monkeypatch nodes.Node.findall for forwards compatibility -# This patch can be dropped when the minimum Sphinx version is 4.4.0 -# or the minimum Docutils version is 0.18.1. -if docutils.__version_info__ < (0, 18, 1): - def findall(self, *args, **kwargs): - return iter(self.traverse(*args, **kwargs)) - - nodes.Node.findall = findall - - class RCEntry: def __init__(self, name): self.name = name diff --git a/Doc/tools/extensions/peg_highlight.py b/Doc/tools/extensions/peg_highlight.py index 4bdc2ee1861334..5ab5530d269901 100644 --- a/Doc/tools/extensions/peg_highlight.py +++ b/Doc/tools/extensions/peg_highlight.py @@ -16,6 +16,7 @@ class PEGLexer(RegexLexer): - Rule types - Rule options - Rules named `invalid_*` or `incorrect_*` + - Rules with `RAISE_SYNTAX_ERROR` """ name = "PEG" @@ -59,6 +60,7 @@ class PEGLexer(RegexLexer): (r"^(\s+\|\s+.*invalid_\w+.*\n)", bygroups(None)), (r"^(\s+\|\s+.*incorrect_\w+.*\n)", bygroups(None)), (r"^(#.*invalid syntax.*(?:.|\n)*)", bygroups(None),), + (r"^(\s+\|\s+.*\{[^}]*RAISE_SYNTAX_ERROR[^}]*\})\n", bygroups(None)), ], "root": [ include("invalids"), diff --git a/Doc/tools/extensions/pyspecific.py b/Doc/tools/extensions/pyspecific.py index 8c88612cf68180..44db77af5d24d3 100644 --- a/Doc/tools/extensions/pyspecific.py +++ b/Doc/tools/extensions/pyspecific.py @@ -27,13 +27,7 @@ from sphinx.util import logging from sphinx.util.docutils import SphinxDirective from sphinx.writers.text import TextWriter, TextTranslator - -try: - # Sphinx 6+ - from sphinx.util.display import status_iterator -except ImportError: - # Deprecated in Sphinx 6.1, will be removed in Sphinx 8 - from sphinx.util import status_iterator +from sphinx.util.display import status_iterator ISSUE_URI = 'https://bugs.python.org/issue?@action=redirect&bpo=%s' diff --git a/Include/cpython/dictobject.h b/Include/cpython/dictobject.h index 35b6a822a0dfff..3fd23b9313c453 100644 --- a/Include/cpython/dictobject.h +++ b/Include/cpython/dictobject.h @@ -56,7 +56,11 @@ static inline Py_ssize_t PyDict_GET_SIZE(PyObject *op) { PyDictObject *mp; assert(PyDict_Check(op)); mp = _Py_CAST(PyDictObject*, op); +#ifdef Py_GIL_DISABLED + return _Py_atomic_load_ssize_relaxed(&mp->ma_used); +#else return mp->ma_used; +#endif } #define PyDict_GET_SIZE(op) PyDict_GET_SIZE(_PyObject_CAST(op)) diff --git 
a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h index 8d88b5c1d15cb8..cfb88c3f4c8e15 100644 --- a/Include/internal/pycore_ceval.h +++ b/Include/internal/pycore_ceval.h @@ -48,8 +48,12 @@ extern void _PyEval_SignalReceived(void); #define _Py_PENDING_MAINTHREADONLY 1 #define _Py_PENDING_RAWFREE 2 +typedef int _Py_add_pending_call_result; +#define _Py_ADD_PENDING_SUCCESS 0 +#define _Py_ADD_PENDING_FULL -1 + // Export for '_testinternalcapi' shared extension -PyAPI_FUNC(int) _PyEval_AddPendingCall( +PyAPI_FUNC(_Py_add_pending_call_result) _PyEval_AddPendingCall( PyInterpreterState *interp, _Py_pending_call_func func, void *arg, diff --git a/Include/internal/pycore_ceval_state.h b/Include/internal/pycore_ceval_state.h index 168295534e036c..1831f58899b745 100644 --- a/Include/internal/pycore_ceval_state.h +++ b/Include/internal/pycore_ceval_state.h @@ -14,28 +14,56 @@ extern "C" { typedef int (*_Py_pending_call_func)(void *); +struct _pending_call { + _Py_pending_call_func func; + void *arg; + int flags; +}; + +#define PENDINGCALLSARRAYSIZE 32 + +#define MAXPENDINGCALLS PENDINGCALLSARRAYSIZE +/* For interpreter-level pending calls, we want to avoid spending too + much time on pending calls in any one thread, so we apply a limit. */ +#if MAXPENDINGCALLS > 100 +# define MAXPENDINGCALLSLOOP 100 +#else +# define MAXPENDINGCALLSLOOP MAXPENDINGCALLS +#endif + +#define MAXPENDINGCALLS_MAIN PENDINGCALLSARRAYSIZE +/* For the main thread, we want to make sure all pending calls are + run at once, for the sake of prompt signal handling. This is + unlikely to cause any problems since there should be very few + pending calls for the main thread. */ +#define MAXPENDINGCALLSLOOP_MAIN 0 + struct _pending_calls { int busy; PyMutex mutex; /* Request for running pending calls. */ - int32_t calls_to_do; -#define NPENDINGCALLS 32 - struct _pending_call { - _Py_pending_call_func func; - void *arg; - int flags; - } calls[NPENDINGCALLS]; + int32_t npending; + /* The maximum allowed number of pending calls. + If the queue fills up to this point then _PyEval_AddPendingCall() + will return _Py_ADD_PENDING_FULL. */ + int32_t max; + /* We don't want a flood of pending calls to interrupt any one thread + for too long, so we keep a limit on the number handled per pass. + A value of 0 means there is no limit (other than the maximum + size of the list of pending calls). */ + int32_t maxloop; + struct _pending_call calls[PENDINGCALLSARRAYSIZE]; int first; - int last; + int next; }; + typedef enum { PERF_STATUS_FAILED = -1, // Perf trampoline is in an invalid state PERF_STATUS_NO_INIT = 0, // Perf trampoline is not initialized PERF_STATUS_OK = 1, // Perf trampoline is ready to be executed } perf_status_t; - #ifdef PY_HAVE_PERF_TRAMPOLINE struct code_arena_st; @@ -48,6 +76,7 @@ struct trampoline_api_st { }; #endif + struct _ceval_runtime_state { struct { #ifdef PY_HAVE_PERF_TRAMPOLINE @@ -62,10 +91,15 @@ struct _ceval_runtime_state { #endif } perf; /* Pending calls to be made only on the main thread. */ + // The signal machinery falls back on this + // so it must be especially stable and efficient. + // For example, we use a preallocated array + // for the list of pending calls. 
struct _pending_calls pending_mainthread; PyMutex sys_trace_profile_mutex; }; + #ifdef PY_HAVE_PERF_TRAMPOLINE # define _PyEval_RUNTIME_PERF_INIT \ { \ diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h index 88b052f4544b15..7df8003196d8cc 100644 --- a/Include/internal/pycore_object.h +++ b/Include/internal/pycore_object.h @@ -688,7 +688,7 @@ static inline PyDictObject * _PyObject_GetManagedDict(PyObject *obj) { PyManagedDictPointer *dorv = _PyObject_ManagedDictPointer(obj); - return (PyDictObject *)FT_ATOMIC_LOAD_PTR_RELAXED(dorv->dict); + return (PyDictObject *)FT_ATOMIC_LOAD_PTR_ACQUIRE(dorv->dict); } static inline PyDictValues * diff --git a/Include/internal/pycore_runtime_init.h b/Include/internal/pycore_runtime_init.h index 33c7a9dadfd2a1..41331df8320a9c 100644 --- a/Include/internal/pycore_runtime_init.h +++ b/Include/internal/pycore_runtime_init.h @@ -114,6 +114,10 @@ extern PyTypeObject _PyExc_MemoryError; .autoTSSkey = Py_tss_NEEDS_INIT, \ .parser = _parser_runtime_state_INIT, \ .ceval = { \ + .pending_mainthread = { \ + .max = MAXPENDINGCALLS_MAIN, \ + .maxloop = MAXPENDINGCALLSLOOP_MAIN, \ + }, \ .perf = _PyEval_RUNTIME_PERF_INIT, \ }, \ .gilstate = { \ @@ -166,6 +170,10 @@ extern PyTypeObject _PyExc_MemoryError; .imports = IMPORTS_INIT, \ .ceval = { \ .recursion_limit = Py_DEFAULT_RECURSION_LIMIT, \ + .pending = { \ + .max = MAXPENDINGCALLS, \ + .maxloop = MAXPENDINGCALLSLOOP, \ + }, \ }, \ .gc = { \ .enabled = 1, \ diff --git a/Include/internal/pycore_typeobject.h b/Include/internal/pycore_typeobject.h index 09c4501c38c935..7e533bd138469b 100644 --- a/Include/internal/pycore_typeobject.h +++ b/Include/internal/pycore_typeobject.h @@ -164,6 +164,7 @@ extern PyObject * _PyType_GetBases(PyTypeObject *type); extern PyObject * _PyType_GetMRO(PyTypeObject *type); extern PyObject* _PyType_GetSubclasses(PyTypeObject *); extern int _PyType_HasSubclasses(PyTypeObject *); +PyAPI_FUNC(PyObject *) _PyType_GetModuleByDef2(PyTypeObject *, PyTypeObject *, PyModuleDef *); // PyType_Ready() must be called if _PyType_IsReady() is false. // See also the Py_TPFLAGS_READY flag. diff --git a/Include/object.h b/Include/object.h index ffcacf1a3ef4ed..5aaf11c5194f0e 100644 --- a/Include/object.h +++ b/Include/object.h @@ -349,7 +349,8 @@ static inline Py_ssize_t Py_SIZE(PyObject *ob) { static inline Py_ALWAYS_INLINE int _Py_IsImmortal(PyObject *op) { #if defined(Py_GIL_DISABLED) - return (op->ob_ref_local == _Py_IMMORTAL_REFCNT_LOCAL); + return (_Py_atomic_load_uint32_relaxed(&op->ob_ref_local) == + _Py_IMMORTAL_REFCNT_LOCAL); #elif SIZEOF_VOID_P > 4 return (_Py_CAST(PY_INT32_T, op->ob_refcnt) < 0); #else diff --git a/Lib/ipaddress.py b/Lib/ipaddress.py index 22cdfc93d8ad32..8e4d49c859534d 100644 --- a/Lib/ipaddress.py +++ b/Lib/ipaddress.py @@ -2142,6 +2142,9 @@ def is_loopback(self): RFC 2373 2.5.3. 
""" + ipv4_mapped = self.ipv4_mapped + if ipv4_mapped is not None: + return ipv4_mapped.is_loopback return self._ip == 1 @property @@ -2258,7 +2261,7 @@ def is_unspecified(self): @property def is_loopback(self): - return self._ip == 1 and self.network.is_loopback + return super().is_loopback and self.network.is_loopback class IPv6Network(_BaseV6, _BaseNetwork): diff --git a/Lib/test/test_capi/test_mem.py b/Lib/test/test_capi/test_mem.py index 296601e8ee4f5f..6ab7b685c2e18b 100644 --- a/Lib/test/test_capi/test_mem.py +++ b/Lib/test/test_capi/test_mem.py @@ -153,7 +153,7 @@ class C(): pass # free-threading requires mimalloc (not malloc) -@support.requires_gil_enabled +@support.requires_gil_enabled() class PyMemMallocDebugTests(PyMemDebugTests): PYTHONMALLOC = 'malloc_debug' diff --git a/Lib/test/test_capi/test_misc.py b/Lib/test/test_capi/test_misc.py index 0701eafb7c36e0..49d1056f050467 100644 --- a/Lib/test/test_capi/test_misc.py +++ b/Lib/test/test_capi/test_misc.py @@ -1172,6 +1172,12 @@ class MyType: self.assertEqual(get_type_fullyqualname(MyType), 'my_qualname') + def test_gen_get_code(self): + def genf(): yield + gen = genf() + self.assertEqual(_testcapi.gen_get_code(gen), gen.gi_code) + + @requires_limited_api class TestHeapTypeRelative(unittest.TestCase): """Test API for extending opaque types (PEP 697)""" @@ -1452,7 +1458,7 @@ class TestPendingCalls(unittest.TestCase): # about when pending calls get run. This is especially relevant # here for creating deterministic tests. - def pendingcalls_submit(self, l, n): + def main_pendingcalls_submit(self, l, n): def callback(): #this function can be interrupted by thread switching so let's #use an atomic operation @@ -1467,12 +1473,27 @@ def callback(): if _testcapi._pending_threadfunc(callback): break - def pendingcalls_wait(self, l, n, context = None): + def pendingcalls_submit(self, l, n, *, main=True, ensure=False): + def callback(): + #this function can be interrupted by thread switching so let's + #use an atomic operation + l.append(None) + + if main: + return _testcapi._pending_threadfunc(callback, n, + blocking=False, + ensure_added=ensure) + else: + return _testinternalcapi.pending_threadfunc(callback, n, + blocking=False, + ensure_added=ensure) + + def pendingcalls_wait(self, l, numadded, context = None): #now, stick around until l[0] has grown to 10 count = 0 - while len(l) != n: + while len(l) != numadded: #this busy loop is where we expect to be interrupted to - #run our callbacks. Note that callbacks are only run on the + #run our callbacks. 
Note that some callbacks are only run on the #main thread if False and support.verbose: print("(%i)"%(len(l),),) @@ -1482,12 +1503,12 @@ def pendingcalls_wait(self, l, n, context = None): continue count += 1 self.assertTrue(count < 10000, - "timeout waiting for %i callbacks, got %i"%(n, len(l))) + "timeout waiting for %i callbacks, got %i"%(numadded, len(l))) if False and support.verbose: print("(%i)"%(len(l),)) @threading_helper.requires_working_threading() - def test_pendingcalls_threaded(self): + def test_main_pendingcalls_threaded(self): #do every callback on a separate thread n = 32 #total callbacks @@ -1501,15 +1522,15 @@ class foo(object):pass context.lock = threading.Lock() context.event = threading.Event() - threads = [threading.Thread(target=self.pendingcalls_thread, + threads = [threading.Thread(target=self.main_pendingcalls_thread, args=(context,)) for i in range(context.nThreads)] with threading_helper.start_threads(threads): self.pendingcalls_wait(context.l, n, context) - def pendingcalls_thread(self, context): + def main_pendingcalls_thread(self, context): try: - self.pendingcalls_submit(context.l, context.n) + self.main_pendingcalls_submit(context.l, context.n) finally: with context.lock: context.nFinished += 1 @@ -1519,20 +1540,54 @@ def pendingcalls_thread(self, context): if nFinished == context.nThreads: context.event.set() - def test_pendingcalls_non_threaded(self): + def test_main_pendingcalls_non_threaded(self): #again, just using the main thread, likely they will all be dispatched at #once. It is ok to ask for too many, because we loop until we find a slot. #the loop can be interrupted to dispatch. #there are only 32 dispatch slots, so we go for twice that! l = [] n = 64 - self.pendingcalls_submit(l, n) + self.main_pendingcalls_submit(l, n) self.pendingcalls_wait(l, n) - def test_gen_get_code(self): - def genf(): yield - gen = genf() - self.assertEqual(_testcapi.gen_get_code(gen), gen.gi_code) + def test_max_pending(self): + with self.subTest('main-only'): + maxpending = 32 + + l = [] + added = self.pendingcalls_submit(l, 1, main=True) + self.pendingcalls_wait(l, added) + self.assertEqual(added, 1) + + l = [] + added = self.pendingcalls_submit(l, maxpending, main=True) + self.pendingcalls_wait(l, added) + self.assertEqual(added, maxpending) + + l = [] + added = self.pendingcalls_submit(l, maxpending+1, main=True) + self.pendingcalls_wait(l, added) + self.assertEqual(added, maxpending) + + with self.subTest('not main-only'): + # Per-interpreter pending calls has the same low limit + # on how many may be pending at a time. 
+ maxpending = 32 + + l = [] + added = self.pendingcalls_submit(l, 1, main=False) + self.pendingcalls_wait(l, added) + self.assertEqual(added, 1) + + l = [] + added = self.pendingcalls_submit(l, maxpending, main=False) + self.pendingcalls_wait(l, added) + self.assertEqual(added, maxpending) + + l = [] + added = self.pendingcalls_submit(l, maxpending+1, main=False) + self.pendingcalls_wait(l, added) + self.assertEqual(added, maxpending) class PendingTask(types.SimpleNamespace): diff --git a/Lib/test/test_gdb/test_backtrace.py b/Lib/test/test_gdb/test_backtrace.py index fe67bf9ecc8880..714853c7b4732d 100644 --- a/Lib/test/test_gdb/test_backtrace.py +++ b/Lib/test/test_gdb/test_backtrace.py @@ -49,7 +49,7 @@ def test_bt_full(self): @unittest.skipIf(python_is_optimized(), "Python was compiled with optimizations") - @support.requires_gil_enabled + @support.requires_gil_enabled() @support.requires_resource('cpu') def test_threads(self): 'Verify that "py-bt" indicates threads that are waiting for the GIL' diff --git a/Lib/test/test_ipaddress.py b/Lib/test/test_ipaddress.py index f1519df673747a..c3ecf2a742941a 100644 --- a/Lib/test/test_ipaddress.py +++ b/Lib/test/test_ipaddress.py @@ -2446,6 +2446,22 @@ def testIpv4MappedPrivateCheck(self): self.assertEqual( False, ipaddress.ip_address('::ffff:172.32.0.0').is_private) + def testIpv4MappedLoopbackCheck(self): + # test networks + self.assertEqual(True, ipaddress.ip_network( + '::ffff:127.100.200.254/128').is_loopback) + self.assertEqual(True, ipaddress.ip_network( + '::ffff:127.42.0.0/112').is_loopback) + self.assertEqual(False, ipaddress.ip_network( + '::ffff:128.0.0.0').is_loopback) + # test addresses + self.assertEqual(True, ipaddress.ip_address( + '::ffff:127.100.200.254').is_loopback) + self.assertEqual(True, ipaddress.ip_address( + '::ffff:127.42.0.0').is_loopback) + self.assertEqual(False, ipaddress.ip_address( + '::ffff:128.0.0.0').is_loopback) + def testAddrExclude(self): addr1 = ipaddress.ip_network('10.1.1.0/24') addr2 = ipaddress.ip_network('10.1.1.0/26') diff --git a/Lib/test/test_tkinter/test_geometry_managers.py b/Lib/test/test_tkinter/test_geometry_managers.py index 59fe592b492adc..f8f1c895c56340 100644 --- a/Lib/test/test_tkinter/test_geometry_managers.py +++ b/Lib/test/test_tkinter/test_geometry_managers.py @@ -893,9 +893,5 @@ def test_grid_slaves(self): self.assertEqual(self.root.grid_slaves(row=1, column=1), [d, c]) -tests_gui = ( - PackTest, PlaceTest, GridTest, -) - if __name__ == '__main__': unittest.main() diff --git a/Lib/test/test_tkinter/test_widgets.py b/Lib/test/test_tkinter/test_widgets.py index d3f942db7baf9a..85bf5ff7652b69 100644 --- a/Lib/test/test_tkinter/test_widgets.py +++ b/Lib/test/test_tkinter/test_widgets.py @@ -1479,13 +1479,5 @@ def test_label(self): self._test_widget(tkinter.Label) -tests_gui = ( - ButtonTest, CanvasTest, CheckbuttonTest, EntryTest, - FrameTest, LabelFrameTest,LabelTest, ListboxTest, - MenubuttonTest, MenuTest, MessageTest, OptionMenuTest, - PanedWindowTest, RadiobuttonTest, ScaleTest, ScrollbarTest, - SpinboxTest, TextTest, ToplevelTest, DefaultRootTest, -) - if __name__ == '__main__': unittest.main() diff --git a/Lib/test/test_ttk/test_widgets.py b/Lib/test/test_ttk/test_widgets.py index e3e440c45859f7..ca7402b276013d 100644 --- a/Lib/test/test_ttk/test_widgets.py +++ b/Lib/test/test_ttk/test_widgets.py @@ -1879,13 +1879,5 @@ def test_label(self): self._test_widget(ttk.Label) -tests_gui = ( - ButtonTest, CheckbuttonTest, ComboboxTest, EntryTest, - FrameTest, LabelFrameTest, LabelTest, 
MenubuttonTest, - NotebookTest, PanedWindowTest, ProgressbarTest, - RadiobuttonTest, ScaleTest, ScrollbarTest, SeparatorTest, - SizegripTest, SpinboxTest, TreeviewTest, WidgetTest, DefaultRootTest, - ) - if __name__ == "__main__": unittest.main() diff --git a/Misc/NEWS.d/next/Documentation/2024-04-25-22-12-20.gh-issue-117928.LKdTno.rst b/Misc/NEWS.d/next/Documentation/2024-04-25-22-12-20.gh-issue-117928.LKdTno.rst new file mode 100644 index 00000000000000..c8a2a6b759bae9 --- /dev/null +++ b/Misc/NEWS.d/next/Documentation/2024-04-25-22-12-20.gh-issue-117928.LKdTno.rst @@ -0,0 +1 @@ +The minimum Sphinx version required for the documentation is now 6.2.1. diff --git a/Misc/NEWS.d/next/Library/2024-04-05-15-51-01.gh-issue-117566.54nABf.rst b/Misc/NEWS.d/next/Library/2024-04-05-15-51-01.gh-issue-117566.54nABf.rst new file mode 100644 index 00000000000000..56c2fb0e25d494 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2024-04-05-15-51-01.gh-issue-117566.54nABf.rst @@ -0,0 +1,3 @@ +:meth:`ipaddress.IPv6Address.is_loopback` will now return ``True`` for +IPv4-mapped loopback addresses, i.e. addresses in the +``::ffff:127.0.0.0/104`` address space. diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c index 2481455ac0d143..fa425f4f740d31 100644 --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -32,6 +32,7 @@ #include #include "pycore_long.h" // _PyLong_IsZero() #include "pycore_pystate.h" // _PyThreadState_GET() +#include "pycore_typeobject.h" #include "complexobject.h" #include "mpdecimal.h" @@ -120,11 +121,8 @@ get_module_state_by_def(PyTypeObject *tp) static inline decimal_state * find_state_left_or_right(PyObject *left, PyObject *right) { - PyObject *mod = PyType_GetModuleByDef(Py_TYPE(left), &_decimal_module); - if (mod == NULL) { - PyErr_Clear(); - mod = PyType_GetModuleByDef(Py_TYPE(right), &_decimal_module); - } + PyObject *mod = _PyType_GetModuleByDef2(Py_TYPE(left), Py_TYPE(right), + &_decimal_module); assert(mod != NULL); return get_module_state(mod); } diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c index 034a30fa47ed30..0bdd252efdabc7 100644 --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -819,25 +819,55 @@ static int _pending_callback(void *arg) * run from any python thread. 
*/ static PyObject * -pending_threadfunc(PyObject *self, PyObject *arg) +pending_threadfunc(PyObject *self, PyObject *arg, PyObject *kwargs) { + static char *kwlist[] = {"callback", "num", + "blocking", "ensure_added", NULL}; PyObject *callable; - int r; - if (PyArg_ParseTuple(arg, "O", &callable) == 0) + unsigned int num = 1; + int blocking = 0; + int ensure_added = 0; + if (!PyArg_ParseTupleAndKeywords(arg, kwargs, + "O|I$pp:_pending_threadfunc", kwlist, + &callable, &num, &blocking, &ensure_added)) + { return NULL; + } /* create the reference for the callbackwhile we hold the lock */ - Py_INCREF(callable); + for (unsigned int i = 0; i < num; i++) { + Py_INCREF(callable); + } - Py_BEGIN_ALLOW_THREADS - r = Py_AddPendingCall(&_pending_callback, callable); - Py_END_ALLOW_THREADS + PyThreadState *save_tstate = NULL; + if (!blocking) { + save_tstate = PyEval_SaveThread(); + } + + unsigned int num_added = 0; + for (; num_added < num; num_added++) { + if (ensure_added) { + int r; + do { + r = Py_AddPendingCall(&_pending_callback, callable); + } while (r < 0); + } + else { + if (Py_AddPendingCall(&_pending_callback, callable) < 0) { + break; + } + } + } + + if (!blocking) { + PyEval_RestoreThread(save_tstate); + } - if (r<0) { + for (unsigned int i = num_added; i < num; i++) { Py_DECREF(callable); /* unsuccessful add, destroy the extra reference */ - Py_RETURN_FALSE; } - Py_RETURN_TRUE; + /* The callable is decref'ed above in each added _pending_callback(). */ + return PyLong_FromUnsignedLong((unsigned long)num_added); } /* Test PyOS_string_to_double. */ @@ -3232,7 +3262,8 @@ static PyMethodDef TestMethods[] = { {"_spawn_pthread_waiter", spawn_pthread_waiter, METH_NOARGS}, {"_end_spawned_pthread", end_spawned_pthread, METH_NOARGS}, #endif - {"_pending_threadfunc", pending_threadfunc, METH_VARARGS}, + {"_pending_threadfunc", _PyCFunction_CAST(pending_threadfunc), + METH_VARARGS|METH_KEYWORDS}, #ifdef HAVE_GETTIMEOFDAY {"profile_int", profile_int, METH_NOARGS}, #endif diff --git a/Modules/_testinternalcapi.c b/Modules/_testinternalcapi.c index cc9e1403f87ecd..b0bba3422a50a0 100644 --- a/Modules/_testinternalcapi.c +++ b/Modules/_testinternalcapi.c @@ -1062,37 +1062,56 @@ static PyObject * pending_threadfunc(PyObject *self, PyObject *args, PyObject *kwargs) { PyObject *callable; + unsigned int num = 1; + int blocking = 0; int ensure_added = 0; - static char *kwlist[] = {"", "ensure_added", NULL}; + static char *kwlist[] = {"callback", "num", + "blocking", "ensure_added", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, - "O|$p:pending_threadfunc", kwlist, - &callable, &ensure_added)) + "O|I$pp:pending_threadfunc", kwlist, + &callable, &num, &blocking, &ensure_added)) { return NULL; } PyInterpreterState *interp = _PyInterpreterState_GET(); /* create the reference for the callbackwhile we hold the lock */ - Py_INCREF(callable); + for (unsigned int i = 0; i < num; i++) { + Py_INCREF(callable); + } - int r; - Py_BEGIN_ALLOW_THREADS - r = _PyEval_AddPendingCall(interp, &_pending_callback, callable, 0); - Py_END_ALLOW_THREADS - if (r < 0) { - /* unsuccessful add */ - if (!ensure_added) { - Py_DECREF(callable); - Py_RETURN_FALSE; + PyThreadState *save_tstate = NULL; + if (!blocking) { + save_tstate = PyEval_SaveThread(); + } + + unsigned int num_added = 0; + for (; num_added < num; num_added++) { + if (ensure_added) { + _Py_add_pending_call_result r; + do { + r = _PyEval_AddPendingCall(interp, &_pending_callback, callable, 0); + assert(r == _Py_ADD_PENDING_SUCCESS + || r == _Py_ADD_PENDING_FULL); + 
} while (r == _Py_ADD_PENDING_FULL); + } + else { + if (_PyEval_AddPendingCall(interp, &_pending_callback, callable, 0) < 0) { + break; + } } - do { - Py_BEGIN_ALLOW_THREADS - r = _PyEval_AddPendingCall(interp, &_pending_callback, callable, 0); - Py_END_ALLOW_THREADS - } while (r < 0); } - Py_RETURN_TRUE; + if (!blocking) { + PyEval_RestoreThread(save_tstate); + } + + for (unsigned int i = num_added; i < num; i++) { + Py_DECREF(callable); /* unsuccessful add, destroy the extra reference */ + } + + /* The callable is decref'ed in _pending_callback() above. */ + return PyLong_FromUnsignedLong((unsigned long)num_added); } @@ -1135,14 +1154,16 @@ pending_identify(PyObject *self, PyObject *args) PyThread_acquire_lock(mutex, WAIT_LOCK); /* It gets released in _pending_identify_callback(). */ - int r; + _Py_add_pending_call_result r; do { Py_BEGIN_ALLOW_THREADS r = _PyEval_AddPendingCall(interp, &_pending_identify_callback, (void *)mutex, 0); Py_END_ALLOW_THREADS - } while (r < 0); + assert(r == _Py_ADD_PENDING_SUCCESS + || r == _Py_ADD_PENDING_FULL); + } while (r == _Py_ADD_PENDING_FULL); /* Wait for the pending call to complete. */ PyThread_acquire_lock(mutex, WAIT_LOCK); diff --git a/Objects/dictobject.c b/Objects/dictobject.c index 2644516bc30770..43cb2350b7fc71 100644 --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -162,6 +162,16 @@ ASSERT_DICT_LOCKED(PyObject *op) assert(_Py_IsOwnedByCurrentThread((PyObject *)mp) || IS_DICT_SHARED(mp)); #define LOAD_KEYS_NENTRIES(d) +#define LOCK_KEYS_IF_SPLIT(keys, kind) \ + if (kind == DICT_KEYS_SPLIT) { \ + LOCK_KEYS(dk); \ + } + +#define UNLOCK_KEYS_IF_SPLIT(keys, kind) \ + if (kind == DICT_KEYS_SPLIT) { \ + UNLOCK_KEYS(dk); \ + } + static inline Py_ssize_t load_keys_nentries(PyDictObject *mp) { @@ -195,6 +205,9 @@ set_values(PyDictObject *mp, PyDictValues *values) #define DECREF_KEYS(dk) _Py_atomic_add_ssize(&dk->dk_refcnt, -1) #define LOAD_KEYS_NENTIRES(keys) _Py_atomic_load_ssize_relaxed(&keys->dk_nentries) +#define INCREF_KEYS_FT(dk) dictkeys_incref(dk) +#define DECREF_KEYS_FT(dk, shared) dictkeys_decref(_PyInterpreterState_GET(), dk, shared) + static inline void split_keys_entry_added(PyDictKeysObject *keys) { ASSERT_KEYS_LOCKED(keys); @@ -216,6 +229,10 @@ static inline void split_keys_entry_added(PyDictKeysObject *keys) #define INCREF_KEYS(dk) dk->dk_refcnt++ #define DECREF_KEYS(dk) dk->dk_refcnt-- #define LOAD_KEYS_NENTIRES(keys) keys->dk_nentries +#define INCREF_KEYS_FT(dk) +#define DECREF_KEYS_FT(dk, shared) +#define LOCK_KEYS_IF_SPLIT(keys, kind) +#define UNLOCK_KEYS_IF_SPLIT(keys, kind) #define IS_DICT_SHARED(mp) (false) #define SET_DICT_SHARED(mp) #define LOAD_INDEX(keys, size, idx) ((const int##size##_t*)(keys->dk_indices))[idx] @@ -1097,10 +1114,11 @@ compare_unicode_unicode(PyDictObject *mp, PyDictKeysObject *dk, void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash) { PyDictUnicodeEntry *ep = &((PyDictUnicodeEntry *)ep0)[ix]; - assert(ep->me_key != NULL); - assert(PyUnicode_CheckExact(ep->me_key)); - if (ep->me_key == key || - (unicode_get_hash(ep->me_key) == hash && unicode_eq(ep->me_key, key))) { + PyObject *ep_key = FT_ATOMIC_LOAD_PTR_RELAXED(ep->me_key); + assert(ep_key != NULL); + assert(PyUnicode_CheckExact(ep_key)); + if (ep_key == key || + (unicode_get_hash(ep_key) == hash && unicode_eq(ep_key, key))) { return 1; } return 0; @@ -1170,6 +1188,14 @@ _PyDictKeys_StringLookup(PyDictKeysObject* dk, PyObject *key) return unicodekeys_lookup_unicode(dk, key, hash); } +#ifdef Py_GIL_DISABLED + +static Py_ssize_t 
+unicodekeys_lookup_unicode_threadsafe(PyDictKeysObject* dk, PyObject *key, + Py_hash_t hash); + +#endif + /* The basic lookup function used by all operations. This is based on Algorithm D from Knuth Vol. 3, Sec. 6.4. @@ -1199,10 +1225,33 @@ _Py_dict_lookup(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject **valu if (kind != DICT_KEYS_GENERAL) { if (PyUnicode_CheckExact(key)) { +#ifdef Py_GIL_DISABLED + if (kind == DICT_KEYS_SPLIT) { + // A split dictionaries keys can be mutated by other + // dictionaries but if we have a unicode key we can avoid + // locking the shared keys. + ix = unicodekeys_lookup_unicode_threadsafe(dk, key, hash); + if (ix == DKIX_KEY_CHANGED) { + LOCK_KEYS(dk); + ix = unicodekeys_lookup_unicode(dk, key, hash); + UNLOCK_KEYS(dk); + } + } + else { + ix = unicodekeys_lookup_unicode(dk, key, hash); + } +#else ix = unicodekeys_lookup_unicode(dk, key, hash); +#endif } else { + INCREF_KEYS_FT(dk); + LOCK_KEYS_IF_SPLIT(dk, kind); + ix = unicodekeys_lookup_generic(mp, dk, key, hash); + + UNLOCK_KEYS_IF_SPLIT(dk, kind); + DECREF_KEYS_FT(dk, IS_DICT_SHARED(mp)); if (ix == DKIX_KEY_CHANGED) { goto start; } @@ -1606,31 +1655,6 @@ insertion_resize(PyInterpreterState *interp, PyDictObject *mp, int unicode) return dictresize(interp, mp, calculate_log2_keysize(GROWTH_RATE(mp)), unicode); } -static Py_ssize_t -insert_into_splitdictkeys(PyDictKeysObject *keys, PyObject *name, Py_hash_t hash) -{ - assert(PyUnicode_CheckExact(name)); - ASSERT_KEYS_LOCKED(keys); - - Py_ssize_t ix = unicodekeys_lookup_unicode(keys, name, hash); - if (ix == DKIX_EMPTY) { - if (keys->dk_usable <= 0) { - return DKIX_EMPTY; - } - /* Insert into new slot. */ - keys->dk_version = 0; - Py_ssize_t hashpos = find_empty_slot(keys, hash); - ix = keys->dk_nentries; - PyDictUnicodeEntry *ep = &DK_UNICODE_ENTRIES(keys)[ix]; - dictkeys_set_index(keys, hashpos, ix); - assert(ep->me_key == NULL); - ep->me_key = Py_NewRef(name); - split_keys_entry_added(keys); - } - assert (ix < SHARED_KEYS_MAX_SIZE); - return ix; -} - static inline int insert_combined_dict(PyInterpreterState *interp, PyDictObject *mp, Py_hash_t hash, PyObject *key, PyObject *value) @@ -1664,39 +1688,57 @@ insert_combined_dict(PyInterpreterState *interp, PyDictObject *mp, return 0; } -static int -insert_split_dict(PyInterpreterState *interp, PyDictObject *mp, - Py_hash_t hash, PyObject *key, PyObject *value) +static Py_ssize_t +insert_split_key(PyDictKeysObject *keys, PyObject *key, Py_hash_t hash) { - PyDictKeysObject *keys = mp->ma_keys; - LOCK_KEYS(keys); - if (keys->dk_usable <= 0) { - /* Need to resize. 
*/ - UNLOCK_KEYS(keys); - int ins = insertion_resize(interp, mp, 1); - if (ins < 0) { - return -1; - } - assert(!_PyDict_HasSplitTable(mp)); - return insert_combined_dict(interp, mp, hash, key, value); - } - - Py_ssize_t hashpos = find_empty_slot(keys, hash); - dictkeys_set_index(keys, hashpos, keys->dk_nentries); + assert(PyUnicode_CheckExact(key)); + Py_ssize_t ix; - PyDictUnicodeEntry *ep; - ep = &DK_UNICODE_ENTRIES(keys)[keys->dk_nentries]; - STORE_SHARED_KEY(ep->me_key, key); - Py_ssize_t index = keys->dk_nentries; - _PyDictValues_AddToInsertionOrder(mp->ma_values, index); - assert (mp->ma_values->values[index] == NULL); - STORE_SPLIT_VALUE(mp, index, value); +#ifdef Py_GIL_DISABLED + ix = unicodekeys_lookup_unicode_threadsafe(keys, key, hash); + if (ix >= 0) { + return ix; + } +#endif - split_keys_entry_added(keys); - assert(keys->dk_usable >= 0); + LOCK_KEYS(keys); + ix = unicodekeys_lookup_unicode(keys, key, hash); + if (ix == DKIX_EMPTY && keys->dk_usable > 0) { + // Insert into new slot + keys->dk_version = 0; + Py_ssize_t hashpos = find_empty_slot(keys, hash); + ix = keys->dk_nentries; + dictkeys_set_index(keys, hashpos, ix); + PyDictUnicodeEntry *ep = &DK_UNICODE_ENTRIES(keys)[ix]; + STORE_SHARED_KEY(ep->me_key, Py_NewRef(key)); + split_keys_entry_added(keys); + } + assert (ix < SHARED_KEYS_MAX_SIZE); UNLOCK_KEYS(keys); - return 0; + return ix; +} + +static void +insert_split_value(PyInterpreterState *interp, PyDictObject *mp, PyObject *key, PyObject *value, Py_ssize_t ix) +{ + assert(PyUnicode_CheckExact(key)); + MAINTAIN_TRACKING(mp, key, value); + PyObject *old_value = mp->ma_values->values[ix]; + if (old_value == NULL) { + uint64_t new_version = _PyDict_NotifyEvent(interp, PyDict_EVENT_ADDED, mp, key, value); + STORE_SPLIT_VALUE(mp, ix, Py_NewRef(value)); + _PyDictValues_AddToInsertionOrder(mp->ma_values, ix); + mp->ma_used++; + mp->ma_version_tag = new_version; + } + else { + uint64_t new_version = _PyDict_NotifyEvent(interp, PyDict_EVENT_MODIFIED, mp, key, value); + STORE_SPLIT_VALUE(mp, ix, Py_NewRef(value)); + mp->ma_version_tag = new_version; + Py_DECREF(old_value); + } + ASSERT_CONSISTENT(mp); } /* @@ -1719,6 +1761,21 @@ insertdict(PyInterpreterState *interp, PyDictObject *mp, assert(mp->ma_keys->dk_kind == DICT_KEYS_GENERAL); } + if (_PyDict_HasSplitTable(mp)) { + Py_ssize_t ix = insert_split_key(mp->ma_keys, key, hash); + if (ix != DKIX_EMPTY) { + insert_split_value(interp, mp, key, value, ix); + Py_DECREF(key); + Py_DECREF(value); + return 0; + } + + /* No space in shared keys. Resize and continue below. */ + if (insertion_resize(interp, mp, 1) < 0) { + goto Fail; + } + } + Py_ssize_t ix = _Py_dict_lookup(mp, key, hash, &old_value); if (ix == DKIX_ERROR) goto Fail; @@ -1726,24 +1783,17 @@ insertdict(PyInterpreterState *interp, PyDictObject *mp, MAINTAIN_TRACKING(mp, key, value); if (ix == DKIX_EMPTY) { + assert(!_PyDict_HasSplitTable(mp)); uint64_t new_version = _PyDict_NotifyEvent( interp, PyDict_EVENT_ADDED, mp, key, value); /* Insert into new slot. 
*/ mp->ma_keys->dk_version = 0; assert(old_value == NULL); - - if (!_PyDict_HasSplitTable(mp)) { - if (insert_combined_dict(interp, mp, hash, key, value) < 0) { - goto Fail; - } - } - else { - if (insert_split_dict(interp, mp, hash, key, value) < 0) - goto Fail; + if (insert_combined_dict(interp, mp, hash, key, value) < 0) { + goto Fail; } - - mp->ma_used++; mp->ma_version_tag = new_version; + mp->ma_used++; ASSERT_CONSISTENT(mp); return 0; } @@ -1751,21 +1801,15 @@ insertdict(PyInterpreterState *interp, PyDictObject *mp, if (old_value != value) { uint64_t new_version = _PyDict_NotifyEvent( interp, PyDict_EVENT_MODIFIED, mp, key, value); - if (_PyDict_HasSplitTable(mp)) { - STORE_SPLIT_VALUE(mp, ix, value); - if (old_value == NULL) { - _PyDictValues_AddToInsertionOrder(mp->ma_values, ix); - mp->ma_used++; - } + assert(old_value != NULL); + assert(!_PyDict_HasSplitTable(mp)); + if (DK_IS_UNICODE(mp->ma_keys)) { + PyDictUnicodeEntry *ep = &DK_UNICODE_ENTRIES(mp->ma_keys)[ix]; + STORE_VALUE(ep, value); } else { - assert(old_value != NULL); - if (DK_IS_UNICODE(mp->ma_keys)) { - DK_UNICODE_ENTRIES(mp->ma_keys)[ix].me_value = value; - } - else { - DK_ENTRIES(mp->ma_keys)[ix].me_value = value; - } + PyDictKeyEntry *ep = &DK_ENTRIES(mp->ma_keys)[ix]; + STORE_VALUE(ep, value); } mp->ma_version_tag = new_version; } @@ -1810,15 +1854,15 @@ insert_to_emptydict(PyInterpreterState *interp, PyDictObject *mp, if (unicode) { PyDictUnicodeEntry *ep = DK_UNICODE_ENTRIES(newkeys); ep->me_key = key; - ep->me_value = value; + STORE_VALUE(ep, value); } else { PyDictKeyEntry *ep = DK_ENTRIES(newkeys); ep->me_key = key; ep->me_hash = hash; - ep->me_value = value; + STORE_VALUE(ep, value); } - mp->ma_used++; + FT_ATOMIC_STORE_SSIZE_RELAXED(mp->ma_used, FT_ATOMIC_LOAD_SSIZE_RELAXED(mp->ma_used) + 1); mp->ma_version_tag = new_version; newkeys->dk_usable--; newkeys->dk_nentries++; @@ -2510,7 +2554,7 @@ delitem_common(PyDictObject *mp, Py_hash_t hash, Py_ssize_t ix, Py_ssize_t hashpos = lookdict_index(mp->ma_keys, hash, ix); assert(hashpos >= 0); - mp->ma_used--; + FT_ATOMIC_STORE_SSIZE_RELAXED(mp->ma_used, FT_ATOMIC_LOAD_SSIZE(mp->ma_used) - 1); mp->ma_version_tag = new_version; if (_PyDict_HasSplitTable(mp)) { assert(old_value == mp->ma_values->values[ix]); @@ -4174,6 +4218,29 @@ dict_setdefault_ref_lock_held(PyObject *d, PyObject *key, PyObject *default_valu } } + if (_PyDict_HasSplitTable(mp)) { + Py_ssize_t ix = insert_split_key(mp->ma_keys, key, hash); + if (ix != DKIX_EMPTY) { + PyObject *value = mp->ma_values->values[ix]; + int already_present = value != NULL; + if (!already_present) { + insert_split_value(interp, mp, key, default_value, ix); + value = default_value; + } + if (result) { + *result = incref_result ? Py_NewRef(value) : value; + } + return already_present; + } + + /* No space in shared keys. Resize and continue below. 
*/ + if (insertion_resize(interp, mp, 1) < 0) { + goto error; + } + } + + assert(!_PyDict_HasSplitTable(mp)); + Py_ssize_t ix = _Py_dict_lookup(mp, key, hash, &value); if (ix == DKIX_ERROR) { if (result) { @@ -4183,29 +4250,17 @@ dict_setdefault_ref_lock_held(PyObject *d, PyObject *key, PyObject *default_valu } if (ix == DKIX_EMPTY) { + assert(!_PyDict_HasSplitTable(mp)); uint64_t new_version = _PyDict_NotifyEvent( - interp, PyDict_EVENT_ADDED, mp, key, default_value); + interp, PyDict_EVENT_ADDED, mp, key, default_value); mp->ma_keys->dk_version = 0; value = default_value; - if (!_PyDict_HasSplitTable(mp)) { - if (insert_combined_dict(interp, mp, hash, Py_NewRef(key), Py_NewRef(value)) < 0) { - Py_DECREF(key); - Py_DECREF(value); - if (result) { - *result = NULL; - } - return -1; - } - } - else { - if (insert_split_dict(interp, mp, hash, Py_NewRef(key), Py_NewRef(value)) < 0) { - Py_DECREF(key); - Py_DECREF(value); - if (result) { - *result = NULL; - } - return -1; + if (insert_combined_dict(interp, mp, hash, Py_NewRef(key), Py_NewRef(value)) < 0) { + Py_DECREF(key); + Py_DECREF(value); + if (result) { + *result = NULL; } } @@ -4219,29 +4274,19 @@ dict_setdefault_ref_lock_held(PyObject *d, PyObject *key, PyObject *default_valu } return 0; } - else if (value == NULL) { - uint64_t new_version = _PyDict_NotifyEvent( - interp, PyDict_EVENT_ADDED, mp, key, default_value); - value = default_value; - assert(_PyDict_HasSplitTable(mp)); - assert(mp->ma_values->values[ix] == NULL); - MAINTAIN_TRACKING(mp, key, value); - STORE_SPLIT_VALUE(mp, ix, Py_NewRef(value)); - _PyDictValues_AddToInsertionOrder(mp->ma_values, ix); - mp->ma_used++; - mp->ma_version_tag = new_version; - ASSERT_CONSISTENT(mp); - if (result) { - *result = incref_result ? Py_NewRef(value) : value; - } - return 0; - } + assert(value != NULL); ASSERT_CONSISTENT(mp); if (result) { *result = incref_result ? 
Py_NewRef(value) : value; } return 1; + +error: + if (result) { + *result = NULL; + } + return -1; } int @@ -6696,18 +6741,7 @@ store_instance_attr_lock_held(PyObject *obj, PyDictValues *values, assert(hash != -1); } -#ifdef Py_GIL_DISABLED - // Try a thread-safe lookup to see if the index is already allocated - ix = unicodekeys_lookup_unicode_threadsafe(keys, name, hash); - if (ix == DKIX_EMPTY || ix == DKIX_KEY_CHANGED) { - // Lock keys and do insert - LOCK_KEYS(keys); - ix = insert_into_splitdictkeys(keys, name, hash); - UNLOCK_KEYS(keys); - } -#else - ix = insert_into_splitdictkeys(keys, name, hash); -#endif + ix = insert_split_key(keys, name, hash); #ifdef Py_STATS if (ix == DKIX_EMPTY) { @@ -6895,7 +6929,7 @@ _PyObject_TryGetInstanceAttribute(PyObject *obj, PyObject *name, PyObject **attr } #ifdef Py_GIL_DISABLED - PyObject *value = _Py_atomic_load_ptr_relaxed(&values->values[ix]); + PyObject *value = _Py_atomic_load_ptr_acquire(&values->values[ix]); if (value == NULL || _Py_TryIncrefCompare(&values->values[ix], value)) { *attr = value; return true; diff --git a/Objects/setobject.c b/Objects/setobject.c index 0d88f4ff922d24..19975e3d4d18e2 100644 --- a/Objects/setobject.c +++ b/Objects/setobject.c @@ -2333,6 +2333,13 @@ set_init(PySetObject *self, PyObject *args, PyObject *kwds) if (!PyArg_UnpackTuple(args, Py_TYPE(self)->tp_name, 0, 1, &iterable)) return -1; + if (Py_REFCNT(self) == 1 && self->fill == 0) { + self->hash = -1; + if (iterable == NULL) { + return 0; + } + return set_update_local(self, iterable); + } Py_BEGIN_CRITICAL_SECTION(self); if (self->fill) set_clear_internal(self); diff --git a/Objects/typeobject.c b/Objects/typeobject.c index 808e11fcbaf1ff..07e0a5a02da87f 100644 --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -4825,11 +4825,24 @@ PyType_GetModuleState(PyTypeObject *type) /* Get the module of the first superclass where the module has the * given PyModuleDef. */ -PyObject * -PyType_GetModuleByDef(PyTypeObject *type, PyModuleDef *def) +static inline PyObject * +get_module_by_def(PyTypeObject *type, PyModuleDef *def) { assert(PyType_Check(type)); + if (!_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE)) { + // type_ready_mro() ensures that no heap type is + // contained in a static type MRO. + return NULL; + } + else { + PyHeapTypeObject *ht = (PyHeapTypeObject*)type; + PyObject *module = ht->ht_module; + if (module && _PyModule_GetDef(module) == def) { + return module; + } + } + PyObject *res = NULL; BEGIN_TYPE_LOCK() @@ -4837,12 +4850,14 @@ PyType_GetModuleByDef(PyTypeObject *type, PyModuleDef *def) // The type must be ready assert(mro != NULL); assert(PyTuple_Check(mro)); - // mro_invoke() ensures that the type MRO cannot be empty, so we don't have - // to check i < PyTuple_GET_SIZE(mro) at the first loop iteration. + // mro_invoke() ensures that the type MRO cannot be empty. assert(PyTuple_GET_SIZE(mro) >= 1); + // Also, the first item in the MRO is the type itself, which + // we already checked above. We skip it in the loop. 
+ assert(PyTuple_GET_ITEM(mro, 0) == (PyObject *)type); Py_ssize_t n = PyTuple_GET_SIZE(mro); - for (Py_ssize_t i = 0; i < n; i++) { + for (Py_ssize_t i = 1; i < n; i++) { PyObject *super = PyTuple_GET_ITEM(mro, i); if(!_PyType_HasFeature((PyTypeObject *)super, Py_TPFLAGS_HEAPTYPE)) { // Static types in the MRO need to be skipped @@ -4857,14 +4872,37 @@ PyType_GetModuleByDef(PyTypeObject *type, PyModuleDef *def) } } END_TYPE_LOCK() + return res; +} - if (res == NULL) { +PyObject * +PyType_GetModuleByDef(PyTypeObject *type, PyModuleDef *def) +{ + PyObject *module = get_module_by_def(type, def); + if (module == NULL) { PyErr_Format( PyExc_TypeError, "PyType_GetModuleByDef: No superclass of '%s' has the given module", type->tp_name); } - return res; + return module; +} + +PyObject * +_PyType_GetModuleByDef2(PyTypeObject *left, PyTypeObject *right, + PyModuleDef *def) +{ + PyObject *module = get_module_by_def(left, def); + if (module == NULL) { + module = get_module_by_def(right, def); + if (module == NULL) { + PyErr_Format( + PyExc_TypeError, + "PyType_GetModuleByDef: No superclass of '%s' nor '%s' has " + "the given module", left->tp_name, right->tp_name); + } + } + return module; } void * diff --git a/Python/ceval_gil.c b/Python/ceval_gil.c index c0819d8ab1d8d0..c3c2c54b199c59 100644 --- a/Python/ceval_gil.c +++ b/Python/ceval_gil.c @@ -84,15 +84,15 @@ update_eval_breaker_for_thread(PyInterpreterState *interp, PyThreadState *tstate return; #endif - int32_t calls_to_do = _Py_atomic_load_int32_relaxed( - &interp->ceval.pending.calls_to_do); - if (calls_to_do) { + int32_t npending = _Py_atomic_load_int32_relaxed( + &interp->ceval.pending.npending); + if (npending) { _Py_set_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT); } else if (_Py_IsMainThread()) { - calls_to_do = _Py_atomic_load_int32_relaxed( - &_PyRuntime.ceval.pending_mainthread.calls_to_do); - if (calls_to_do) { + npending = _Py_atomic_load_int32_relaxed( + &_PyRuntime.ceval.pending_mainthread.npending); + if (npending) { _Py_set_eval_breaker_bit(tstate, _PY_CALLS_TO_DO_BIT); } } @@ -624,6 +624,34 @@ PyEval_RestoreThread(PyThreadState *tstate) } +void +_PyEval_SignalReceived(void) +{ + _Py_set_eval_breaker_bit(_PyRuntime.main_tstate, _PY_SIGNALS_PENDING_BIT); +} + + +#ifndef Py_GIL_DISABLED +static void +signal_active_thread(PyInterpreterState *interp, uintptr_t bit) +{ + struct _gil_runtime_state *gil = interp->ceval.gil; + + // If a thread from the targeted interpreter is holding the GIL, signal + // that thread. Otherwise, the next thread to run from the targeted + // interpreter will have its bit set as part of taking the GIL. + MUTEX_LOCK(gil->mutex); + if (_Py_atomic_load_int_relaxed(&gil->locked)) { + PyThreadState *holder = (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder); + if (holder->interp == interp) { + _Py_set_eval_breaker_bit(holder, bit); + } + } + MUTEX_UNLOCK(gil->mutex); +} +#endif + + /* Mechanism whereby asynchronously executing callbacks (e.g. UNIX signal handlers or Mac I/O completion routines) can schedule calls to a function to be called synchronously. @@ -646,29 +674,31 @@ PyEval_RestoreThread(PyThreadState *tstate) threadstate. */ -void -_PyEval_SignalReceived(void) -{ - _Py_set_eval_breaker_bit(_PyRuntime.main_tstate, _PY_SIGNALS_PENDING_BIT); -} - /* Push one item onto the queue while holding the lock. 
*/ static int _push_pending_call(struct _pending_calls *pending, _Py_pending_call_func func, void *arg, int flags) { - int i = pending->last; - int j = (i + 1) % NPENDINGCALLS; - if (j == pending->first) { - return -1; /* Queue full */ + if (pending->npending == pending->max) { + return _Py_ADD_PENDING_FULL; } + assert(pending->npending < pending->max); + + int i = pending->next; + assert(pending->calls[i].func == NULL); + pending->calls[i].func = func; pending->calls[i].arg = arg; pending->calls[i].flags = flags; - pending->last = j; - assert(pending->calls_to_do < NPENDINGCALLS); - _Py_atomic_add_int32(&pending->calls_to_do, 1); - return 0; + + assert(pending->npending < PENDINGCALLSARRAYSIZE); + _Py_atomic_add_int32(&pending->npending, 1); + + pending->next = (i + 1) % PENDINGCALLSARRAYSIZE; + assert(pending->next != pending->first + || pending->npending == pending->max); + + return _Py_ADD_PENDING_SUCCESS; } static int @@ -676,8 +706,9 @@ _next_pending_call(struct _pending_calls *pending, int (**func)(void *), void **arg, int *flags) { int i = pending->first; - if (i == pending->last) { + if (pending->npending == 0) { /* Queue empty */ + assert(i == pending->next); assert(pending->calls[i].func == NULL); return -1; } @@ -695,38 +726,18 @@ _pop_pending_call(struct _pending_calls *pending, int i = _next_pending_call(pending, func, arg, flags); if (i >= 0) { pending->calls[i] = (struct _pending_call){0}; - pending->first = (i + 1) % NPENDINGCALLS; - assert(pending->calls_to_do > 0); - _Py_atomic_add_int32(&pending->calls_to_do, -1); + pending->first = (i + 1) % PENDINGCALLSARRAYSIZE; + assert(pending->npending > 0); + _Py_atomic_add_int32(&pending->npending, -1); } } -#ifndef Py_GIL_DISABLED -static void -signal_active_thread(PyInterpreterState *interp, uintptr_t bit) -{ - struct _gil_runtime_state *gil = interp->ceval.gil; - - // If a thread from the targeted interpreter is holding the GIL, signal - // that thread. Otherwise, the next thread to run from the targeted - // interpreter will have its bit set as part of taking the GIL. - MUTEX_LOCK(gil->mutex); - if (_Py_atomic_load_int_relaxed(&gil->locked)) { - PyThreadState *holder = (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder); - if (holder->interp == interp) { - _Py_set_eval_breaker_bit(holder, bit); - } - } - MUTEX_UNLOCK(gil->mutex); -} -#endif - /* This implementation is thread-safe. It allows scheduling to be made from any thread, and even from an executing callback. */ -int +_Py_add_pending_call_result _PyEval_AddPendingCall(PyInterpreterState *interp, _Py_pending_call_func func, void *arg, int flags) { @@ -739,7 +750,8 @@ _PyEval_AddPendingCall(PyInterpreterState *interp, } PyMutex_Lock(&pending->mutex); - int result = _push_pending_call(pending, func, arg, flags); + _Py_add_pending_call_result result = + _push_pending_call(pending, func, arg, flags); PyMutex_Unlock(&pending->mutex); if (main_only) { @@ -762,7 +774,15 @@ Py_AddPendingCall(_Py_pending_call_func func, void *arg) /* Legacy users of this API will continue to target the main thread (of the main interpreter). 
*/ PyInterpreterState *interp = _PyInterpreterState_Main(); - return _PyEval_AddPendingCall(interp, func, arg, _Py_PENDING_MAINTHREADONLY); + _Py_add_pending_call_result r = + _PyEval_AddPendingCall(interp, func, arg, _Py_PENDING_MAINTHREADONLY); + if (r == _Py_ADD_PENDING_FULL) { + return -1; + } + else { + assert(r == _Py_ADD_PENDING_SUCCESS); + return 0; + } } static int @@ -782,10 +802,21 @@ handle_signals(PyThreadState *tstate) } static int -_make_pending_calls(struct _pending_calls *pending) +_make_pending_calls(struct _pending_calls *pending, int32_t *p_npending) { + int res = 0; + int32_t npending = -1; + + assert(sizeof(pending->max) <= sizeof(size_t) + && ((size_t)pending->max) <= Py_ARRAY_LENGTH(pending->calls)); + int32_t maxloop = pending->maxloop; + if (maxloop == 0) { + maxloop = pending->max; + } + assert(maxloop > 0 && maxloop <= pending->max); + /* perform a bounded number of calls, in case of recursion */ - for (int i=0; imutex); _pop_pending_call(pending, &func, &arg, &flags); + npending = pending->npending; PyMutex_Unlock(&pending->mutex); - /* having released the lock, perform the callback */ + /* Check if there are any more pending calls. */ if (func == NULL) { + assert(npending == 0); break; } - int res = func(arg); + + /* having released the lock, perform the callback */ + res = func(arg); if ((flags & _Py_PENDING_RAWFREE) && arg != NULL) { PyMem_RawFree(arg); } if (res != 0) { - return -1; + res = -1; + goto finally; } } - return 0; + +finally: + *p_npending = npending; + return res; } static void @@ -861,26 +900,36 @@ make_pending_calls(PyThreadState *tstate) added in-between re-signals */ unsignal_pending_calls(tstate, interp); - if (_make_pending_calls(pending) != 0) { + int32_t npending; + if (_make_pending_calls(pending, &npending) != 0) { pending->busy = 0; /* There might not be more calls to make, but we play it safe. */ signal_pending_calls(tstate, interp); return -1; } + if (npending > 0) { + /* We hit pending->maxloop. */ + signal_pending_calls(tstate, interp); + } if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) { - if (_make_pending_calls(pending_main) != 0) { + if (_make_pending_calls(pending_main, &npending) != 0) { pending->busy = 0; /* There might not be more calls to make, but we play it safe. */ signal_pending_calls(tstate, interp); return -1; } + if (npending > 0) { + /* We hit pending_main->maxloop. 
*/ + signal_pending_calls(tstate, interp); + } } pending->busy = 0; return 0; } + void _Py_set_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit) { diff --git a/Tools/tsan/suppressions_free_threading.txt b/Tools/tsan/suppressions_free_threading.txt index a98e65fdfc8bd9..2eda2e276794dd 100644 --- a/Tools/tsan/suppressions_free_threading.txt +++ b/Tools/tsan/suppressions_free_threading.txt @@ -14,7 +14,6 @@ race:set_allocator_unlocked race:_add_to_weak_set race:_in_weak_set race:_mi_heap_delayed_free_partial -race:_Py_IsImmortal race:_Py_IsOwnedByCurrentThread race:_PyEval_EvalFrameDefault race:_PyFunction_SetVersion @@ -25,11 +24,10 @@ race:_PyInterpreterState_IsRunningMain race:_PyObject_GC_IS_SHARED race:_PyObject_GC_SET_SHARED race:_PyObject_GC_TRACK +# https://gist.github.com/mpage/0a24eb2dd458441ededb498e9b0e5de8 +race:_PyParkingLot_Park race:_PyType_HasFeature race:assign_version_tag -race:compare_unicode_unicode -race:delitem_common -race:dictresize race:gc_restore_tid race:initialize_new_array race:insertdict @@ -43,3 +41,6 @@ race:set_inheritable race:start_the_world race:tstate_set_detached race:unicode_hash + +# https://gist.github.com/mpage/6962e8870606cfc960e159b407a0cb40 +thread:pthread_create From edd9b7b9bbded7cfbfd11d2dbfde3ccaff4bc45d Mon Sep 17 00:00:00 2001 From: Alex Turner Date: Fri, 26 Apr 2024 09:40:09 -0700 Subject: [PATCH 4/7] TSAN: move gc counter reset into critical section --- Python/gc_free_threading.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index a17d42cd93b6fe..ca9708d974b3a5 100644 --- a/Python/gc_free_threading.c +++ b/Python/gc_free_threading.c @@ -1038,9 +1038,20 @@ record_deallocation(PyThreadState *tstate) } static void -gc_collect_internal(PyInterpreterState *interp, struct collection_state *state) +gc_collect_internal(PyInterpreterState *interp, struct collection_state *state, int generation) { _PyEval_StopTheWorld(interp); + + // update collection and allocation counters + if (generation+1 < NUM_GENERATIONS) { + state->gcstate->old[generation].count += 1; + } + + state->gcstate->young.count = 0; + for (size_t i = 1; i <= generation; ++i) { + state->gcstate->old[i-1].count = 0; + } + // merge refcounts for all queued objects merge_all_queued_objects(interp, state); process_delayed_frees(interp); @@ -1155,16 +1166,6 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason) PyDTrace_GC_START(generation); } - /* update collection and allocation counters */ - if (generation+1 < NUM_GENERATIONS) { - gcstate->old[generation].count += 1; - } - - _Py_atomic_store_int(&gcstate->young.count, 0); - for (i = 1; i <= generation; i++) { - gcstate->old[i-1].count = 0; - } - PyInterpreterState *interp = tstate->interp; struct collection_state state = { @@ -1172,7 +1173,7 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason) .gcstate = gcstate, }; - gc_collect_internal(interp, &state); + gc_collect_internal(interp, &state, generation); m = state.collected; n = state.uncollectable; From 364d1d9da92e33ce8469387aafadc94f92a7092a Mon Sep 17 00:00:00 2001 From: Alex Turner Date: Fri, 26 Apr 2024 12:35:47 -0700 Subject: [PATCH 5/7] TSAN: Fix compiler warning in gc patch --- Python/gc_free_threading.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index ca9708d974b3a5..8a745bfb86dfe1 100644 --- a/Python/gc_free_threading.c +++ 
b/Python/gc_free_threading.c @@ -1048,7 +1048,7 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state, } state->gcstate->young.count = 0; - for (size_t i = 1; i <= generation; ++i) { + for (int i = 1; i <= generation; ++i) { state->gcstate->old[i-1].count = 0; } @@ -1120,7 +1120,6 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state, static Py_ssize_t gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason) { - int i; Py_ssize_t m = 0; /* # objects collected */ Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */ PyTime_t t1 = 0; /* initialize to prevent a compiler warning */ From 655e071657e807eaa59567e45ecd528d22029cde Mon Sep 17 00:00:00 2001 From: Alex Turner Date: Mon, 29 Apr 2024 12:48:03 -0700 Subject: [PATCH 6/7] Remove unneeded news entry --- .../2024-04-26-12-08-33.gh-issue-117657.D6Zd73.rst | 1 - 1 file changed, 1 deletion(-) delete mode 100644 Misc/NEWS.d/next/Core and Builtins/2024-04-26-12-08-33.gh-issue-117657.D6Zd73.rst diff --git a/Misc/NEWS.d/next/Core and Builtins/2024-04-26-12-08-33.gh-issue-117657.D6Zd73.rst b/Misc/NEWS.d/next/Core and Builtins/2024-04-26-12-08-33.gh-issue-117657.D6Zd73.rst deleted file mode 100644 index 03e7031e6174a5..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2024-04-26-12-08-33.gh-issue-117657.D6Zd73.rst +++ /dev/null @@ -1 +0,0 @@ -Continuing to whack-a-mole TSAN issues found in No-GIL tests. From e34b902c4c83ab335a5a9f36ddb7569531e53eb5 Mon Sep 17 00:00:00 2001 From: Alex Turner Date: Wed, 8 May 2024 08:31:14 -0700 Subject: [PATCH 7/7] gh-117657: tsan initialize_new_array --- Include/internal/pycore_qsbr.h | 2 +- Python/pystate.c | 2 +- Python/qsbr.c | 11 ++++++----- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/Include/internal/pycore_qsbr.h b/Include/internal/pycore_qsbr.h index c3680a205542f7..20e643e172b38d 100644 --- a/Include/internal/pycore_qsbr.h +++ b/Include/internal/pycore_qsbr.h @@ -140,7 +140,7 @@ _Py_qsbr_register(struct _PyThreadStateImpl *tstate, // Disassociates a PyThreadState from the QSBR state and frees the QSBR state. extern void -_Py_qsbr_unregister(struct _PyThreadStateImpl *tstate); +_Py_qsbr_unregister(PyThreadState *tstate); extern void _Py_qsbr_fini(PyInterpreterState *interp); diff --git a/Python/pystate.c b/Python/pystate.c index b1e085bb806915..0a226268c558c7 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -1785,7 +1785,7 @@ tstate_delete_common(PyThreadState *tstate) HEAD_UNLOCK(runtime); #ifdef Py_GIL_DISABLED - _Py_qsbr_unregister((_PyThreadStateImpl *)tstate); + _Py_qsbr_unregister(tstate); #endif // XXX Unbind in PyThreadState_Clear(), or earlier diff --git a/Python/qsbr.c b/Python/qsbr.c index d7ac8f479cda1b..1e02ff9c2e45f0 100644 --- a/Python/qsbr.c +++ b/Python/qsbr.c @@ -231,20 +231,21 @@ _Py_qsbr_register(_PyThreadStateImpl *tstate, PyInterpreterState *interp, } void -_Py_qsbr_unregister(_PyThreadStateImpl *tstate) +_Py_qsbr_unregister(PyThreadState *tstate) { - struct _qsbr_shared *shared = tstate->qsbr->shared; + struct _qsbr_shared *shared = &tstate->interp->qsbr; + struct _PyThreadStateImpl *tstate_imp = (_PyThreadStateImpl*) tstate; PyMutex_Lock(&shared->mutex); // NOTE: we must load (or reload) the thread state's qbsr inside the mutex // because the array may have been resized (changing tstate->qsbr) while // we waited to acquire the mutex.
- struct _qsbr_thread_state *qsbr = tstate->qsbr; + struct _qsbr_thread_state *qsbr = tstate_imp->qsbr; assert(qsbr->seq == 0 && "thread state must be detached"); - assert(qsbr->allocated && qsbr->tstate == (PyThreadState *)tstate); + assert(qsbr->allocated && qsbr->tstate == tstate); - tstate->qsbr = NULL; + tstate_imp->qsbr = NULL; qsbr->tstate = NULL; qsbr->allocated = false; qsbr->freelist_next = shared->freelist;