From a93e9d6d8db5045480ae868b467038156d218082 Mon Sep 17 00:00:00 2001 From: Cornelius Roemer Date: Sun, 20 Jul 2025 23:05:30 +0200 Subject: [PATCH 1/9] Fix various typos and spelling errors found by codespell. --- Doc/README.rst | 4 +- Doc/extending/extending.rst | 2 +- Doc/howto/functional.rst | 4 +- Doc/howto/gdb_helpers.rst | 2 +- Doc/includes/email-read-alternative.py | 4 +- Doc/library/bz2.rst | 8 +- Doc/library/collections.rst | 18 +- Doc/library/datetime.rst | 4 +- Doc/library/difflib.rst | 12 +- Doc/library/index.rst | 2 +- Doc/library/pickle.rst | 2 +- Doc/library/re.rst | 2 +- Doc/library/shelve.rst | 2 +- Doc/library/ssl.rst | 2 +- Doc/library/subprocess.rst | 8 +- Doc/library/unittest.mock.rst | 2 +- Doc/library/unittest.rst | 2 +- Doc/library/urllib.parse.rst | 2 +- Doc/library/venv.rst | 2 +- Doc/library/weakref.rst | 2 +- Doc/library/xml.sax.handler.rst | 4 +- Doc/tutorial/introduction.rst | 6 +- Doc/using/windows.rst | 2 +- Doc/whatsnew/2.4.rst | 2 +- Doc/whatsnew/2.6.rst | 2 +- Doc/whatsnew/3.0.rst | 2 +- Doc/whatsnew/3.11.rst | 2 +- Doc/whatsnew/3.12.rst | 4 +- Doc/whatsnew/3.14.rst | 2 +- Doc/whatsnew/3.2.rst | 2 +- Doc/whatsnew/3.4.rst | 2 +- Doc/whatsnew/3.5.rst | 2 +- Doc/whatsnew/3.8.rst | 2 +- Include/cpython/critical_section.h | 4 +- Include/internal/mimalloc/mimalloc/internal.h | 2 +- Include/internal/pycore_crossinterp.h | 2 +- Include/internal/pycore_pymem.h | 2 +- Include/internal/pycore_runtime_structs.h | 2 +- Include/internal/pycore_time.h | 6 +- Include/modsupport.h | 2 +- Include/refcount.h | 2 +- Include/unicodeobject.h | 2 +- InternalDocs/asyncio.md | 2 +- InternalDocs/interpreter.md | 2 +- Lib/_pydatetime.py | 8 +- Lib/_pydecimal.py | 466 +++++++++--------- Lib/_strptime.py | 8 +- Lib/asyncio/graph.py | 2 +- Lib/email/charset.py | 4 +- Lib/encodings/cp1006.py | 6 +- Lib/encodings/cp1253.py | 4 +- Lib/encodings/cp1256.py | 4 +- Lib/encodings/cp720.py | 4 +- Lib/encodings/cp737.py | 12 +- Lib/encodings/cp864.py | 18 +- Lib/encodings/cp869.py | 12 +- Lib/encodings/cp875.py | 4 +- Lib/encodings/iso8859_6.py | 4 +- Lib/encodings/iso8859_7.py | 4 +- Lib/encodings/mac_arabic.py | 12 +- Lib/encodings/mac_farsi.py | 4 +- Lib/encodings/mac_greek.py | 4 +- Lib/http/cookies.py | 2 +- Lib/idlelib/CREDITS.txt | 2 +- Lib/idlelib/News3.txt | 2 +- Lib/idlelib/editor.py | 4 +- Lib/idlelib/idle_test/test_editmenu.py | 18 +- Lib/idlelib/run.py | 6 +- Lib/idlelib/searchbase.py | 4 +- Lib/imaplib.py | 2 +- Lib/inspect.py | 8 +- Lib/locale.py | 2 +- Lib/logging/__init__.py | 2 +- Lib/logging/handlers.py | 2 +- Lib/multiprocessing/resource_tracker.py | 2 +- Lib/pickle.py | 2 +- Lib/sysconfig/__init__.py | 2 +- Lib/test/bisect_cmd.py | 2 +- Lib/test/configdata/cfgparser.2 | 8 +- Lib/test/crashers/README | 2 +- Lib/test/crashers/infinite_loop_re.py | 2 +- Lib/test/decimaltestdata/base.decTest | 2 +- Lib/test/decimaltestdata/ddBase.decTest | 2 +- Lib/test/decimaltestdata/dqBase.decTest | 2 +- Lib/test/decimaltestdata/dsBase.decTest | 2 +- Lib/test/encoded_modules/__init__.py | 2 +- Lib/test/encoded_modules/module_iso_8859_1.py | 2 +- Lib/test/libregrtest/cmdline.py | 4 +- Lib/test/libregrtest/filter.py | 6 +- Lib/test/libregrtest/findtests.py | 8 +- Lib/test/mime.types | 6 +- Lib/test/multibytecodec_support.py | 22 +- Lib/test/pickletester.py | 2 +- Lib/test/support/asyncore.py | 4 +- Lib/test/support/os_helper.py | 2 +- Lib/test/support/smtpd.py | 2 +- Lib/test/test_asyncio/test_sslproto.py | 2 +- Lib/test/test_asyncio/test_tasks.py | 6 +- Lib/test/test_buffer.py | 2 +- 
Lib/test/test_build_details.py | 4 +- Lib/test/test_bytes.py | 2 +- Lib/test/test_capi/test_tuple.py | 2 +- Lib/test/test_capi/test_type.py | 2 +- Lib/test/test_cmd_line.py | 2 +- Lib/test/test_codecs.py | 2 +- Lib/test/test_ctypes/test_win32.py | 4 +- .../test_win32_com_foreign_func.py | 6 +- Lib/test/test_decimal.py | 116 ++--- Lib/test/test_descr.py | 36 +- Lib/test/test_dict.py | 2 +- Lib/test/test_difflib.py | 2 +- Lib/test/test_dis.py | 2 +- .../test_email/test__header_value_parser.py | 24 +- Lib/test/test_exceptions.py | 2 +- Lib/test/test_fileio.py | 2 +- Lib/test/test_float.py | 2 +- Lib/test/test_fnmatch.py | 22 +- Lib/test/test_generators.py | 4 +- Lib/test/test_genexps.py | 4 +- Lib/test/test_gzip.py | 4 +- Lib/test/test_httpservers.py | 8 +- Lib/test/test_import/__init__.py | 2 +- Lib/test/test_interpreters/test_api.py | 2 +- Lib/test/test_iterlen.py | 2 +- Lib/test/test_itertools.py | 48 +- Lib/test/test_json/test_dump.py | 4 +- Lib/test/test_locale.py | 4 +- Lib/test/test_logging.py | 2 +- Lib/test/test_long.py | 4 +- Lib/test/test_memoryview.py | 2 +- Lib/test/test_ntpath.py | 16 +- Lib/test/test_opcache.py | 2 +- Lib/test/test_os.py | 2 +- Lib/test/test_pdb.py | 2 +- Lib/test/test_peg_generator/test_c_parser.py | 4 +- Lib/test/test_plistlib.py | 6 +- Lib/test/test_pty.py | 2 +- Lib/test/test_pyrepl/test_pyrepl.py | 2 +- Lib/test/test_regrtest.py | 6 +- Lib/test/test_richcmp.py | 12 +- Lib/test/test_set.py | 6 +- Lib/test/test_socket.py | 2 +- Lib/test/test_sort.py | 2 +- Lib/test/test_sqlite3/test_dbapi.py | 4 +- Lib/test/test_ssl.py | 8 +- Lib/test/test_stat.py | 2 +- Lib/test/test_statistics.py | 2 +- Lib/test/test_strptime.py | 20 +- Lib/test/test_subprocess.py | 2 +- Lib/test/test_syntax.py | 8 +- Lib/test/test_sysconfig.py | 4 +- Lib/test/test_tarfile.py | 2 +- .../test_tkinter/test_geometry_managers.py | 2 +- Lib/test/test_tkinter/test_widgets.py | 2 +- Lib/test/test_traceback.py | 12 +- Lib/test/test_typing.py | 4 +- Lib/test/test_unittest/testmock/testpatch.py | 20 +- Lib/test/test_urllib2.py | 4 +- Lib/test/test_weakref.py | 10 +- Lib/test/test_xml_etree.py | 2 +- Lib/test/test_zipfile/test_core.py | 46 +- Lib/tkinter/__init__.py | 2 +- Lib/tkinter/ttk.py | 14 +- Lib/turtle.py | 4 +- Lib/unittest/mock.py | 2 +- Lib/urllib/request.py | 2 +- Lib/xml/dom/minidom.py | 2 +- Mac/BuildScript/resources/Conclusion.rtf | 2 +- Mac/BuildScript/resources/License.rtf | 12 +- Mac/BuildScript/resources/ReadMe.rtf | 8 +- Mac/BuildScript/resources/Welcome.rtf | 2 +- Mac/PythonLauncher/English.lproj/Credits.rtf | 2 +- Makefile.pre.in | 2 +- Misc/ACKS | 12 +- Misc/HISTORY | 30 +- Misc/NEWS.d/3.10.0a3.rst | 2 +- Misc/NEWS.d/3.10.0a7.rst | 2 +- Misc/NEWS.d/3.12.0a4.rst | 4 +- Misc/NEWS.d/3.13.0b1.rst | 2 +- Misc/NEWS.d/3.14.0a1.rst | 4 +- Misc/NEWS.d/3.14.0a7.rst | 2 +- Misc/NEWS.d/3.14.0b1.rst | 4 +- Misc/NEWS.d/3.5.0a3.rst | 2 +- Misc/NEWS.d/3.5.1rc1.rst | 4 +- Misc/NEWS.d/3.5.2rc1.rst | 2 +- Misc/NEWS.d/3.6.0a1.rst | 4 +- Misc/NEWS.d/3.9.0a1.rst | 2 +- ...-07-19-12-37-05.gh-issue-136801.XU_tF2.rst | 2 +- ...-07-05-09-45-04.gh-issue-136286.N67Amr.rst | 2 +- ...-06-11-12-14-06.gh-issue-135379.25ttXq.rst | 2 +- Modules/Setup.stdlib.in | 2 +- Modules/_collectionsmodule.c | 6 +- Modules/_ctypes/ctypes.h | 2 +- Modules/_datetimemodule.c | 8 +- Modules/_decimal/libmpdec/basearith.h | 18 +- .../libmpdec/literature/mulmod-ppro.txt | 14 +- Modules/_decimal/libmpdec/umodarith.h | 4 +- Modules/_decimal/tests/bench.py | 4 +- Modules/_decimal/tests/bignum.py | 4 +- Modules/_functoolsmodule.c | 2 
+- Modules/_pickle.c | 2 +- Modules/_ssl.c | 6 +- Modules/_testinternalcapi.c | 4 +- Modules/_zstd/_zstdmodule.c | 4 +- Modules/_zstd/_zstdmodule.h | 2 +- Modules/_zstd/buffer.h | 2 +- Modules/_zstd/clinic/_zstdmodule.c.h | 2 +- Modules/_zstd/compressor.c | 4 +- Modules/_zstd/decompressor.c | 2 +- Modules/_zstd/zstddict.c | 2 +- Modules/_zstd/zstddict.h | 2 +- Modules/cjkcodecs/_codecs_iso2022.c | 6 +- Modules/clinic/_pickle.c.h | 2 +- Modules/hmacmodule.c | 2 +- Modules/itertoolsmodule.c | 8 +- Modules/mathmodule.c | 6 +- Modules/mmapmodule.c | 2 +- Modules/posixmodule.c | 2 +- Modules/socketmodule.c | 2 +- Objects/clinic/unicodeobject.c.h | 4 +- Objects/codeobject.c | 4 +- Objects/dictnotes.txt | 2 +- Objects/dictobject.c | 16 +- Objects/exceptions.c | 2 +- Objects/listobject.c | 24 +- Objects/listsort.txt | 2 +- Objects/longobject.c | 4 +- Objects/mimalloc/arena.c | 4 +- Objects/mimalloc/os.c | 4 +- .../mimalloc/prim/windows/etw-mimalloc.wprp | 2 +- Objects/obmalloc.c | 4 +- Objects/typeobject.c | 4 +- Objects/unicodeobject.c | 18 +- PC/winreg.c | 2 +- PCbuild/pyproject-clangcl.props | 2 +- PCbuild/pyproject.props | 2 +- Parser/lexer/lexer.c | 2 +- Parser/pegen.c | 4 +- Parser/pegen.h | 2 +- Python/ceval.c | 4 +- Python/codecs.c | 22 +- Python/crossinterp.c | 4 +- Python/dynamic_annotations.c | 2 +- Python/gc.c | 2 +- Python/gc_free_threading.c | 6 +- Python/import.c | 2 +- Python/perf_trampoline.c | 2 +- Python/pystate.c | 4 +- Python/pytime.c | 20 +- Python/remote_debug.h | 2 +- Python/specialize.c | 2 +- Python/uniqueid.c | 2 +- Tools/build/deepfreeze.py | 6 +- Tools/build/freeze_modules.py | 2 +- Tools/build/generate-build-details.py | 2 +- Tools/build/parse_html5_entities.py | 6 +- Tools/cases_generator/stack.py | 8 +- Tools/cases_generator/tier1_generator.py | 2 +- Tools/i18n/pygettext.py | 2 +- .../peg_generator/pegen/grammar_visualizer.py | 6 +- Tools/scripts/combinerefs.py | 4 +- Tools/unicode/python-mappings/GB2312.TXT | 4 +- .../python-mappings/jisx0213-2004-std.txt | 4 +- configure.ac | 2 +- pyconfig.h.in | 2 +- 265 files changed, 958 insertions(+), 958 deletions(-) diff --git a/Doc/README.rst b/Doc/README.rst index 2d1148753e0c6b..1ac4995ba05a6a 100644 --- a/Doc/README.rst +++ b/Doc/README.rst @@ -59,10 +59,10 @@ Available make targets are: * "html", which builds standalone HTML files for offline viewing. -* "htmlview", which re-uses the "html" builder, but then opens the main page +* "htmlview", which reuses the "html" builder, but then opens the main page in your default web browser. -* "htmllive", which re-uses the "html" builder, rebuilds the docs, +* "htmllive", which reuses the "html" builder, rebuilds the docs, starts a local server, and automatically reloads the page in your browser when you make changes to reST files (Unix only). diff --git a/Doc/extending/extending.rst b/Doc/extending/extending.rst index fd63495674651b..a89a69043c0f9f 100644 --- a/Doc/extending/extending.rst +++ b/Doc/extending/extending.rst @@ -214,7 +214,7 @@ and initialize it by calling :c:func:`PyErr_NewException` in the module's SpamError = PyErr_NewException("spam.error", NULL, NULL); -Since :c:data:`!SpamError` is a global variable, it will be overwitten every time +Since :c:data:`!SpamError` is a global variable, it will be overwritten every time the module is reinitialized, when the :c:data:`Py_mod_exec` function is called. 
For now, let's avoid the issue: we will block repeated initialization by raising an diff --git a/Doc/howto/functional.rst b/Doc/howto/functional.rst index 053558e389030a..eb251df088a34b 100644 --- a/Doc/howto/functional.rst +++ b/Doc/howto/functional.rst @@ -375,7 +375,7 @@ have the form:: if condition3 ... for exprN in sequenceN - if conditionN ) + if conditionN ) Again, for a list comprehension only the outside brackets are different (square brackets instead of parentheses). @@ -407,7 +407,7 @@ equivalent to the following Python code:: continue # Skip this element ... for exprN in sequenceN: - if not (conditionN): + if not (conditionN): continue # Skip this element # Output the value of diff --git a/Doc/howto/gdb_helpers.rst b/Doc/howto/gdb_helpers.rst index 98ce813ca4ab02..b0d56a6bfb891f 100644 --- a/Doc/howto/gdb_helpers.rst +++ b/Doc/howto/gdb_helpers.rst @@ -136,7 +136,7 @@ enabled:: at Objects/unicodeobject.c:551 #7 0x0000000000440d94 in PyUnicodeUCS2_FromString (u=0x5c2b8d "__lltrace__") at Objects/unicodeobject.c:569 #8 0x0000000000584abd in PyDict_GetItemString (v= - {'Yuck': , '__builtins__': , '__file__': 'Lib/test/crashers/nasty_eq_vs_dict.py', '__package__': None, 'y': , 'dict': {0: 0, 1: 1, 2: 2, 3: 3}, '__cached__': None, '__name__': '__main__', 'z': , '__doc__': None}, key= + {'Yuck': , '__builtins__': , '__file__': 'Lib/test/crashers/nasty_eq_vs_dict.py', '__package__': None, 'y': , 'dict': {0: 0, 1: 1, 2: 2, 3: 3}, '__cached__': None, '__name__': '__main__', 'z': , '__doc__': None}, key= 0x5c2b8d "__lltrace__") at Objects/dictobject.c:2171 Notice how the dictionary argument to ``PyDict_GetItemString`` is displayed diff --git a/Doc/includes/email-read-alternative.py b/Doc/includes/email-read-alternative.py index 8d0b4e6eb6b6b5..579390a18e9211 100644 --- a/Doc/includes/email-read-alternative.py +++ b/Doc/includes/email-read-alternative.py @@ -36,8 +36,8 @@ def magic_html_parser(html_text, partfiles): print() print(''.join(simplest.get_content().splitlines(keepends=True)[:3])) -ans = input("View full message?") -if ans.lower()[0] == 'n': +ans = input("View full message?") +if ans.lower()[0] == 'n': sys.exit() # We can extract the richest alternative in order to display it: diff --git a/Doc/library/bz2.rst b/Doc/library/bz2.rst index ebe2e43febaefa..3aa83ab368d35d 100644 --- a/Doc/library/bz2.rst +++ b/Doc/library/bz2.rst @@ -322,9 +322,9 @@ Using :func:`compress` and :func:`decompress` to demonstrate round-trip compress >>> import bz2 >>> data = b"""\ ... Donec rhoncus quis sapien sit amet molestie. Fusce scelerisque vel augue - ... nec ullamcorper. Nam rutrum pretium placerat. Aliquam vel tristique lorem, + ... nec ullamcorper. Nam rutrum pretium placerat. Aliquam vel tristique lorem, ... sit amet cursus ante. In interdum laoreet mi, sit amet ultrices purus - ... pulvinar a. Nam gravida euismod magna, non varius justo tincidunt feugiat. + ... pulvinar a. Nam gravida euismod magna, non varius justo tincidunt feugiat. ... Aliquam pharetra lacus non risus vehicula rutrum. Maecenas aliquam leo ... felis. Pellentesque semper nunc sit amet nibh ullamcorper, ac elementum ... dolor luctus. Curabitur lacinia mi ornare consectetur vestibulum.""" @@ -362,9 +362,9 @@ Writing and reading a bzip2-compressed file in binary mode: >>> import bz2 >>> data = b"""\ ... Donec rhoncus quis sapien sit amet molestie. Fusce scelerisque vel augue - ... nec ullamcorper. Nam rutrum pretium placerat. Aliquam vel tristique lorem, + ... nec ullamcorper. Nam rutrum pretium placerat.
Aliquam vel tristique lorem, ... sit amet cursus ante. In interdum laoreet mi, sit amet ultrices purus - ... pulvinar a. Nam gravida euismod magna, non varius justo tincidunt feugiat. + ... pulvinar a. Nam gravida euismod magna, non varius justo tincidunt feugiat. ... Aliquam pharetra lacus non risus vehicula rutrum. Maecenas aliquam leo ... felis. Pellentesque semper nunc sit amet nibh ullamcorper, ac elementum ... dolor luctus. Curabitur lacinia mi ornare consectetur vestibulum.""" diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst index 5fbdb12f40cafa..0f778f4db2ce4b 100644 --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -455,8 +455,8 @@ or subtracting from an empty counter. Returns a new deque object initialized left-to-right (using :meth:`append`) with data from *iterable*. If *iterable* is not specified, the new deque is empty. - Deques are a generalization of stacks and queues (the name is pronounced "deck" - and is short for "double-ended queue"). Deques support thread-safe, memory + Deques are a generalization of stacks and queues (the name is pronounced "deck" + and is short for "double-ended queue"). Deques support thread-safe, memory efficient appends and pops from either side of the deque with approximately the same *O*\ (1) performance in either direction. @@ -466,11 +466,11 @@ or subtracting from an empty counter. position of the underlying data representation. - If *maxlen* is not specified or is ``None``, deques may grow to an + If *maxlen* is not specified or is ``None``, deques may grow to an arbitrary length. Otherwise, the deque is bounded to the specified maximum length. Once a bounded length deque is full, when new items are added, a corresponding number of items are discarded from the opposite end. Bounded - length deques provide functionality similar to the ``tail`` filter in + length deques provide functionality similar to the ``tail`` filter in Unix. They are also useful for tracking transactions and other pools of data where only the most recent activity is of interest. @@ -582,13 +582,13 @@ or subtracting from an empty counter. .. versionadded:: 3.1 -In addition to the above, deques support iteration, pickling, ``len(d)``, +In addition to the above, deques support iteration, pickling, ``len(d)``, ``reversed(d)``, ``copy.copy(d)``, ``copy.deepcopy(d)``, membership testing with the :keyword:`in` operator, and subscript references such as ``d[0]`` to access the first element. Indexed access is *O*\ (1) at both ends but slows to *O*\ (*n*) in the middle. For fast random access, use lists instead. -Starting in version 3.5, deques support ``__add__()``, ``__mul__()``, +Starting in version 3.5, deques support ``__add__()``, ``__mul__()``, and ``__imul__()``. Example: @@ -650,9 +650,9 @@ Example: :class:`deque` Recipes ^^^^^^^^^^^^^^^^^^^^^^ -This section shows various approaches to working with deques. +This section shows various approaches to working with deques.
-Bounded length deques provide functionality similar to the ``tail`` filter +Bounded length deques provide functionality similar to the ``tail`` filter in Unix:: def tail(filename, n=10): 'Return the last n lines of a file' with open(filename) as f: return deque(f, n) -Another approach to using deques is to maintain a sequence of recently +Another approach to using deques is to maintain a sequence of recently added elements by appending to the right and popping to the left:: def moving_average(iterable, n=3): diff --git a/Doc/library/datetime.rst b/Doc/library/datetime.rst index 16ed3215bc2c1a..14f5abb6fa351b 100644 --- a/Doc/library/datetime.rst +++ b/Doc/library/datetime.rst @@ -2515,8 +2515,8 @@ requires, and these work on all platforms with a standard C implementation. +-----------+--------------------------------+------------------------+-------+ | ``%B`` | Month as locale's full name. || January, February, | \(1) | | | | ..., December (en_US);| | -| | || Januar, Februar, ..., | | -| | | Dezember (de_DE) | | +| | || Januar, Februar, ..., | | +| | | Dezember (de_DE) | | +-----------+--------------------------------+------------------------+-------+ | ``%m`` | Month as a zero-padded | 01, 02, ..., 12 | \(9) | | | decimal number. | | | diff --git a/Doc/library/difflib.rst b/Doc/library/difflib.rst index ce948a6860f02c..455ea27cb2c3e1 100644 --- a/Doc/library/difflib.rst +++ b/Doc/library/difflib.rst @@ -246,7 +246,7 @@ diffs. For comparing directories and files, see also, the :mod:`filecmp` module. >>> print(''.join(diff), end="") - one ? ^ - + ore + + ore ? ^ - two - three @@ -273,7 +273,7 @@ diffs. For comparing directories and files, see also, the :mod:`filecmp` module. two three >>> print(''.join(restore(diff, 2)), end="") - ore + ore tree emu @@ -420,12 +420,12 @@ The :class:`SequenceMatcher` class has this constructor: is not changed. - .. method:: find_longest_match(alo=0, ahi=None, blo=0, bhi=None) + .. method:: find_longest_match(alo=0, ahi=None, blo=0, bhi=None) - Find longest matching block in ``a[alo:ahi]`` and ``b[blo:bhi]``. + Find longest matching block in ``a[alo:ahi]`` and ``b[blo:bhi]``. If *isjunk* was omitted or ``None``, :meth:`find_longest_match` returns - ``(i, j, k)`` such that ``a[i:i+k]`` is equal to ``b[j:j+k]``, where ``alo + ``(i, j, k)`` such that ``a[i:i+k]`` is equal to ``b[j:j+k]``, where ``alo <= i <= i+k <= ahi`` and ``blo <= j <= j+k <= bhi``. For all ``(i', j', k')`` meeting those conditions, the additional conditions ``k >= k'``, ``i <= i'``, and if ``i == i'``, ``j <= j'`` are also met. In other words, of @@ -453,7 +453,7 @@ The :class:`SequenceMatcher` class has this constructor: >>> s.find_longest_match(0, 5, 0, 9) Match(a=1, b=0, size=4) - If no blocks match, this returns ``(alo, blo, 0)``. + If no blocks match, this returns ``(alo, blo, 0)``. This method returns a :term:`named tuple` ``Match(a, b, size)``. diff --git a/Doc/library/index.rst b/Doc/library/index.rst index 44b218948d07e1..781264cdfecba8 100644 --- a/Doc/library/index.rst +++ b/Doc/library/index.rst @@ -54,7 +54,7 @@ the `Python Package Index `_.
archiving.rst fileformats.rst crypto.rst - allos.rst + allos.rst cmdlinelibs.rst concurrency.rst ipc.rst diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst index 007c9fe1b950cf..adf37408c8645d 100644 --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -408,7 +408,7 @@ The :mod:`pickle` module exports three classes, :class:`Pickler`, The memo is the data structure that remembers which objects the pickler has already seen, so that shared or recursive objects are pickled by reference and not by value. This method is - useful when re-using picklers. + useful when reusing picklers. .. class:: Unpickler(file, *, fix_imports=True, encoding="ASCII", errors="strict", buffers=None) diff --git a/Doc/library/re.rst b/Doc/library/re.rst index 75ebbf11c8e47c..3ae2099e7fe701 100644 --- a/Doc/library/re.rst +++ b/Doc/library/re.rst @@ -1811,7 +1811,7 @@ in each word of a sentence except for the first and last characters:: >>> re.sub(r"(\w)(\w+)(\w)", repl, text) 'Poefsrosr Aealmlobdk, pslaee reorpt your abnseces plmrptoy.' >>> re.sub(r"(\w)(\w+)(\w)", repl, text) - 'Pofsroser Aodlambelk, plasee reoprt yuor asnebces potlmrpy.' + 'Pofsroser Aodlambelk, plasee reoprt yuor asnebces potlmrpy.' Finding all Adverbs diff --git a/Doc/library/shelve.rst b/Doc/library/shelve.rst index 23808619524056..b88fe4157bdc29 100644 --- a/Doc/library/shelve.rst +++ b/Doc/library/shelve.rst @@ -144,7 +144,7 @@ Restrictions which can cause hard crashes when trying to read from the database. * :meth:`Shelf.reorganize` may not be available for all database packages and - may temporarely increase resource usage (especially disk space) when called. + may temporarily increase resource usage (especially disk space) when called. Additionally, it will never run automatically and instead needs to be called explicitly. diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst index a9930183f9a400..f4e874b89a0f5c 100644 --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -2285,7 +2285,7 @@ something like the following:: Country Name (2 letter code) [AU]:US State or Province Name (full name) [Some-State]:MyState Locality Name (eg, city) []:Some City - Organization Name (eg, company) [Internet Widgits Pty Ltd]:My Organization, Inc. + Organization Name (eg, company) [Internet Widgits Pty Ltd]:My Organization, Inc. Organizational Unit Name (eg, section) []:My Group Common Name (eg, YOUR name) []:myserver.mygroup.myorganization.com Email Address []:ops@myserver.mygroup.myorganization.com diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst index 028a7861f36798..aa74e0f57a39fa 100644 --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -1359,12 +1359,12 @@ Replacing shell pipeline .. code-block:: bash - output=$(dmesg | grep hda) + output=$(dmesg | grep hda) becomes:: p1 = Popen(["dmesg"], stdout=PIPE) - p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE) + p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE) p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. output = p2.communicate()[0] @@ -1376,11 +1376,11 @@ be used directly: ..
code-block:: bash - output=$(dmesg | grep hda) + output=$(dmesg | grep hda) becomes:: - output = check_output("dmesg | grep hda", shell=True) + output = check_output("dmesg | grep hda", shell=True) Replacing :func:`os.system` diff --git a/Doc/library/unittest.mock.rst b/Doc/library/unittest.mock.rst index 091562cc9aef98..b98848a25cfa70 100644 --- a/Doc/library/unittest.mock.rst +++ b/Doc/library/unittest.mock.rst @@ -265,7 +265,7 @@ the *new_callable* argument to :func:`patch`. :attr:`return_value` attribute. * *unsafe*: By default, accessing any attribute whose name starts with - *assert*, *assret*, *asert*, *aseert* or *assrt* will raise an + *assert*, *assret*, *asert*, *aseert* or *assrt* will raise an :exc:`AttributeError`. Passing ``unsafe=True`` will allow access to these attributes. diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst index ec96e8416120fa..86b215775c6148 100644 --- a/Doc/library/unittest.rst +++ b/Doc/library/unittest.rst @@ -485,7 +485,7 @@ advantages to placing the test code in a separate module, such as .. _legacy-unit-tests: -Re-using old test code +Reusing old test code ---------------------- Some users will find that they have existing test code that they would like to diff --git a/Doc/library/urllib.parse.rst b/Doc/library/urllib.parse.rst index 44a9c79cba2216..bcee8ccaa1f5cb 100644 --- a/Doc/library/urllib.parse.rst +++ b/Doc/library/urllib.parse.rst @@ -24,7 +24,7 @@ The module has been designed to match the internet RFC on Relative Uniform Resource Locators. It supports the following URL schemes: ``file``, ``ftp``, ``gopher``, ``hdl``, ``http``, ``https``, ``imap``, ``itms-services``, ``mailto``, ``mms``, ``news``, ``nntp``, ``prospero``, ``rsync``, ``rtsp``, ``rtsps``, ``rtspu``, -``sftp``, ``shttp``, ``sip``, ``sips``, ``snews``, ``svn``, ``svn+ssh``, +``sftp``, ``shttp``, ``sip``, ``sips``, ``snews``, ``svn``, ``svn+ssh``, ``telnet``, ``wais``, ``ws``, ``wss``. .. impl-detail:: diff --git a/Doc/library/venv.rst b/Doc/library/venv.rst index de427fbafe71dc..3d04e130eba25a 100644 --- a/Doc/library/venv.rst +++ b/Doc/library/venv.rst @@ -79,7 +79,7 @@ containing a copy or symlink of the Python executable (as appropriate for the platform or arguments used at environment creation time). It also creates a :file:`lib/pythonX.Y/site-packages` subdirectory (on Windows, this is :file:`Lib\site-packages`). -If an existing directory is specified, it will be re-used. +If an existing directory is specified, it will be reused. .. versionchanged:: 3.5 The use of ``venv`` is now recommended for creating virtual environments. diff --git a/Doc/library/weakref.rst b/Doc/library/weakref.rst index 2a25ed045c68bd..43e7b2ab33f03f 100644 --- a/Doc/library/weakref.rst +++ b/Doc/library/weakref.rst @@ -68,7 +68,7 @@ exposed by the :mod:`weakref` module for the benefit of advanced uses. Not all objects can be weakly referenced. Objects which support weak references include class instances, functions written in Python (but not in C), instance methods, sets, frozensets, some :term:`file objects `, :term:`generators `, -type objects, sockets, arrays, deques, regular expression pattern objects, and code +type objects, sockets, arrays, deques, regular expression pattern objects, and code objects. ..
versionchanged:: 3.2 diff --git a/Doc/library/xml.sax.handler.rst b/Doc/library/xml.sax.handler.rst index c2c9d6424b5072..fb7bfaa0bf1777 100644 --- a/Doc/library/xml.sax.handler.rst +++ b/Doc/library/xml.sax.handler.rst @@ -250,7 +250,7 @@ events in the input document: string and the *attrs* parameter holds an object of the :class:`~xml.sax.xmlreader.Attributes` interface (see :ref:`attributes-objects`) containing the attributes of - the element. The object passed as *attrs* may be re-used by the parser; holding + the element. The object passed as *attrs* may be reused by the parser; holding on to a reference to it is not a reliable way to keep a copy of the attributes. To keep a copy of the attributes, use the :meth:`copy` method of the *attrs* object. @@ -275,7 +275,7 @@ events in the input document: :ref:`attributes-ns-objects`) containing the attributes of the element. If no namespace is associated with the element, the *uri* component of *name* will be ``None``. The object passed - as *attrs* may be re-used by the parser; holding on to a reference to it is not + as *attrs* may be reused by the parser; holding on to a reference to it is not a reliable way to keep a copy of the attributes. To keep a copy of the attributes, use the :meth:`copy` method of the *attrs* object. diff --git a/Doc/tutorial/introduction.rst b/Doc/tutorial/introduction.rst index 9e06e03991bc96..53735eed311454 100644 --- a/Doc/tutorial/introduction.rst +++ b/Doc/tutorial/introduction.rst @@ -436,9 +436,9 @@ through all other variables that refer to it.:: >>> rgba = rgb >>> id(rgb) == id(rgba) # they reference the same object True - >>> rgba.append("Alph") + >>> rgba.append("Alph") >>> rgb - ["Red", "Green", "Blue", "Alph"] + ["Red", "Green", "Blue", "Alph"] All slice operations return a new list containing the requested elements. This means that the following slice returns a @@ -449,7 +449,7 @@ means that the following slice returns a >>> correct_rgba ["Red", "Green", "Blue", "Alpha"] >>> rgba - ["Red", "Green", "Blue", "Alph"] + ["Red", "Green", "Blue", "Alph"] Assignment to slices is also possible, and this can even change the size of the list or clear it entirely:: diff --git a/Doc/using/windows.rst b/Doc/using/windows.rst index 7cc50bccb3724a..2eb756a9881ae9 100644 --- a/Doc/using/windows.rst +++ b/Doc/using/windows.rst @@ -1514,7 +1514,7 @@ free-threaded binaries at this time. To specify the install option at the command line, use ``Include_freethreaded=1``. See :ref:`install-layout-option` for instructions on -pre-emptively downloading the additional binaries for offline install. The +preemptively downloading the additional binaries for offline install. The options to include debug symbols and binaries also apply to the free-threaded builds. diff --git a/Doc/whatsnew/2.4.rst b/Doc/whatsnew/2.4.rst index 7628cfefe0ec96..31b2c9581153b0 100644 --- a/Doc/whatsnew/2.4.rst +++ b/Doc/whatsnew/2.4.rst @@ -1304,7 +1304,7 @@ complete list of changes, or look through the CVS logs for all the details. comparable. (Contributed by Raymond Hettinger.) * The :mod:`weakref` module now supports a wider variety of objects including - Python functions, class instances, sets, frozensets, deques, arrays, files, + Python functions, class instances, sets, frozensets, deques, arrays, files, sockets, and regular expression pattern objects. (Contributed by Raymond Hettinger.)
diff --git a/Doc/whatsnew/2.6.rst b/Doc/whatsnew/2.6.rst index 0803eba99e6d17..9baf6a37fae23d 100644 --- a/Doc/whatsnew/2.6.rst +++ b/Doc/whatsnew/2.6.rst @@ -3093,7 +3093,7 @@ Changes to Python's build process and to the C API include: (Contributed by Collin Winter; :issue:`1530959`.) * Several basic data types, such as integers and strings, maintain - internal free lists of objects that can be re-used. The data + internal free lists of objects that can be reused. The data structures for these free lists now follow a naming convention: the variable is always named ``free_list``, the counter is always named ``numfree``, and a macro ``Py_MAXFREELIST`` is diff --git a/Doc/whatsnew/3.0.rst b/Doc/whatsnew/3.0.rst index d858586138e9ae..05e776aecab97f 100644 --- a/Doc/whatsnew/3.0.rst +++ b/Doc/whatsnew/3.0.rst @@ -782,7 +782,7 @@ Operators And Special Methods * The function attributes named :attr:`!func_X` have been renamed to use the :attr:`!__X__` form, freeing up these names in the function - attribute namespace for user-defined attributes. To wit, + attribute namespace for user-defined attributes. To wit, :attr:`!func_closure`, :attr:`!func_code`, :attr:`!func_defaults`, :attr:`!func_dict`, :attr:`!func_doc`, :attr:`!func_globals`, :attr:`!func_name` were renamed to :attr:`~function.__closure__`, diff --git a/Doc/whatsnew/3.11.rst b/Doc/whatsnew/3.11.rst index abf9677fd9cac5..07aa1ec76adae7 100644 --- a/Doc/whatsnew/3.11.rst +++ b/Doc/whatsnew/3.11.rst @@ -1389,7 +1389,7 @@ are created whenever Python calls a Python function. The following are new frame optimizations: - Streamlined the frame creation process. -- Avoided memory allocation by generously re-using frame space on the C stack. +- Avoided memory allocation by generously reusing frame space on the C stack. - Streamlined the internal frame struct to contain only essential information. Frames previously held extra debugging and memory management information. diff --git a/Doc/whatsnew/3.12.rst b/Doc/whatsnew/3.12.rst index 7cfdc287b7fad7..25e972962dcd59 100644 --- a/Doc/whatsnew/3.12.rst +++ b/Doc/whatsnew/3.12.rst @@ -447,12 +447,12 @@ Improved Error Messages ... self.blech = 1 ... ... def foo(self): - ... somethin = blech + ... somethin = blech ... >>> A().foo() Traceback (most recent call last): File "", line 1 - somethin = blech + somethin = blech ^^^^^ NameError: name 'blech' is not defined. Did you mean: 'self.blech'? diff --git a/Doc/whatsnew/3.14.rst b/Doc/whatsnew/3.14.rst index c108a94692dca7..bf17f417a5980e 100644 --- a/Doc/whatsnew/3.14.rst +++ b/Doc/whatsnew/3.14.rst @@ -1051,7 +1051,7 @@ Concurrent safe warnings control The :class:`warnings.catch_warnings` context manager will now optionally use a context variable for warning filters. This is enabled by setting the :data:`~sys.flags.context_aware_warnings` flag, either with the ``-X`` -command-line option or an environment variable. This gives predicable +command-line option or an environment variable. This gives predictable warnings control when using :class:`~warnings.catch_warnings` combined with multiple threads or asynchronous tasks. The flag defaults to true for the free-threaded build and false for the GIL-enabled build.
diff --git a/Doc/whatsnew/3.2.rst b/Doc/whatsnew/3.2.rst index 7104904c956a7a..24db1c9400ed97 100644 --- a/Doc/whatsnew/3.2.rst +++ b/Doc/whatsnew/3.2.rst @@ -167,7 +167,7 @@ each with their own argument patterns and help displays:: $ ./helm.py --help # top level help (launch and move) $ ./helm.py launch --help # help for launch options - $ ./helm.py launch --missiles # set missiles=True and torpedos=False + $ ./helm.py launch --missiles # set missiles=True and torpedos=False $ ./helm.py steer --course 180 --speed 5 # set movement parameters .. seealso:: diff --git a/Doc/whatsnew/3.4.rst b/Doc/whatsnew/3.4.rst index e4f602a17ee968..7e1a46e1afe81c 100644 --- a/Doc/whatsnew/3.4.rst +++ b/Doc/whatsnew/3.4.rst @@ -2414,7 +2414,7 @@ Changes in the Python API * Because :mod:`unittest.TestSuite` now drops references to tests after they are run, test harnesses that reuse a :class:`~unittest.TestSuite` to re-run - a set of tests may fail. Test suites should not be re-used in this fashion + a set of tests may fail. Test suites should not be reused in this fashion since it means state is retained between test runs, breaking the test isolation that :mod:`unittest` is designed to provide. However, if the lack of isolation is considered acceptable, the old behavior can be restored by diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst index db3f1db3bd74ad..2c60efb75736e5 100644 --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -914,7 +914,7 @@ makes it 4 to 100 times faster. (Contributed by Eric Snow in :issue:`16991`.) The :class:`~collections.deque` class now defines :meth:`~collections.deque.index`, :meth:`~collections.deque.insert`, and :meth:`~collections.deque.copy`, and supports the ``+`` and ``*`` operators. -This allows deques to be recognized as a :class:`~collections.abc.MutableSequence` +This allows deques to be recognized as a :class:`~collections.abc.MutableSequence` and improves their substitutability for lists. (Contributed by Raymond Hettinger in :issue:`23704`.) diff --git a/Doc/whatsnew/3.8.rst b/Doc/whatsnew/3.8.rst index bc2eb1d0e263f0..e2b6bf229d545d 100644 --- a/Doc/whatsnew/3.8.rst +++ b/Doc/whatsnew/3.8.rst @@ -431,7 +431,7 @@ Other Language Changes ... lastname, *members = family.split() ... return lastname.upper(), *members ... - >>> parse('simpsons homer marge bart lisa maggie') + >>> parse('simpsons homer marge bart lisa maggie') ('SIMPSONS', 'homer', 'marge', 'bart', 'lisa', 'maggie') (Contributed by David Cuthbert and Jordan Chapman in :issue:`32117`.) diff --git a/Include/cpython/critical_section.h b/Include/cpython/critical_section.h index 35db3fb6a59ce6..4d48ba13451304 100644 --- a/Include/cpython/critical_section.h +++ b/Include/cpython/critical_section.h @@ -93,7 +93,7 @@ PyCriticalSection2_End(PyCriticalSection2 *c); } #else /* !Py_GIL_DISABLED */ -// NOTE: the contents of this struct are private and may change betweeen +// NOTE: the contents of this struct are private and may change between // Python releases without a deprecation period. struct PyCriticalSection { // Tagged pointer to an outer active critical section (or 0). @@ -105,7 +105,7 @@ struct PyCriticalSection { // A critical section protected by two mutexes. Use // Py_BEGIN_CRITICAL_SECTION2 and Py_END_CRITICAL_SECTION2. -// NOTE: the contents of this struct are private and may change betweeen +// NOTE: the contents of this struct are private and may change between // Python releases without a deprecation period.
struct PyCriticalSection2 { PyCriticalSection _cs_base; diff --git a/Include/internal/mimalloc/mimalloc/internal.h b/Include/internal/mimalloc/mimalloc/internal.h index a7daa3a40a4c0b..e367af348dafab 100644 --- a/Include/internal/mimalloc/mimalloc/internal.h +++ b/Include/internal/mimalloc/mimalloc/internal.h @@ -591,7 +591,7 @@ for the read case we can subtract two entries to discard the `+k1` term, but that leads to `((p1^k2)<<> 5 - preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear) + preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear) if preceding > n: # estimate is too large month -= 1 - preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear) + preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear) n -= preceding assert 0 <= n < _days_in_month(year, month) diff --git a/Lib/_pydecimal.py b/Lib/_pydecimal.py index 9b8e42a2342536..de8247c23df143 100644 --- a/Lib/_pydecimal.py +++ b/Lib/_pydecimal.py @@ -154,8 +154,8 @@ class InvalidOperation(DecimalException): """ def handle(self, context, *args): if args: - ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True) - return ans._fix_nan(context) + and = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True) + return and._fix_nan(context) return _NaN class ConversionSyntax(InvalidOperation): @@ -841,8 +841,8 @@ def __lt__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - ans = self._compare_check_nans(other, context) - if ans: + and = self._compare_check_nans(other, context) + if and: return False return self._cmp(other) < 0 @@ -850,8 +850,8 @@ def __le__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - ans = self._compare_check_nans(other, context) - if ans: + and = self._compare_check_nans(other, context) + if and: return False return self._cmp(other) <= 0 @@ -859,8 +859,8 @@ def __gt__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - ans = self._compare_check_nans(other, context) - if ans: + and = self._compare_check_nans(other, context) + if and: return False return self._cmp(other) > 0 @@ -868,8 +868,8 @@ def __ge__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - ans = self._compare_check_nans(other, context) - if ans: + and = self._compare_check_nans(other, context) + if and: return False return self._cmp(other) >= 0 @@ -885,9 +885,9 @@ def compare(self, other, context=None): # Compare(NaN, NaN) = NaN if (self._is_special or other and other._is_special): - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and return Decimal(self._cmp(other)) @@ -914,8 +914,8 @@ def __hash__(self): else: exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS) hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS - ans = hash_ if self >= 0 else -hash_ - return -2 if ans == -1 else ans + and = hash_ if self >= 0 else -hash_ + return -2 if and == -1 else and def as_tuple(self): """Represents the number as a triple tuple. @@ -1046,9 +1046,9 @@ def __neg__(self, context=None): Rounds, if it has reason.
""" if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and if context is None: context = getcontext() @@ -1056,11 +1056,11 @@ def __neg__(self, context=None): if not self and context.rounding != ROUND_FLOOR: # -Decimal('0') is Decimal('0'), not Decimal('-0'), except # in ROUND_FLOOR rounding mode. - ans = self.copy_abs() + and = self.copy_abs() else: - ans = self.copy_negate() + and = self.copy_negate() - return ans._fix(context) + return and._fix(context) def __pos__(self, context=None): """Returns a copy, unless it is a sNaN. @@ -1068,20 +1068,20 @@ def __pos__(self, context=None): Rounds the number (if more than precision digits) """ if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and if context is None: context = getcontext() if not self and context.rounding != ROUND_FLOOR: # + (-0) = 0, except in ROUND_FLOOR rounding mode. - ans = self.copy_abs() + and = self.copy_abs() else: - ans = Decimal(self) + and = Decimal(self) - return ans._fix(context) + return and._fix(context) def __abs__(self, round=True, context=None): """Returns the absolute value of self. @@ -1094,16 +1094,16 @@ def __abs__(self, round=True, context=None): return self.copy_abs() if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and if self._sign: - ans = self.__neg__(context=context) + and = self.__neg__(context=context) else: - ans = self.__pos__(context=context) + and = self.__pos__(context=context) - return ans + return and def __add__(self, other, context=None): """Returns self + other. @@ -1118,9 +1118,9 @@ def __add__(self, other, context=None): context = getcontext() if self._is_special or other._is_special: - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and if self._isinfinity(): # If both INF, same sign => same as both, opposite => error. 
@@ -1140,19 +1140,19 @@ def __add__(self, other, context=None): sign = min(self._sign, other._sign) if negativezero: sign = 1 - ans = _dec_from_triple(sign, '0', exp) - ans = ans._fix(context) - return ans + and = _dec_from_triple(sign, '0', exp) + and = and._fix(context) + return and if not self: exp = max(exp, other._exp - context.prec-1) - ans = other._rescale(exp, context.rounding) - ans = ans._fix(context) - return ans + and = other._rescale(exp, context.rounding) + and = and._fix(context) + return and if not other: exp = max(exp, self._exp - context.prec-1) - ans = self._rescale(exp, context.rounding) - ans = ans._fix(context) - return ans + and = self._rescale(exp, context.rounding) + and = and._fix(context) + return and op1 = _WorkRep(self) op2 = _WorkRep(other) @@ -1162,9 +1162,9 @@ def __add__(self, other, context=None): if op1.sign != op2.sign: # Equal and opposite if op1.int == op2.int: - ans = _dec_from_triple(negativezero, '0', exp) - ans = ans._fix(context) - return ans + and = _dec_from_triple(negativezero, '0', exp) + and = and._fix(context) + return and if op1.int < op2.int: op1, op2 = op2, op1 # OK, now abs(op1) > abs(op2) @@ -1187,9 +1187,9 @@ def __add__(self, other, context=None): result.int = op1.int - op2.int result.exp = op1.exp - ans = Decimal(result) - ans = ans._fix(context) - return ans + and = Decimal(result) + and = and._fix(context) + return and __radd__ = __add__ @@ -1200,9 +1200,9 @@ def __sub__(self, other, context=None): return other if self._is_special or other._is_special: - ans = self._check_nans(other, context=context) - if ans: - return ans + and = self._check_nans(other, context=context) + if and: + return and # self - other is computed as self + other.copy_negate() return self.__add__(other.copy_negate(), context=context) @@ -1230,9 +1230,9 @@ def __mul__(self, other, context=None): resultsign = self._sign ^ other._sign if self._is_special or other._is_special: - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and if self._isinfinity(): if not other: @@ -1248,28 +1248,28 @@ def __mul__(self, other, context=None): # Special case for multiplying by zero if not self or not other: - ans = _dec_from_triple(resultsign, '0', resultexp) + and = _dec_from_triple(resultsign, '0', resultexp) # Fixing in case the exponent is out of bounds - ans = ans._fix(context) - return ans + and = and._fix(context) + return and # Special case for multiplying by power of 10 if self._int == '1': - ans = _dec_from_triple(resultsign, other._int, resultexp) - ans = ans._fix(context) - return ans + and = _dec_from_triple(resultsign, other._int, resultexp) + and = and._fix(context) + return and if other._int == '1': - ans = _dec_from_triple(resultsign, self._int, resultexp) - ans = ans._fix(context) - return ans + and = _dec_from_triple(resultsign, self._int, resultexp) + and = and._fix(context) + return and op1 = _WorkRep(self) op2 = _WorkRep(other) - ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp) - ans = ans._fix(context) + and = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp) + and = and._fix(context) - return ans + return and __rmul__ = __mul__ def __truediv__(self, other, context=None): @@ -1284,9 +1284,9 @@ def __truediv__(self, other, context=None): sign = self._sign ^ other._sign if self._is_special or other._is_special: - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and if 
self._isinfinity() and other._isinfinity(): return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF') @@ -1328,8 +1328,8 @@ def __truediv__(self, other, context=None): coeff //= 10 exp += 1 - ans = _dec_from_triple(sign, str(coeff), exp) - return ans._fix(context) + and = _dec_from_triple(sign, str(coeff), exp) + return and._fix(context) def _divide(self, other, context): """Return (self // other, self % other), to context.prec precision. @@ -1360,9 +1360,9 @@ def _divide(self, other, context): _dec_from_triple(self._sign, str(r), ideal_exp)) # Here the quotient is too large to be representable - ans = context._raise_error(DivisionImpossible, + and = context._raise_error(DivisionImpossible, 'quotient too large in //, % or divmod') - return ans, ans + return and, and def __rtruediv__(self, other, context=None): """Swaps self/other and returns __truediv__.""" @@ -1382,23 +1382,23 @@ def __divmod__(self, other, context=None): if context is None: context = getcontext() - ans = self._check_nans(other, context) - if ans: - return (ans, ans) + and = self._check_nans(other, context) + if and: + return (and, and) sign = self._sign ^ other._sign if self._isinfinity(): if other._isinfinity(): - ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)') - return ans, ans + and = context._raise_error(InvalidOperation, 'divmod(INF, INF)') + return and, and else: return (_SignedInfinity[sign], context._raise_error(InvalidOperation, 'INF % x')) if not other: if not self: - ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)') - return ans, ans + and = context._raise_error(DivisionUndefined, 'divmod(0, 0)') + return and, and else: return (context._raise_error(DivisionByZero, 'x // 0', sign), context._raise_error(InvalidOperation, 'x % 0')) @@ -1425,9 +1425,9 @@ def __mod__(self, other, context=None): if context is None: context = getcontext() - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and if self._isinfinity(): return context._raise_error(InvalidOperation, 'INF % x') @@ -1457,9 +1457,9 @@ def remainder_near(self, other, context=None): other = _convert_other(other, raiseit=True) - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and # self == +/-infinity -> InvalidOperation if self._isinfinity(): @@ -1477,14 +1477,14 @@ def remainder_near(self, other, context=None): # other = +/-infinity -> remainder = self if other._isinfinity(): - ans = Decimal(self) - return ans._fix(context) + and = Decimal(self) + return and._fix(context) # self = 0 -> remainder = self, with ideal exponent ideal_exponent = min(self._exp, other._exp) if not self: - ans = _dec_from_triple(self._sign, '0', ideal_exponent) - return ans._fix(context) + and = _dec_from_triple(self._sign, '0', ideal_exponent) + return and._fix(context) # catch most cases of large or small quotient expdiff = self.adjusted() - other.adjusted() @@ -1493,8 +1493,8 @@ def remainder_near(self, other, context=None): return context._raise_error(DivisionImpossible) if expdiff <= -2: # expdiff <= -2 => abs(self/other) < 0.1 - ans = self._rescale(ideal_exponent, context.rounding) - return ans._fix(context) + and = self._rescale(ideal_exponent, context.rounding) + return and._fix(context) # adjust both arguments to have the same exponent, then divide op1 = _WorkRep(self) @@ -1520,8 +1520,8 @@ def remainder_near(self, other, context=None): sign = 1-sign r = -r - ans = _dec_from_triple(sign, str(r), 
ideal_exponent) - return ans._fix(context) + and = _dec_from_triple(sign, str(r), ideal_exponent) + return and._fix(context) def __floordiv__(self, other, context=None): """self // other""" @@ -1532,9 +1532,9 @@ def __floordiv__(self, other, context=None): if context is None: context = getcontext() - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and if self._isinfinity(): if other._isinfinity(): @@ -1645,10 +1645,10 @@ def _fix(self, context): exp_min = len(self._int) + self._exp - context.prec if exp_min > Etop: # overflow: exp_min > Etop iff self.adjusted() > Emax - ans = context._raise_error(Overflow, 'above Emax', self._sign) + and = context._raise_error(Overflow, 'above Emax', self._sign) context._raise_error(Inexact) context._raise_error(Rounded) - return ans + return and self_is_subnormal = exp_min < Etiny if self_is_subnormal: @@ -1671,9 +1671,9 @@ def _fix(self, context): # check whether the rounding pushed the exponent out of range if exp_min > Etop: - ans = context._raise_error(Overflow, 'above Emax', self._sign) + and = context._raise_error(Overflow, 'above Emax', self._sign) else: - ans = _dec_from_triple(self._sign, coeff, exp_min) + and = _dec_from_triple(self._sign, coeff, exp_min) # raise the appropriate signals, taking care to respect # the precedence described in the specification @@ -1684,10 +1684,10 @@ def _fix(self, context): if changed: context._raise_error(Inexact) context._raise_error(Rounded) - if not ans: + if not and: # raise Clamped on underflow to 0 context._raise_error(Clamped) - return ans + return and if self_is_subnormal: context._raise_error(Subnormal) @@ -2282,9 +2282,9 @@ def __pow__(self, other, modulo=None, context=None): context = getcontext() # either argument is a NaN => result is NaN - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and # 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity) if not other: @@ -2362,7 +2362,7 @@ def __pow__(self, other, modulo=None, context=None): # from here on, the result always goes through the call # to _fix at the end of this function. - ans = None + and = None exact = False # crude test to catch cases of extreme overflow/underflow. 
If @@ -2375,24 +2375,24 @@ def __pow__(self, other, modulo=None, context=None): # self > 1 and other +ve, or self < 1 and other -ve # possibility of overflow if bound >= len(str(context.Emax)): - ans = _dec_from_triple(result_sign, '1', context.Emax+1) + and = _dec_from_triple(result_sign, '1', context.Emax+1) else: # self > 1 and other -ve, or self < 1 and other +ve # possibility of underflow to 0 Etiny = context.Etiny() if bound >= len(str(-Etiny)): - ans = _dec_from_triple(result_sign, '1', Etiny-1) + and = _dec_from_triple(result_sign, '1', Etiny-1) # try for an exact result with precision +1 - if ans is None: - ans = self._power_exact(other, context.prec + 1) - if ans is not None: + if and is None: + and = self._power_exact(other, context.prec + 1) + if and is not None: if result_sign == 1: - ans = _dec_from_triple(1, ans._int, ans._exp) + and = _dec_from_triple(1, and._int, and._exp) exact = True # usual case: inexact result, x**y computed directly as exp(y*log(x)) - if ans is None: + if and is None: p = context.prec x = _WorkRep(self) xc, xe = x.int, x.exp @@ -2410,7 +2410,7 @@ def __pow__(self, other, modulo=None, context=None): break extra += 3 - ans = _dec_from_triple(result_sign, str(coeff), exp) + and = _dec_from_triple(result_sign, str(coeff), exp) # unlike exp, ln and log10, the power function respects the # rounding mode; no need to switch to ROUND_HALF_EVEN here @@ -2428,10 +2428,10 @@ def __pow__(self, other, modulo=None, context=None): if exact and not other._isinteger(): # pad with zeros up to length context.prec+1 if necessary; this # ensures that the Rounded signal will be raised. - if len(ans._int) <= context.prec: - expdiff = context.prec + 1 - len(ans._int) - ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff, - ans._exp-expdiff) + if len(and._int) <= context.prec: + expdiff = context.prec + 1 - len(and._int) + and = _dec_from_triple(and._sign, and._int+'0'*expdiff, + and._exp-expdiff) # create a copy of the current context, with cleared flags/traps newcontext = context.copy() @@ -2440,7 +2440,7 @@ def __pow__(self, other, modulo=None, context=None): newcontext.traps[exception] = 0 # round in the new context - ans = ans._fix(newcontext) + and = and._fix(newcontext) # raise Inexact, and if necessary, Underflow newcontext._raise_error(Inexact) @@ -2453,15 +2453,15 @@ def __pow__(self, other, modulo=None, context=None): # arguments. Note that the order of the exceptions is # important here. 
if newcontext.flags[Overflow]: - context._raise_error(Overflow, 'above Emax', ans._sign) + context._raise_error(Overflow, 'above Emax', and._sign) for exception in Underflow, Subnormal, Inexact, Rounded, Clamped: if newcontext.flags[exception]: context._raise_error(exception) else: - ans = ans._fix(context) + and = and._fix(context) - return ans + return and def __rpow__(self, other, modulo=None, context=None): """Swaps self/other and returns __pow__.""" @@ -2477,9 +2477,9 @@ def normalize(self, context=None): context = getcontext() if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and dup = self._fix(context) if dup._isinfinity(): @@ -2508,9 +2508,9 @@ def quantize(self, exp, rounding=None, context=None): rounding = context.rounding if self._is_special or exp._is_special: - ans = self._check_nans(exp, context) - if ans: - return ans + and = self._check_nans(exp, context) + if and: + return and if exp._isinfinity() or self._isinfinity(): if exp._isinfinity() and self._isinfinity(): @@ -2524,8 +2524,8 @@ def quantize(self, exp, rounding=None, context=None): 'target exponent out of bounds in quantize') if not self: - ans = _dec_from_triple(self._sign, '0', exp._exp) - return ans._fix(context) + and = _dec_from_triple(self._sign, '0', exp._exp) + return and._fix(context) self_adjusted = self.adjusted() if self_adjusted > context.Emax: @@ -2535,26 +2535,26 @@ def quantize(self, exp, rounding=None, context=None): return context._raise_error(InvalidOperation, 'quantize result has too many digits for current context') - ans = self._rescale(exp._exp, rounding) - if ans.adjusted() > context.Emax: + and = self._rescale(exp._exp, rounding) + if and.adjusted() > context.Emax: return context._raise_error(InvalidOperation, 'exponent of quantize result too large for current context') - if len(ans._int) > context.prec: + if len(and._int) > context.prec: return context._raise_error(InvalidOperation, 'quantize result has too many digits for current context') # raise appropriate flags - if ans and ans.adjusted() < context.Emin: + if and and and.adjusted() < context.Emin: context._raise_error(Subnormal) - if ans._exp > self._exp: - if ans != self: + if and._exp > self._exp: + if and != self: context._raise_error(Inexact) context._raise_error(Rounded) # call to fix takes care of any necessary folddown, and # signals Clamped if necessary - ans = ans._fix(context) - return ans + and = and._fix(context) + return and def same_quantum(self, other, context=None): """Return True if self and other have the same exponent; otherwise @@ -2619,14 +2619,14 @@ def _round(self, places, rounding): raise ValueError("argument should be at least 1 in _round") if self._is_special or not self: return Decimal(self) - ans = self._rescale(self.adjusted()+1-places, rounding) + and = self._rescale(self.adjusted()+1-places, rounding) # it can happen that the rescale alters the adjusted exponent; # for example when rounding 99.97 to 3 significant figures. # When this happens we end up with an extra 0 at the end of # the number; a second rescale fixes this. - if ans.adjusted() != self.adjusted(): - ans = ans._rescale(ans.adjusted()+1-places, rounding) - return ans + if and.adjusted() != self.adjusted(): + and = and._rescale(and.adjusted()+1-places, rounding) + return and def to_integral_exact(self, rounding=None, context=None): """Rounds to a nearby integer. 
@@ -2639,9 +2639,9 @@ def to_integral_exact(self, rounding=None, context=None): this method except that it doesn't raise Inexact or Rounded. """ if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and return Decimal(self) if self._exp >= 0: return Decimal(self) @@ -2651,11 +2651,11 @@ def to_integral_exact(self, rounding=None, context=None): context = getcontext() if rounding is None: rounding = context.rounding - ans = self._rescale(0, rounding) - if ans != self: + and = self._rescale(0, rounding) + if and != self: context._raise_error(Inexact) context._raise_error(Rounded) - return ans + return and def to_integral_value(self, rounding=None, context=None): """Rounds to the nearest integer, without raising inexact, rounded.""" @@ -2664,9 +2664,9 @@ def to_integral_value(self, rounding=None, context=None): if rounding is None: rounding = context.rounding if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and return Decimal(self) if self._exp >= 0: return Decimal(self) @@ -2682,17 +2682,17 @@ def sqrt(self, context=None): context = getcontext() if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and if self._isinfinity() and self._sign == 0: return Decimal(self) if not self: # exponent = self._exp // 2. sqrt(-0) = -0 - ans = _dec_from_triple(self._sign, '0', self._exp // 2) - return ans._fix(context) + and = _dec_from_triple(self._sign, '0', self._exp // 2) + return and._fix(context) if self._sign == 1: return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0') @@ -2765,15 +2765,15 @@ def sqrt(self, context=None): if n % 5 == 0: n += 1 - ans = _dec_from_triple(0, str(n), e) + and = _dec_from_triple(0, str(n), e) # round, and fit to current context context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) - ans = ans._fix(context) + and = and._fix(context) context.rounding = rounding - return ans + return and def max(self, other, context=None): """Returns the larger value. @@ -2811,11 +2811,11 @@ def max(self, other, context=None): c = self.compare_total(other) if c == -1: - ans = other + and = other else: - ans = self + and = self - return ans._fix(context) + return and._fix(context) def min(self, other, context=None): """Returns the smaller value. @@ -2845,11 +2845,11 @@ def min(self, other, context=None): c = self.compare_total(other) if c == -1: - ans = self + and = self else: - ans = other + and = other - return ans._fix(context) + return and._fix(context) def _isinteger(self): """Returns whether self is an integer""" @@ -2889,9 +2889,9 @@ def compare_signal(self, other, context=None): NaNs taking precedence over quiet NaNs. 
""" other = _convert_other(other, raiseit = True) - ans = self._compare_check_nans(other, context) - if ans: - return ans + and = self._compare_check_nans(other, context) + if and: + return and return self.compare(other, context=context) def compare_total(self, other, context=None): @@ -3002,9 +3002,9 @@ def exp(self, context=None): context = getcontext() # exp(NaN) = NaN - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and # exp(-Infinity) = 0 if self._isinfinity() == -1: @@ -3032,16 +3032,16 @@ def exp(self, context=None): # larger exponent the result either overflows or underflows. if self._sign == 0 and adj > len(str((context.Emax+1)*3)): # overflow - ans = _dec_from_triple(0, '1', context.Emax+1) + and = _dec_from_triple(0, '1', context.Emax+1) elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)): # underflow to 0 - ans = _dec_from_triple(0, '1', context.Etiny()-1) + and = _dec_from_triple(0, '1', context.Etiny()-1) elif self._sign == 0 and adj < -p: # p+1 digits; final round will raise correct flags - ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p) + and = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p) elif self._sign == 1 and adj < -p-1: # p+1 digits; final round will raise correct flags - ans = _dec_from_triple(0, '9'*(p+1), -p-1) + and = _dec_from_triple(0, '9'*(p+1), -p-1) # general case else: op = _WorkRep(self) @@ -3059,16 +3059,16 @@ def exp(self, context=None): break extra += 3 - ans = _dec_from_triple(0, str(coeff), exp) + and = _dec_from_triple(0, str(coeff), exp) - # at this stage, ans should round correctly with *any* + # at this stage, and should round correctly with *any* # rounding mode, not just with ROUND_HALF_EVEN context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) - ans = ans._fix(context) + and = and._fix(context) context.rounding = rounding - return ans + return and def is_canonical(self): """Return True if self is canonical; otherwise return False. @@ -3158,9 +3158,9 @@ def ln(self, context=None): context = getcontext() # ln(NaN) = NaN - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and # ln(0.0) == -Infinity if not self: @@ -3193,13 +3193,13 @@ def ln(self, context=None): if coeff % (5*10**(len(str(abs(coeff)))-p-1)): break places += 3 - ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) + and = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) - ans = ans._fix(context) + and = and._fix(context) context.rounding = rounding - return ans + return and def _log10_exp_bound(self): """Compute a lower bound for the adjusted exponent of self.log10(). 
@@ -3238,9 +3238,9 @@ def log10(self, context=None): context = getcontext() # log10(NaN) = NaN - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and # log10(0.0) == -Infinity if not self: @@ -3258,7 +3258,7 @@ def log10(self, context=None): # log10(10**n) = n if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1): # answer may need rounding - ans = Decimal(self._exp + len(self._int) - 1) + and = Decimal(self._exp + len(self._int) - 1) else: # result is irrational, so necessarily inexact op = _WorkRep(self) @@ -3274,13 +3274,13 @@ def log10(self, context=None): if coeff % (5*10**(len(str(abs(coeff)))-p-1)): break places += 3 - ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) + and = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) - ans = ans._fix(context) + and = and._fix(context) context.rounding = rounding - return ans + return and def logb(self, context=None): """ Returns the exponent of the magnitude of self's MSD. @@ -3291,9 +3291,9 @@ def logb(self, context=None): without limiting the resulting exponent). """ # logb(NaN) = NaN - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and if context is None: context = getcontext() @@ -3309,8 +3309,8 @@ def logb(self, context=None): # otherwise, simply return the adjusted exponent of self, as a # Decimal. Note that no attempt is made to fit the result # into the current context. - ans = Decimal(self.adjusted()) - return ans._fix(context) + and = Decimal(self.adjusted()) + return and._fix(context) def _islogical(self): """Return True if self is a logical operand. 
@@ -3421,11 +3421,11 @@ def max_mag(self, other, context=None): c = self.compare_total(other) if c == -1: - ans = other + and = other else: - ans = self + and = self - return ans._fix(context) + return and._fix(context) def min_mag(self, other, context=None): """Compares the values numerically with their sign ignored.""" @@ -3451,20 +3451,20 @@ def min_mag(self, other, context=None): c = self.compare_total(other) if c == -1: - ans = self + and = self else: - ans = other + and = other - return ans._fix(context) + return and._fix(context) def next_minus(self, context=None): """Returns the largest representable number smaller than itself.""" if context is None: context = getcontext() - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and if self._isinfinity() == -1: return _NegativeInfinity @@ -3485,9 +3485,9 @@ def next_plus(self, context=None): if context is None: context = getcontext() - ans = self._check_nans(context=context) - if ans: - return ans + and = self._check_nans(context=context) + if and: + return and if self._isinfinity() == 1: return _Infinity @@ -3517,37 +3517,37 @@ def next_toward(self, other, context=None): if context is None: context = getcontext() - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and comparison = self._cmp(other) if comparison == 0: return self.copy_sign(other) if comparison == -1: - ans = self.next_plus(context) + and = self.next_plus(context) else: # comparison == 1 - ans = self.next_minus(context) + and = self.next_minus(context) - # decide which flags to raise using value of ans - if ans._isinfinity(): + # decide which flags to raise using value of and + if and._isinfinity(): context._raise_error(Overflow, 'Infinite result from next_toward', - ans._sign) + and._sign) context._raise_error(Inexact) context._raise_error(Rounded) - elif ans.adjusted() < context.Emin: + elif and.adjusted() < context.Emin: context._raise_error(Underflow) context._raise_error(Subnormal) context._raise_error(Inexact) context._raise_error(Rounded) # if precision == 1 then we don't raise Clamped for a # result 0E-Etiny. - if not ans: + if not and: context._raise_error(Clamped) - return ans + return and def number_class(self, context=None): """Returns an indication of the class of self. 
@@ -3602,9 +3602,9 @@ def rotate(self, other, context=None): other = _convert_other(other, raiseit=True) - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and if other._exp != 0: return context._raise_error(InvalidOperation) @@ -3635,9 +3635,9 @@ def scaleb(self, other, context=None): other = _convert_other(other, raiseit=True) - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and if other._exp != 0: return context._raise_error(InvalidOperation) @@ -3660,9 +3660,9 @@ def shift(self, other, context=None): other = _convert_other(other, raiseit=True) - ans = self._check_nans(other, context) - if ans: - return ans + and = self._check_nans(other, context) + if and: + return and if other._exp != 0: return context._raise_error(InvalidOperation) diff --git a/Lib/_strptime.py b/Lib/_strptime.py index cdc55e8daaffa6..d3f3382c64a4b8 100644 --- a/Lib/_strptime.py +++ b/Lib/_strptime.py @@ -2,7 +2,7 @@ CLASSES: LocaleTime -- Discovers and stores locale-specific time information - TimeRE -- Creates regexes for pattern matching a string of text containing + timer -- Creates regexes for pattern matching a string of text containing time information FUNCTIONS: @@ -337,7 +337,7 @@ def __calc_timezone(self): self.timezone = (no_saving, has_saving) -class TimeRE(dict): +class timer(dict): """Handle conversion from format directives to regexes.""" def __init__(self, locale_time=None): @@ -488,7 +488,7 @@ def compile(self, format): _cache_lock = _thread_allocate_lock() # DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock # first! -_TimeRE_cache = TimeRE() +_TimeRE_cache = timer() _CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache _regex_cache = {} @@ -529,7 +529,7 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): if (_getlang() != locale_time.lang or time.tzname != locale_time.tzname or time.daylight != locale_time.daylight): - _TimeRE_cache = TimeRE() + _TimeRE_cache = timer() _regex_cache.clear() locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: diff --git a/Lib/asyncio/graph.py b/Lib/asyncio/graph.py index b5bfeb1630a159..6db47eda0400b7 100644 --- a/Lib/asyncio/graph.py +++ b/Lib/asyncio/graph.py @@ -17,7 +17,7 @@ 'FutureCallGraph', ) -# Sadly, we can't re-use the traceback module's datastructures as those +# Sadly, we can't reuse the traceback module's datastructures as those # are tailored for error reporting, whereas we need to represent an # async call graph. # diff --git a/Lib/email/charset.py b/Lib/email/charset.py index 5036c3f58a5633..761430671a2259 100644 --- a/Lib/email/charset.py +++ b/Lib/email/charset.py @@ -221,12 +221,12 @@ def __init__(self, input_charset=DEFAULT_CHARSET): # We can try to guess which encoding and conversion to use by the # charset_map dictionary. Try that first, but let the user override # it. - henc, benc, conv = CHARSETS.get(self.input_charset, + hence, benc, conv = CHARSETS.get(self.input_charset, (SHORTEST, BASE64, None)) if not conv: conv = self.input_charset # Set the attributes, allowing the arguments to override the default. - self.header_encoding = henc + self.header_encoding = hence self.body_encoding = benc self.output_charset = ALIASES.get(conv, conv) # Now set the codecs. 
If one isn't defined for input_charset, diff --git a/Lib/encodings/cp1006.py b/Lib/encodings/cp1006.py index a1221c3ef1ce52..827b92e51af6f9 100644 --- a/Lib/encodings/cp1006.py +++ b/Lib/encodings/cp1006.py @@ -228,9 +228,9 @@ def getregentry(): '\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM '\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM '\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM - '\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM - '\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM - '\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM + '\ufe93' # 0xB7 -> ARABIC LETTER THE MARBUTA ISOLATED FORM + '\ufe95' # 0xB8 -> ARABIC LETTER THE ISOLATED FORM + '\ufe97' # 0xB9 -> ARABIC LETTER THE INITIAL FORM '\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM '\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM '\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM diff --git a/Lib/encodings/cp1253.py b/Lib/encodings/cp1253.py index ec9c0972d10d72..5a1b148a60c942 100644 --- a/Lib/encodings/cp1253.py +++ b/Lib/encodings/cp1253.py @@ -248,7 +248,7 @@ def getregentry(): '\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA '\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA '\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA - '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA + '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMBDA '\u039c' # 0xCC -> GREEK CAPITAL LETTER MU '\u039d' # 0xCD -> GREEK CAPITAL LETTER NU '\u039e' # 0xCE -> GREEK CAPITAL LETTER XI @@ -280,7 +280,7 @@ def getregentry(): '\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA '\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA '\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA + '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMBDA '\u03bc' # 0xEC -> GREEK SMALL LETTER MU '\u03bd' # 0xED -> GREEK SMALL LETTER NU '\u03be' # 0xEE -> GREEK SMALL LETTER XI diff --git a/Lib/encodings/cp1256.py b/Lib/encodings/cp1256.py index fd6afab52c634c..a105efb84b30f1 100644 --- a/Lib/encodings/cp1256.py +++ b/Lib/encodings/cp1256.py @@ -246,8 +246,8 @@ def getregentry(): '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE '\u0627' # 0xC7 -> ARABIC LETTER ALEF '\u0628' # 0xC8 -> ARABIC LETTER BEH - '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA - '\u062a' # 0xCA -> ARABIC LETTER TEH + '\u0629' # 0xC9 -> ARABIC LETTER THE MARBUTA + '\u062a' # 0xCA -> ARABIC LETTER THE '\u062b' # 0xCB -> ARABIC LETTER THEH '\u062c' # 0xCC -> ARABIC LETTER JEEM '\u062d' # 0xCD -> ARABIC LETTER HAH diff --git a/Lib/encodings/cp720.py b/Lib/encodings/cp720.py index 96d609616c4d28..8bdf6cd2405c2d 100644 --- a/Lib/encodings/cp720.py +++ b/Lib/encodings/cp720.py @@ -208,8 +208,8 @@ def getregentry(): '\u0626' # 0x9E -> ARABIC LETTER YEH WITH HAMZA ABOVE '\u0627' # 0x9F -> ARABIC LETTER ALEF '\u0628' # 0xA0 -> ARABIC LETTER BEH - '\u0629' # 0xA1 -> ARABIC LETTER TEH MARBUTA - '\u062a' # 0xA2 -> ARABIC LETTER TEH + '\u0629' # 0xA1 -> ARABIC LETTER THE MARBUTA + '\u062a' # 0xA2 -> ARABIC LETTER THE '\u062b' # 0xA3 -> ARABIC LETTER THEH '\u062c' # 0xA4 -> ARABIC LETTER JEEM '\u062d' # 0xA5 -> ARABIC LETTER HAH diff --git a/Lib/encodings/cp737.py b/Lib/encodings/cp737.py index 9685bae75b36cc..26ca2c97f7883f 100644 --- a/Lib/encodings/cp737.py +++ b/Lib/encodings/cp737.py @@ -55,7 +55,7 @@ def getregentry(): 0x0087: 0x0398, # GREEK CAPITAL LETTER THETA 0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA 0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA - 0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA + 0x008a: 0x039b, # GREEK CAPITAL LETTER LAMBDA 0x008b: 0x039c, # GREEK CAPITAL LETTER MU 0x008c: 
0x039d, # GREEK CAPITAL LETTER NU 0x008d: 0x039e, # GREEK CAPITAL LETTER XI @@ -79,7 +79,7 @@ def getregentry(): 0x009f: 0x03b8, # GREEK SMALL LETTER THETA 0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA 0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA - 0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA + 0x00a2: 0x03bb, # GREEK SMALL LETTER LAMBDA 0x00a3: 0x03bc, # GREEK SMALL LETTER MU 0x00a4: 0x03bd, # GREEK SMALL LETTER NU 0x00a5: 0x03be, # GREEK SMALL LETTER XI @@ -316,7 +316,7 @@ def getregentry(): '\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA '\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA '\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA - '\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA + '\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMBDA '\u039c' # 0x008b -> GREEK CAPITAL LETTER MU '\u039d' # 0x008c -> GREEK CAPITAL LETTER NU '\u039e' # 0x008d -> GREEK CAPITAL LETTER XI @@ -340,7 +340,7 @@ def getregentry(): '\u03b8' # 0x009f -> GREEK SMALL LETTER THETA '\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA '\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA + '\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMBDA '\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU '\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU '\u03be' # 0x00a5 -> GREEK SMALL LETTER XI @@ -590,7 +590,7 @@ def getregentry(): 0x0398: 0x0087, # GREEK CAPITAL LETTER THETA 0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA 0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA - 0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA + 0x039b: 0x008a, # GREEK CAPITAL LETTER LAMBDA 0x039c: 0x008b, # GREEK CAPITAL LETTER MU 0x039d: 0x008c, # GREEK CAPITAL LETTER NU 0x039e: 0x008d, # GREEK CAPITAL LETTER XI @@ -620,7 +620,7 @@ def getregentry(): 0x03b8: 0x009f, # GREEK SMALL LETTER THETA 0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA 0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA - 0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA + 0x03bb: 0x00a2, # GREEK SMALL LETTER LAMBDA 0x03bc: 0x00a3, # GREEK SMALL LETTER MU 0x03bd: 0x00a4, # GREEK SMALL LETTER NU 0x03be: 0x00a5, # GREEK SMALL LETTER XI diff --git a/Lib/encodings/cp864.py b/Lib/encodings/cp864.py index 53df482dcd617a..9d950dff5902f5 100644 --- a/Lib/encodings/cp864.py +++ b/Lib/encodings/cp864.py @@ -85,7 +85,7 @@ def getregentry(): 0x00a7: None, # UNDEFINED 0x00a8: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM 0x00a9: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM - 0x00aa: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM + 0x00aa: 0xfe95, # ARABIC LETTER THE ISOLATED FORM 0x00ab: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM 0x00ac: 0x060c, # ARABIC COMMA 0x00ad: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM @@ -116,8 +116,8 @@ def getregentry(): 0x00c6: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM 0x00c7: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM 0x00c8: 0xfe91, # ARABIC LETTER BEH INITIAL FORM - 0x00c9: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM - 0x00ca: 0xfe97, # ARABIC LETTER TEH INITIAL FORM + 0x00c9: 0xfe93, # ARABIC LETTER THE MARBUTA ISOLATED FORM + 0x00ca: 0xfe97, # ARABIC LETTER THE INITIAL FORM 0x00cb: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM 0x00cc: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM 0x00cd: 0xfea3, # ARABIC LETTER HAH INITIAL FORM @@ -346,7 +346,7 @@ def getregentry(): '\ufffe' # 0x00a7 -> UNDEFINED '\ufe8e' # 0x00a8 -> ARABIC LETTER ALEF FINAL FORM '\ufe8f' # 0x00a9 -> ARABIC LETTER BEH ISOLATED FORM - '\ufe95' # 0x00aa -> ARABIC LETTER TEH ISOLATED FORM + '\ufe95' # 0x00aa -> ARABIC LETTER THE ISOLATED FORM '\ufe99' # 0x00ab -> ARABIC LETTER THEH ISOLATED FORM '\u060c' # 0x00ac -> ARABIC COMMA 
'\ufe9d' # 0x00ad -> ARABIC LETTER JEEM ISOLATED FORM @@ -377,8 +377,8 @@ def getregentry(): '\ufe8b' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM '\ufe8d' # 0x00c7 -> ARABIC LETTER ALEF ISOLATED FORM '\ufe91' # 0x00c8 -> ARABIC LETTER BEH INITIAL FORM - '\ufe93' # 0x00c9 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM - '\ufe97' # 0x00ca -> ARABIC LETTER TEH INITIAL FORM + '\ufe93' # 0x00c9 -> ARABIC LETTER THE MARBUTA ISOLATED FORM + '\ufe97' # 0x00ca -> ARABIC LETTER THE INITIAL FORM '\ufe9b' # 0x00cb -> ARABIC LETTER THEH INITIAL FORM '\ufe9f' # 0x00cc -> ARABIC LETTER JEEM INITIAL FORM '\ufea3' # 0x00cd -> ARABIC LETTER HAH INITIAL FORM @@ -627,9 +627,9 @@ def getregentry(): 0xfe8e: 0x00a8, # ARABIC LETTER ALEF FINAL FORM 0xfe8f: 0x00a9, # ARABIC LETTER BEH ISOLATED FORM 0xfe91: 0x00c8, # ARABIC LETTER BEH INITIAL FORM - 0xfe93: 0x00c9, # ARABIC LETTER TEH MARBUTA ISOLATED FORM - 0xfe95: 0x00aa, # ARABIC LETTER TEH ISOLATED FORM - 0xfe97: 0x00ca, # ARABIC LETTER TEH INITIAL FORM + 0xfe93: 0x00c9, # ARABIC LETTER THE MARBUTA ISOLATED FORM + 0xfe95: 0x00aa, # ARABIC LETTER THE ISOLATED FORM + 0xfe97: 0x00ca, # ARABIC LETTER THE INITIAL FORM 0xfe99: 0x00ab, # ARABIC LETTER THEH ISOLATED FORM 0xfe9b: 0x00cb, # ARABIC LETTER THEH INITIAL FORM 0xfe9d: 0x00ad, # ARABIC LETTER JEEM ISOLATED FORM diff --git a/Lib/encodings/cp869.py b/Lib/encodings/cp869.py index 8d8a29b175c188..ee54cb4bc173bc 100644 --- a/Lib/encodings/cp869.py +++ b/Lib/encodings/cp869.py @@ -99,7 +99,7 @@ def getregentry(): 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x039a, # GREEK CAPITAL LETTER KAPPA - 0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMDA + 0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMBDA 0x00b7: 0x039c, # GREEK CAPITAL LETTER MU 0x00b8: 0x039d, # GREEK CAPITAL LETTER NU 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT @@ -146,7 +146,7 @@ def getregentry(): 0x00e2: 0x03b8, # GREEK SMALL LETTER THETA 0x00e3: 0x03b9, # GREEK SMALL LETTER IOTA 0x00e4: 0x03ba, # GREEK SMALL LETTER KAPPA - 0x00e5: 0x03bb, # GREEK SMALL LETTER LAMDA + 0x00e5: 0x03bb, # GREEK SMALL LETTER LAMBDA 0x00e6: 0x03bc, # GREEK SMALL LETTER MU 0x00e7: 0x03bd, # GREEK SMALL LETTER NU 0x00e8: 0x03be, # GREEK SMALL LETTER XI @@ -360,7 +360,7 @@ def getregentry(): '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\u039a' # 0x00b5 -> GREEK CAPITAL LETTER KAPPA - '\u039b' # 0x00b6 -> GREEK CAPITAL LETTER LAMDA + '\u039b' # 0x00b6 -> GREEK CAPITAL LETTER LAMBDA '\u039c' # 0x00b7 -> GREEK CAPITAL LETTER MU '\u039d' # 0x00b8 -> GREEK CAPITAL LETTER NU '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT @@ -407,7 +407,7 @@ def getregentry(): '\u03b8' # 0x00e2 -> GREEK SMALL LETTER THETA '\u03b9' # 0x00e3 -> GREEK SMALL LETTER IOTA '\u03ba' # 0x00e4 -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0x00e5 -> GREEK SMALL LETTER LAMDA + '\u03bb' # 0x00e5 -> GREEK SMALL LETTER LAMBDA '\u03bc' # 0x00e6 -> GREEK SMALL LETTER MU '\u03bd' # 0x00e7 -> GREEK SMALL LETTER NU '\u03be' # 0x00e8 -> GREEK SMALL LETTER XI @@ -603,7 +603,7 @@ def getregentry(): 0x0398: 0x00ac, # GREEK CAPITAL LETTER THETA 0x0399: 0x00ad, # GREEK CAPITAL LETTER IOTA 0x039a: 0x00b5, # GREEK CAPITAL LETTER KAPPA - 0x039b: 0x00b6, # GREEK CAPITAL LETTER LAMDA + 0x039b: 0x00b6, # GREEK CAPITAL LETTER LAMBDA 0x039c: 0x00b7, # GREEK CAPITAL LETTER MU 0x039d: 0x00b8, # GREEK CAPITAL LETTER NU 0x039e: 0x00bd, # GREEK CAPITAL LETTER XI @@ -634,7 +634,7 @@ def 
getregentry(): 0x03b8: 0x00e2, # GREEK SMALL LETTER THETA 0x03b9: 0x00e3, # GREEK SMALL LETTER IOTA 0x03ba: 0x00e4, # GREEK SMALL LETTER KAPPA - 0x03bb: 0x00e5, # GREEK SMALL LETTER LAMDA + 0x03bb: 0x00e5, # GREEK SMALL LETTER LAMBDA 0x03bc: 0x00e6, # GREEK SMALL LETTER MU 0x03bd: 0x00e7, # GREEK SMALL LETTER NU 0x03be: 0x00e8, # GREEK SMALL LETTER XI diff --git a/Lib/encodings/cp875.py b/Lib/encodings/cp875.py index c25a5a43bc49e1..ba1cac614eb5cd 100644 --- a/Lib/encodings/cp875.py +++ b/Lib/encodings/cp875.py @@ -127,7 +127,7 @@ def getregentry(): '!' # 0x4F -> EXCLAMATION MARK '&' # 0x50 -> AMPERSAND '\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA - '\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA + '\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMBDA '\u039c' # 0x53 -> GREEK CAPITAL LETTER MU '\u039d' # 0x54 -> GREEK CAPITAL LETTER NU '\u039e' # 0x55 -> GREEK CAPITAL LETTER XI @@ -203,7 +203,7 @@ def getregentry(): '\u03b8' # 0x9B -> GREEK SMALL LETTER THETA '\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA '\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA + '\u03bb' # 0x9E -> GREEK SMALL LETTER LAMBDA '\u03bc' # 0x9F -> GREEK SMALL LETTER MU '\xb4' # 0xA0 -> ACUTE ACCENT '~' # 0xA1 -> TILDE diff --git a/Lib/encodings/iso8859_6.py b/Lib/encodings/iso8859_6.py index b02ade6eaf4e13..bc41f6134c7828 100644 --- a/Lib/encodings/iso8859_6.py +++ b/Lib/encodings/iso8859_6.py @@ -246,8 +246,8 @@ def getregentry(): '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE '\u0627' # 0xC7 -> ARABIC LETTER ALEF '\u0628' # 0xC8 -> ARABIC LETTER BEH - '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA - '\u062a' # 0xCA -> ARABIC LETTER TEH + '\u0629' # 0xC9 -> ARABIC LETTER THE MARBUTA + '\u062a' # 0xCA -> ARABIC LETTER THE '\u062b' # 0xCB -> ARABIC LETTER THEH '\u062c' # 0xCC -> ARABIC LETTER JEEM '\u062d' # 0xCD -> ARABIC LETTER HAH diff --git a/Lib/encodings/iso8859_7.py b/Lib/encodings/iso8859_7.py index d7b39cbc3a70ed..af08a306645983 100644 --- a/Lib/encodings/iso8859_7.py +++ b/Lib/encodings/iso8859_7.py @@ -248,7 +248,7 @@ def getregentry(): '\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA '\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA '\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA - '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA + '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMBDA '\u039c' # 0xCC -> GREEK CAPITAL LETTER MU '\u039d' # 0xCD -> GREEK CAPITAL LETTER NU '\u039e' # 0xCE -> GREEK CAPITAL LETTER XI @@ -280,7 +280,7 @@ def getregentry(): '\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA '\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA '\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA + '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMBDA '\u03bc' # 0xEC -> GREEK SMALL LETTER MU '\u03bd' # 0xED -> GREEK SMALL LETTER NU '\u03be' # 0xEE -> GREEK SMALL LETTER XI diff --git a/Lib/encodings/mac_arabic.py b/Lib/encodings/mac_arabic.py index 72847e859c464f..0d1a99d5e7913c 100644 --- a/Lib/encodings/mac_arabic.py +++ b/Lib/encodings/mac_arabic.py @@ -118,8 +118,8 @@ def getregentry(): 0x00c6: 0x0626, # ARABIC LETTER YEH WITH HAMZA ABOVE 0x00c7: 0x0627, # ARABIC LETTER ALEF 0x00c8: 0x0628, # ARABIC LETTER BEH - 0x00c9: 0x0629, # ARABIC LETTER TEH MARBUTA - 0x00ca: 0x062a, # ARABIC LETTER TEH + 0x00c9: 0x0629, # ARABIC LETTER THE MARBUTA + 0x00ca: 0x062a, # ARABIC LETTER THE 0x00cb: 0x062b, # ARABIC LETTER THEH 0x00cc: 0x062c, # ARABIC LETTER JEEM 0x00cd: 0x062d, # ARABIC LETTER HAH @@ -379,8 +379,8 @@ def getregentry(): '\u0626' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA 
ABOVE '\u0627' # 0x00c7 -> ARABIC LETTER ALEF '\u0628' # 0x00c8 -> ARABIC LETTER BEH - '\u0629' # 0x00c9 -> ARABIC LETTER TEH MARBUTA - '\u062a' # 0x00ca -> ARABIC LETTER TEH + '\u0629' # 0x00c9 -> ARABIC LETTER THE MARBUTA + '\u062a' # 0x00ca -> ARABIC LETTER THE '\u062b' # 0x00cb -> ARABIC LETTER THEH '\u062c' # 0x00cc -> ARABIC LETTER JEEM '\u062d' # 0x00cd -> ARABIC LETTER HAH @@ -634,8 +634,8 @@ def getregentry(): 0x0626: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE 0x0627: 0x00c7, # ARABIC LETTER ALEF 0x0628: 0x00c8, # ARABIC LETTER BEH - 0x0629: 0x00c9, # ARABIC LETTER TEH MARBUTA - 0x062a: 0x00ca, # ARABIC LETTER TEH + 0x0629: 0x00c9, # ARABIC LETTER THE MARBUTA + 0x062a: 0x00ca, # ARABIC LETTER THE 0x062b: 0x00cb, # ARABIC LETTER THEH 0x062c: 0x00cc, # ARABIC LETTER JEEM 0x062d: 0x00cd, # ARABIC LETTER HAH diff --git a/Lib/encodings/mac_farsi.py b/Lib/encodings/mac_farsi.py index e357d43510b5f6..dd898e65d6e2df 100644 --- a/Lib/encodings/mac_farsi.py +++ b/Lib/encodings/mac_farsi.py @@ -246,8 +246,8 @@ def getregentry(): '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE '\u0627' # 0xC7 -> ARABIC LETTER ALEF '\u0628' # 0xC8 -> ARABIC LETTER BEH - '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA - '\u062a' # 0xCA -> ARABIC LETTER TEH + '\u0629' # 0xC9 -> ARABIC LETTER THE MARBUTA + '\u062a' # 0xCA -> ARABIC LETTER THE '\u062b' # 0xCB -> ARABIC LETTER THEH '\u062c' # 0xCC -> ARABIC LETTER JEEM '\u062d' # 0xCD -> ARABIC LETTER HAH diff --git a/Lib/encodings/mac_greek.py b/Lib/encodings/mac_greek.py index d3d0c4f0c38755..55dc0de4af7ca4 100644 --- a/Lib/encodings/mac_greek.py +++ b/Lib/encodings/mac_greek.py @@ -209,7 +209,7 @@ def getregentry(): '\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA '\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA '\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA - '\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA + '\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMBDA '\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI '\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S @@ -281,7 +281,7 @@ def getregentry(): '\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA '\u03be' # 0xEA -> GREEK SMALL LETTER XI '\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA + '\u03bb' # 0xEC -> GREEK SMALL LETTER LAMBDA '\u03bc' # 0xED -> GREEK SMALL LETTER MU '\u03bd' # 0xEE -> GREEK SMALL LETTER NU '\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON diff --git a/Lib/http/cookies.py b/Lib/http/cookies.py index 694b1b09a0567c..349cd2402ffc23 100644 --- a/Lib/http/cookies.py +++ b/Lib/http/cookies.py @@ -123,7 +123,7 @@ >>> C.output() 'Set-Cookie: number=7\r\nSet-Cookie: string=seven' -Finis. +Finish. """ # diff --git a/Lib/idlelib/CREDITS.txt b/Lib/idlelib/CREDITS.txt index bea3ba7c20de22..ef776b761c06b7 100644 --- a/Lib/idlelib/CREDITS.txt +++ b/Lib/idlelib/CREDITS.txt @@ -21,7 +21,7 @@ subprocess, and made a number of usability enhancements. Other contributors include Raymond Hettinger, Tony Lownds (Mac integration), Neal Norwitz (code check and clean-up), Ronald Oussoren (Mac integration), -Noam Raphael (Code Context, Call Tips, many other patches), and Chui Tey (RPC +Noam Raphael (Code Context, Call Tips, many other patches), and Chui They (RPC integration, debugger integration and persistent breakpoints). 
Scott David Daniels, Tal Einat, Hernan Foffani, Christos Georgiou, diff --git a/Lib/idlelib/News3.txt b/Lib/idlelib/News3.txt index 30784578cc637f..f9a2ffdc75cbdc 100644 --- a/Lib/idlelib/News3.txt +++ b/Lib/idlelib/News3.txt @@ -1173,7 +1173,7 @@ Released on 2016-12-23 - Issue #25198: Enhance the initial html viewer now used for Idle Help. * Properly indent fixed-pitch text (patch by Mark Roseman). * Give code snippet a very Sphinx-like light blueish-gray background. - * Re-use initial width and height set by users for shell and editor. + * Reuse initial width and height set by users for shell and editor. * When the Table of Contents (TOC) menu is used, put the section header at the top of the screen. diff --git a/Lib/idlelib/editor.py b/Lib/idlelib/editor.py index 17b498f63ba43b..15da5d52cd4366 100644 --- a/Lib/idlelib/editor.py +++ b/Lib/idlelib/editor.py @@ -607,11 +607,11 @@ def rmenu_check_cut(self): def rmenu_check_copy(self): try: - indx = self.text.index('sel.first') + index = self.text.index('sel.first') except TclError: return 'disabled' else: - return 'normal' if indx else 'disabled' + return 'normal' if index else 'disabled' def rmenu_check_paste(self): try: diff --git a/Lib/idlelib/idle_test/test_editmenu.py b/Lib/idlelib/idle_test/test_editmenu.py index 17478473a3d1b2..bf0e6eccb8e940 100644 --- a/Lib/idlelib/idle_test/test_editmenu.py +++ b/Lib/idlelib/idle_test/test_editmenu.py @@ -37,37 +37,37 @@ def tearDownClass(cls): def test_paste_text(self): "Test pasting into text with and without a selection." text = self.text for tag, ans in ('', 'onetwo\n'), ('sel', 'two\n'): with self.subTest(tag=tag, ans=ans): text.delete('1.0', 'end') text.insert('1.0', 'one', tag) text.event_generate('<>') self.assertEqual(text.get('1.0', 'end'), ans) def test_paste_entry(self): "Test pasting into an entry with and without a selection." # Generated <> fails for tk entry without empty select # range for 'no selection'. Live widget works fine. for entry in self.entry, self.tentry: for end, ans in (0, 'onetwo'), ('end', 'two'): with self.subTest(entry=entry, end=end, ans=ans): entry.delete(0, 'end') entry.insert(0, 'one') entry.select_range(0, end) entry.event_generate('<>') self.assertEqual(entry.get(), ans) def test_paste_spin(self): "Test pasting into a spinbox with and without a selection." # See note above for entry.
spin = self.spin for end, ans in (0, 'onetwo'), ('end', 'two'): with self.subTest(end=end, ans=ans): spin.delete(0, 'end') spin.insert(0, 'one') spin.selection('range', 0, end) # see note spin.event_generate('<>') self.assertEqual(spin.get(), ans) if __name__ == '__main__': diff --git a/Lib/idlelib/run.py b/Lib/idlelib/run.py index a30db99a619a93..71081c1611a853 100644 --- a/Lib/idlelib/run.py +++ b/Lib/idlelib/run.py @@ -265,12 +265,12 @@ def print_exc(typ, exc, tb): print("\nDuring handling of the above exception, " "another exception occurred:\n", file=efile) if tb: tbe = traceback.extract_tb(tb) print('Traceback (most recent call last):', file=efile) exclude = ("run.py", "rpc.py", "threading.py", "queue.py", "debugger_r.py", "bdb.py") cleanup_traceback(tbe, exclude) traceback.print_list(tbe, file=efile) lines = get_message_lines(typ, exc, tb) for line in lines: print(line, end='', file=efile) diff --git a/Lib/idlelib/searchbase.py b/Lib/idlelib/searchbase.py index c68a6ca339af04..cfcecbe626ae11 100644 --- a/Lib/idlelib/searchbase.py +++ b/Lib/idlelib/searchbase.py @@ -109,7 +109,7 @@ def make_entry(self, label_text, var): label = Label(self.frame, text=label_text) label.grid(row=self.row, column=0, sticky="nw") entry = Entry(self.frame, textvariable=var, exportselection=0) entry.grid(row=self.row, column=1, sticky="nwe") self.row = self.row + 1 return entry, label @@ -129,7 +129,7 @@ def make_frame(self,labeltext=None): else: label = '' frame = Frame(self.frame) frame.grid(row=self.row, column=1, columnspan=1, sticky="nwe") self.row = self.row + 1 return frame, label diff --git a/Lib/imaplib.py b/Lib/imaplib.py index 2c3925958d011b..e663e55ad6ebc1 100644 --- a/Lib/imaplib.py +++ b/Lib/imaplib.py @@ -1524,7 +1524,7 @@ def _pop(self, timeout, default=('', None)): # Historical Note: # The timeout was originally implemented using select() after # checking for the presence of already-buffered data. - # That allowed timeouts on pipe connetions like IMAP4_stream. + # That allowed timeouts on pipe connections like IMAP4_stream.
# However, it seemed possible that SSL data arriving without any # IMAP data afterward could cause select() to indicate available # application data when there was none, leading to a read() call diff --git a/Lib/inspect.py b/Lib/inspect.py index 183e67fabf966e..2fa590db7136b4 100644 --- a/Lib/inspect.py +++ b/Lib/inspect.py @@ -1395,14 +1395,14 @@ def _missing_arguments(f_name, argnames, pos, values): "" if missing == 1 else "s", s)) def _too_many(f_name, args, kwonly, varargs, defcount, given, values): atleast = len(args) - defcount kwonly_given = len([arg for arg in kwonly if arg in values]) if varargs: plural = atleast != 1 sig = "at least %d" % (atleast,) elif defcount: plural = True sig = "from %d to %d" % (atleast, len(args)) else: plural = len(args) != 1 sig = str(len(args)) diff --git a/Lib/locale.py b/Lib/locale.py index 0bde7ed51c66c1..bbeab95f679b65 100644 --- a/Lib/locale.py +++ b/Lib/locale.py @@ -124,7 +124,7 @@ def _grouping_intervals(grouping): # if grouping is -1, we are done if interval == CHAR_MAX: return - # 0: re-use last group ad infinitum + # 0: reuse last group ad infinitum if interval == 0: if last_interval is None: raise ValueError("invalid grouping") diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py index c5860d53b1bdff..93f5d90a5137e8 100644 --- a/Lib/logging/__init__.py +++ b/Lib/logging/__init__.py @@ -886,7 +886,7 @@ def _removeHandlerRef(wr): """ # This function can be called during module teardown, when globals are # set to None. It can also be called from another thread. So we need to - # pre-emptively grab the necessary globals and check if they're None, + # preemptively grab the necessary globals and check if they're None, # to prevent race conditions and failures during interpreter shutdown. handlers, lock = _handlerList, _lock if lock and handlers: diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py index 2748b5941eade2..5b794f7fba557b 100644 --- a/Lib/logging/handlers.py +++ b/Lib/logging/handlers.py @@ -750,7 +750,7 @@ class SysLogHandler(logging.Handler): """ A handler class which sends formatted logging records to a syslog server. Based on Sam Rushing's syslog module: http://www.nightmare.com/squirl/python-ext/misc/syslog.py Contributed by Nicolas Untz (after which minor refactoring changes have been made). """ diff --git a/Lib/multiprocessing/resource_tracker.py b/Lib/multiprocessing/resource_tracker.py index 05633ac21a259c..14ff2cc927b56b 100644 --- a/Lib/multiprocessing/resource_tracker.py +++ b/Lib/multiprocessing/resource_tracker.py @@ -76,7 +76,7 @@ def _reentrant_call_error(self): "Reentrant call into the multiprocessing resource tracker") def __del__(self): - # making sure child processess are cleaned before ResourceTracker + # making sure child processes are cleaned before ResourceTracker # gets destructed. # see https://github.com/python/cpython/issues/88887 self._stop(use_blocking_lock=False) diff --git a/Lib/pickle.py b/Lib/pickle.py index beaefae0479d3c..ad94c5a17b8855 100644 --- a/Lib/pickle.py +++ b/Lib/pickle.py @@ -480,7 +480,7 @@ def clear_memo(self): The memo is the data structure that remembers which objects the pickler has already seen, so that shared or recursive objects are pickled by reference and not by value. This method is - useful when re-using picklers. + useful when reusing picklers.
""" self.memo.clear() diff --git a/Lib/sysconfig/__init__.py b/Lib/sysconfig/__init__.py index 49e0986517ce97..e36cafa26d2721 100644 --- a/Lib/sysconfig/__init__.py +++ b/Lib/sysconfig/__init__.py @@ -364,7 +364,7 @@ def _get_sysconfigdata(): def _installation_is_relocated(): - """Is the Python installation running from a different prefix than what was targetted when building?""" + """Is the Python installation running from a different prefix than what was targeted when building?""" if os.name != 'posix': raise NotImplementedError('sysconfig._installation_is_relocated() is currently only supported on POSIX') diff --git a/Lib/test/bisect_cmd.py b/Lib/test/bisect_cmd.py index aee2e8ac120852..aa52ead36d4536 100755 --- a/Lib/test/bisect_cmd.py +++ b/Lib/test/bisect_cmd.py @@ -13,7 +13,7 @@ Load an existing list of tests from a file using -i option: - ./python -m test --list-cases -m FileTests test_os > tests + ./python -m test --list-cases -m file tests test_os > tests ./python -m test.bisect_cmd -i tests test_os """ diff --git a/Lib/test/configdata/cfgparser.2 b/Lib/test/configdata/cfgparser.2 index cfcfef23bfd493..9426d25e943282 100644 --- a/Lib/test/configdata/cfgparser.2 +++ b/Lib/test/configdata/cfgparser.2 @@ -338,7 +338,7 @@ [homes] comment = Home Directories - browseable = no + browsable = no writable = yes # You can enable VFS recycle bin on a per share basis: @@ -369,7 +369,7 @@ # the default is to use the user's home directory ;[Profiles] ; path = /var/lib/samba/profiles -; browseable = no +; browsable = no ; guest ok = yes @@ -384,7 +384,7 @@ [printers] comment = All Printers path = /var/spool/samba - browseable = no + browsable = no # to allow user 'guest account' to print. guest ok = yes writable = no @@ -414,7 +414,7 @@ [print$] path = /var/lib/samba/printers - browseable = yes + browsable = yes read only = yes write list = @adm root diff --git a/Lib/test/crashers/README b/Lib/test/crashers/README index 7111946b93b280..ccbd98bf293e34 100644 --- a/Lib/test/crashers/README +++ b/Lib/test/crashers/README @@ -5,7 +5,7 @@ too obscure to invest the effort. Each test should fail when run from the command line: - ./python Lib/test/crashers/weakref_in_del.py + ./python Lib/test/crashes/weakref_in_del.py Put as much info into a docstring or comments to help determine the cause of the failure, as well as an issue number or link if it exists. diff --git a/Lib/test/crashers/infinite_loop_re.py b/Lib/test/crashers/infinite_loop_re.py index c84f28d601f865..c8c69ac718007a 100644 --- a/Lib/test/crashers/infinite_loop_re.py +++ b/Lib/test/crashers/infinite_loop_re.py @@ -1,6 +1,6 @@ # This was taken from https://bugs.python.org/issue1541697 -# It's not technically a crasher. It may not even truly be infinite, +# It's not technically a crash. It may not even truly be infinite, # however, I haven't waited a long time to see the result. It takes # 100% of CPU while running this and should be fixed. 
diff --git a/Lib/test/decimaltestdata/base.decTest b/Lib/test/decimaltestdata/base.decTest index bc4cef919f3480..90ab00cec90256 100644 --- a/Lib/test/decimaltestdata/base.decTest +++ b/Lib/test/decimaltestdata/base.decTest @@ -610,7 +610,7 @@ basx563 toSci "NaNs" -> NaN Conversion_syntax basx564 toSci "Infi" -> NaN Conversion_syntax basx565 toSci "Infin" -> NaN Conversion_syntax basx566 toSci "Infini" -> NaN Conversion_syntax basx567 toSci "Infinit" -> NaN Conversion_syntax basx568 toSci "-Infinit" -> NaN Conversion_syntax basx569 toSci "0Inf" -> NaN Conversion_syntax basx570 toSci "9Inf" -> NaN Conversion_syntax diff --git a/Lib/test/decimaltestdata/ddBase.decTest b/Lib/test/decimaltestdata/ddBase.decTest index fbd6ccd94dea80..cf62b99e803786 100644 --- a/Lib/test/decimaltestdata/ddBase.decTest +++ b/Lib/test/decimaltestdata/ddBase.decTest @@ -594,7 +594,7 @@ ddbas563 toSci "NaNs" -> NaN Conversion_syntax ddbas564 toSci "Infi" -> NaN Conversion_syntax ddbas565 toSci "Infin" -> NaN Conversion_syntax ddbas566 toSci "Infini" -> NaN Conversion_syntax ddbas567 toSci "Infinit" -> NaN Conversion_syntax ddbas568 toSci "-Infinit" -> NaN Conversion_syntax ddbas569 toSci "0Inf" -> NaN Conversion_syntax ddbas570 toSci "9Inf" -> NaN Conversion_syntax diff --git a/Lib/test/decimaltestdata/dqBase.decTest b/Lib/test/decimaltestdata/dqBase.decTest index 6bb463388e15fa..c8eed0123fd2f5 100644 --- a/Lib/test/decimaltestdata/dqBase.decTest +++ b/Lib/test/decimaltestdata/dqBase.decTest @@ -579,7 +579,7 @@ dqbas563 toSci "NaNs" -> NaN Conversion_syntax dqbas564 toSci "Infi" -> NaN Conversion_syntax dqbas565 toSci "Infin" -> NaN Conversion_syntax dqbas566 toSci "Infini" -> NaN Conversion_syntax dqbas567 toSci "Infinit" -> NaN Conversion_syntax dqbas568 toSci "-Infinit" -> NaN Conversion_syntax dqbas569 toSci "0Inf" -> NaN Conversion_syntax dqbas570 toSci "9Inf" -> NaN Conversion_syntax diff --git a/Lib/test/decimaltestdata/dsBase.decTest b/Lib/test/decimaltestdata/dsBase.decTest index 8ac45fc552152e..e6be438280f131 100644 --- a/Lib/test/decimaltestdata/dsBase.decTest +++ b/Lib/test/decimaltestdata/dsBase.decTest @@ -558,7 +558,7 @@ dsbas563 toSci "NaNs" -> NaN Conversion_syntax dsbas564 toSci "Infi" -> NaN Conversion_syntax dsbas565 toSci "Infin" -> NaN Conversion_syntax dsbas566 toSci "Infini" -> NaN Conversion_syntax dsbas567 toSci "Infinit" -> NaN Conversion_syntax dsbas568 toSci "-Infinit" -> NaN Conversion_syntax dsbas569 toSci "0Inf" -> NaN Conversion_syntax dsbas570 toSci "9Inf" -> NaN Conversion_syntax diff --git a/Lib/test/encoded_modules/__init__.py b/Lib/test/encoded_modules/__init__.py index ec43252aad2a46..cececb6b21acb0 100644 --- a/Lib/test/encoded_modules/__init__.py +++ b/Lib/test/encoded_modules/__init__.py @@ -18,6 +18,6 @@ test_strings = ( ('iso_8859_1', 'iso-8859-1', "Les hommes ont oublié cette vérité, " "dit le renard. Mais tu ne dois pas l'oublier.
Tu deviens " "responsable pour toujours de ce que tu as apprivoisé."), ('koi8_r', 'koi8-r', "Познание бесконечности требует бесконечного времени.") ) diff --git a/Lib/test/encoded_modules/module_iso_8859_1.py b/Lib/test/encoded_modules/module_iso_8859_1.py index 8f4a15c905dc12..dc230de1eba1c7 100644 --- a/Lib/test/encoded_modules/module_iso_8859_1.py +++ b/Lib/test/encoded_modules/module_iso_8859_1.py @@ -2,4 +2,4 @@ # -*- encoding: iso-8859-1 -*- test = ("Les hommes ont oubli cette vrit, " "dit le renard. Mais tu ne dois pas l'oublier. Tu deviens " "responsable pour toujours de ce que tu as apprivois.") diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py index 07681d75448e24..1fa53b9443123c 100644 --- a/Lib/test/libregrtest/cmdline.py +++ b/Lib/test/libregrtest/cmdline.py @@ -138,8 +138,8 @@ Pattern examples: - test method: test_stat_attributes - test class: FileTests - test identifier: test_os.FileTests.test_stat_attributes """ diff --git a/Lib/test/libregrtest/filter.py b/Lib/test/libregrtest/filter.py index 41372e427ffd03..412a0d70e158ad 100644 --- a/Lib/test/libregrtest/filter.py +++ b/Lib/test/libregrtest/filter.py @@ -20,7 +20,7 @@ def match_test(test): def _is_full_match_test(pattern): # If a pattern contains at least one dot, it's considered # as a full test identifier. # Example: 'test.test_os.FileTests.test_access'. # # ignore patterns which contain fnmatch patterns: '*', '?', '[...]' # or '[!...]'. For example, ignore 'test_access*'. @@ -66,11 +66,11 @@ def _compile_match_function(patterns): def match_test_regex(test_id, regex_match=regex_match): if regex_match(test_id): # The regex matches the whole identifier, for example # 'test.test_os.FileTests.test_access'. return True else: # Try to match parts of the test identifier. # For example, split 'test.test_os.FileTests.test_access' # into: 'test', 'test_os', 'FileTests' and 'test_access'.
return any(map(regex_match, test_id.split("."))) diff --git a/Lib/test/libregrtest/findtests.py b/Lib/test/libregrtest/findtests.py index f01c1240774707..39cab640281f5d 100644 --- a/Lib/test/libregrtest/findtests.py +++ b/Lib/test/libregrtest/findtests.py @@ -65,16 +65,16 @@ def split_test_packages(tests, *, testdir: StrPath | None = None, exclude: Container[str] = (), split_test_dirs=SPLITTESTDIRS) -> list[TestName]: testdir = findtestdir(testdir) - splitted = [] + split = [] for name in tests: if name in split_test_dirs: subdir = os.path.join(testdir, name) - splitted.extend(findtests(testdir=subdir, exclude=exclude, + split.extend(findtests(testdir=subdir, exclude=exclude, split_test_dirs=split_test_dirs, base_mod=name)) else: - splitted.append(name) - return splitted + split.append(name) + return split def _list_cases(suite: unittest.TestSuite) -> None: diff --git a/Lib/test/mime.types b/Lib/test/mime.types index eb39a17b6bf4b2..437a38411b214d 100644 --- a/Lib/test/mime.types +++ b/Lib/test/mime.types @@ -584,7 +584,7 @@ application/vnd.musician mus application/vnd.muvee.style msty application/vnd.ncd.control application/vnd.ncd.reference -application/vnd.nervana entity request bkm kcm +application/vnd.nirvana entity request bkm kcm application/vnd.netfpx application/vnd.neurolanguage.nlu nlu application/vnd.noblenet-directory nnd @@ -824,7 +824,7 @@ application/vnd.sealed.net # spp: application/scvp-vp-response application/vnd.sealed.ppt sppt s1p application/vnd.sealed.tiff stif -application/vnd.sealed.xls sxls sxl s1e +application/vnd.sealed.xls sxls xsl s1e # stm: audio/x-stm application/vnd.sealedmedia.softseal.html stml s1h application/vnd.sealedmedia.softseal.pdf spdf spd s1a @@ -834,7 +834,7 @@ application/vnd.semd semd application/vnd.semf semf application/vnd.shana.informed.formdata ifm application/vnd.shana.informed.formtemplate itp -application/vnd.shana.informed.interchange iif +application/vnd.shana.informed.interchange if application/vnd.shana.informed.package ipk application/vnd.SimTech-MindMapper twd twds application/vnd.smaf mmf diff --git a/Lib/test/multibytecodec_support.py b/Lib/test/multibytecodec_support.py index dbf0cc428e3ff6..9600a4bc03acb8 100644 --- a/Lib/test/multibytecodec_support.py +++ b/Lib/test/multibytecodec_support.py @@ -324,31 +324,31 @@ def unichrs(s): if len(csetch) == 1 and 0x80 <= csetch[0]: continue - unich = unichrs(data[1]) - if ord(unich) == 0xfffd or unich in urt_wa: + unix = unichrs(data[1]) + if ord(unix) == 0xfffd or unix in urt_wa: continue - urt_wa[unich] = csetch + urt_wa[unix] = csetch - self._testpoint(csetch, unich) + self._testpoint(csetch, unix) def _test_mapping_file_ucm(self): with self.open_mapping_file() as f: ucmdata = f.read() uc = re.findall('', ucmdata) for uni, coded in uc: - unich = chr(int(uni, 16)) + unix = chr(int(uni, 16)) codech = bytes.fromhex(coded) - self._testpoint(codech, unich) + self._testpoint(codech, unix) def test_mapping_supplemental(self): for mapping in self.supmaps: self._testpoint(*mapping) - def _testpoint(self, csetch, unich): - if (csetch, unich) not in self.pass_enctest: - self.assertEqual(unich.encode(self.encoding), csetch) - if (csetch, unich) not in self.pass_dectest: - self.assertEqual(str(csetch, self.encoding), unich) + def _testpoint(self, csetch, unix): + if (csetch, unix) not in self.pass_enctest: + self.assertEqual(unix.encode(self.encoding), csetch) + if (csetch, unix) not in self.pass_dectest: + self.assertEqual(str(csetch, self.encoding), unix) def test_errorhandle(self): for 
source, scheme, expected in self.codectests: diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 9a3a26a8400844..5fbcd4eba97062 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -2838,7 +2838,7 @@ def test_unicode_high_plane(self): self.assert_is_copy(t, t2) def test_unicode_memoization(self): - # Repeated str is re-used (even when escapes added). + # Repeated str is reused (even when escapes added). for proto in protocols: for s in '', 'xyz', 'xyz\n', 'x\\yz', 'x\xa1yz\r': p = self.dumps((s, s), proto) diff --git a/Lib/test/support/asyncore.py b/Lib/test/support/asyncore.py index 870e42837640de..9d1e317cf1968d 100644 --- a/Lib/test/support/asyncore.py +++ b/Lib/test/support/asyncore.py @@ -37,7 +37,7 @@ most popular way to do it, but there is another very different technique, that lets you have nearly all the advantages of multi-threading, without actually using multiple threads. it's really only practical if your program -is largely I/O bound. If your program is CPU bound, then pre-emptive +is largely I/O bound. If your program is CPU bound, then preemptive scheduled threads are probably what you really need. Network servers are rarely CPU-bound, however. @@ -295,7 +295,7 @@ def set_socket(self, sock, map=None): self.add_channel(map) def set_reuse_addr(self): - # try to re-use a server port if possible + # try to reuse a server port if possible try: self.socket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, diff --git a/Lib/test/support/os_helper.py b/Lib/test/support/os_helper.py index 2c45fe2369ec36..3b3c363d9040b3 100644 --- a/Lib/test/support/os_helper.py +++ b/Lib/test/support/os_helper.py @@ -85,7 +85,7 @@ '\u05D0', # U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic '\u060C', - # U+062A (Arabic Letter Teh): cp720 + # U+062A (Arabic Letter The): cp720 '\u062A', # U+0E01 (Thai Character Ko Kai): cp874 '\u0E01', diff --git a/Lib/test/support/smtpd.py b/Lib/test/support/smtpd.py index 6537679db9ad24..39670f7be7e9ad 100755 --- a/Lib/test/support/smtpd.py +++ b/Lib/test/support/smtpd.py @@ -637,7 +637,7 @@ def __init__(self, localaddr, remoteaddr, gai_results = socket.getaddrinfo(*localaddr, family=family, type=socket.SOCK_STREAM) self.create_socket(gai_results[0][0], gai_results[0][1]) - # try to re-use a server port if possible + # try to reuse a server port if possible self.set_reuse_addr() self.bind(localaddr) self.listen(5) diff --git a/Lib/test/test_asyncio/test_sslproto.py b/Lib/test/test_asyncio/test_sslproto.py index 3e304c166425b0..859175858e5f96 100644 --- a/Lib/test/test_asyncio/test_sslproto.py +++ b/Lib/test/test_asyncio/test_sslproto.py @@ -116,7 +116,7 @@ def test_connection_lost_when_busy(self): sock.fileno = mock.Mock(return_value=12345) sock.send = mock.Mock(side_effect=BrokenPipeError) - # construct StreamWriter chain that contains loop dependant logic this emulates + # construct StreamWriter chain that contains loop dependent logic this emulates # what _make_ssl_transport() does in BaseSelectorEventLoop reader = asyncio.StreamReader(limit=2 ** 16, loop=self.loop) protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop) diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py index 931a43816a257a..363e73fac0cded 100644 --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -1491,7 +1491,7 @@ async def coro(i): with contextlib.closing(asyncio.new_event_loop()) as loop: # Coroutines shouldn't be yielded back as finished coroutines - # can't be 
re-used. + # can't be reused. awaitables_in = frozenset( (coro(0), coro(1), coro(2), coro(3)) ) @@ -1922,13 +1922,13 @@ async def sleeper(): base_exc = SystemExit() - async def notmutch(): + async def notmuch(): try: await sleeper() except asyncio.CancelledError: raise base_exc - task = self.new_task(loop, notmutch()) + task = self.new_task(loop, notmuch()) test_utils.run_briefly(loop) task.cancel() diff --git a/Lib/test/test_buffer.py b/Lib/test/test_buffer.py index 19582e757161fc..b6afddb0f35428 100644 --- a/Lib/test/test_buffer.py +++ b/Lib/test/test_buffer.py @@ -4456,7 +4456,7 @@ def test_pybuffer_size_from_format(self): @support.cpython_only def test_flags_overflow(self): - # gh-126594: Check for integer overlow on large flags + # gh-126594: Check for integer overflow on large flags try: from _testcapi import INT_MIN, INT_MAX except ImportError: diff --git a/Lib/test/test_build_details.py b/Lib/test/test_build_details.py index ba4b8c5aa9b58e..d7a718139953b0 100644 --- a/Lib/test/test_build_details.py +++ b/Lib/test/test_build_details.py @@ -11,7 +11,7 @@ class FormatTestsBase: @property def contents(self): - """Install details file contents. Should be overriden by subclasses.""" + """Install details file contents. Should be overridden by subclasses.""" raise NotImplementedError @property @@ -114,7 +114,7 @@ def contents(self): def test_location(self): self.assertTrue(os.path.isfile(self.location)) - # Override generic format tests with tests for our specific implemenation. + # Override generic format tests with tests for our specific implementation. @needs_installed_python @unittest.skipIf( diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py index 2591e7ca6ab0ec..7bd66c6f779040 100644 --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -632,7 +632,7 @@ def test_startswith(self): self.assertTrue(b.startswith(b"hello")) self.assertTrue(b.startswith(b"hel")) self.assertTrue(b.startswith(b"h")) self.assertFalse(b.startswith(b"hellow")) self.assertFalse(b.startswith(b"ha")) with self.assertRaises(TypeError) as cm: b.startswith([b'h']) diff --git a/Lib/test/test_capi/test_tuple.py b/Lib/test/test_capi/test_tuple.py index 7c07bc64e247c5..0eb70ff68f32f5 100644 --- a/Lib/test/test_capi/test_tuple.py +++ b/Lib/test/test_capi/test_tuple.py @@ -259,7 +259,7 @@ def test__tuple_resize(self): def test_bug_59313(self): # Before 3.14, the C-API function PySequence_Tuple # would create incomplete tuples which were visible to - # the cycle GC, and this test would crash the interpeter. + # the cycle GC, and this test would crash the interpreter.
TAG = object() tuples = [] diff --git a/Lib/test/test_capi/test_type.py b/Lib/test/test_capi/test_type.py index 15fb4a93e2ad74..dd660216770dde 100644 --- a/Lib/test/test_capi/test_type.py +++ b/Lib/test/test_capi/test_type.py @@ -259,7 +259,7 @@ class FreezeThis(metaclass=Meta): self.assertEqual(FreezeThis.value, 2) def test_manual_heap_type(self): - # gh-128923: test that a manually allocated and initailized heap type + # gh-128923: test that a manually allocated and initialized heap type # works correctly ManualHeapType = _testcapi.ManualHeapType for i in range(100): diff --git a/Lib/test/test_cmd_line.py b/Lib/test/test_cmd_line.py index f30a1874ab96d4..cc3802a90a850f 100644 --- a/Lib/test/test_cmd_line.py +++ b/Lib/test/test_cmd_line.py @@ -980,7 +980,7 @@ def test_python_legacy_windows_fs_encoding(self): def test_python_legacy_windows_stdio(self): # Test that _WindowsConsoleIO is used when PYTHONLEGACYWINDOWSSTDIO # is not set. - # We cannot use PIPE becase it prevents creating new console. + # We cannot use PIPE because it prevents creating new console. # So we use exit code. code = "import sys; sys.exit(type(sys.stdout.buffer.raw).__name__ != '_WindowsConsoleIO')" env = os.environ.copy() diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py index d8666f7290e72e..b20c424878450d 100644 --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -3249,7 +3249,7 @@ def test_codec_lookup_failure(self): def test_unflagged_non_text_codec_handling(self): # The stdlib non-text codecs are now marked so they're - # pre-emptively skipped by the text model related methods + # preemptively skipped by the text model related methods # However, third party codecs won't be flagged, so we still make # sure the case where an inappropriate output type is produced is # handled appropriately diff --git a/Lib/test/test_ctypes/test_win32.py b/Lib/test/test_ctypes/test_win32.py index 7d5133221906bb..dc13fae456b45d 100644 --- a/Lib/test/test_ctypes/test_win32.py +++ b/Lib/test/test_ctypes/test_win32.py @@ -13,9 +13,9 @@ @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') class FunctionCallTestCase(unittest.TestCase): - @unittest.skipUnless('MSC' in sys.version, "SEH only supported by MSC") + @unittest.skipUnless('MSC' in sys.version, "SEH only supported by MSC") @unittest.skipIf(sys.executable.lower().endswith('_d.exe'), - "SEH not enabled in debug builds") + "SEH not enabled in debug builds") def test_SEH(self): # Disable faulthandler to prevent logging the warning: # "Windows fatal exception: access violation" diff --git a/Lib/test/test_ctypes/test_win32_com_foreign_func.py b/Lib/test/test_ctypes/test_win32_com_foreign_func.py index 7e54f8f6c31d33..432894b090638d 100644 --- a/Lib/test/test_ctypes/test_win32_com_foreign_func.py +++ b/Lib/test/test_ctypes/test_win32_com_foreign_func.py @@ -63,13 +63,13 @@ def is_equal_guid(guid1, guid2): IID_IPersist = create_guid("{0000010C-0000-0000-C000-000000000046}") CLSID_ShellLink = create_guid("{00021401-0000-0000-C000-000000000046}") -# https://learn.microsoft.com/en-us/windows/win32/api/unknwn/nf-unknwn-iunknown-queryinterface(refiid_void) +# https://learn.microsoft.com/en-us/windows/win32/api/unknwn/nf-unknwn-iunknown-queryinterface(refiid_void) proto_query_interface = create_proto_com_method( "QueryInterface", 0, HRESULT, POINTER(GUID), POINTER(c_void_p) ) -# https://learn.microsoft.com/en-us/windows/win32/api/unknwn/nf-unknwn-iunknown-addref +# https://learn.microsoft.com/en-us/windows/win32/api/unknwn/nf-unknwn-iunknown-addref
proto_add_ref = create_proto_com_method("AddRef", 1, ctypes.c_long) -# https://learn.microsoft.com/en-us/windows/win32/api/unknwn/nf-unknwn-iunknown-release +# https://learn.microsoft.com/en-us/windows/win32/api/unknwn/nf-unknwn-iunknown-release proto_release = create_proto_com_method("Release", 2, ctypes.c_long) # https://learn.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersist-getclassid proto_get_class_id = create_proto_com_method( diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py index 08a8f4c3b36bd6..def33ed81a374c 100644 --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -355,7 +355,7 @@ def eval_equation(self, s): funct = L[1].lower() valstemp = L[2:] L = Sides[1].strip().split() - ans = L[0] + ans = L[0] exceptions = L[1:] except (TypeError, AttributeError, IndexError): raise self.decimal.InvalidOperation @@ -410,7 +410,7 @@ def FixQuotes(val): v = self.read_unlimited(v, self.context) vals.append(v) - ans = FixQuotes(ans) + ans = FixQuotes(ans) if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'): for error in theirexceptions: @@ -461,7 +461,7 @@ def FixQuotes(val): myexceptions.sort(key=repr) theirexceptions.sort(key=repr) - self.assertEqual(result, ans, + self.assertEqual(result, ans, 'Incorrect answer for ' + s + ' -- got ' + result) self.assertEqual(myexceptions, theirexceptions, @@ -2403,137 +2403,137 @@ def test_none_args(self): ##### Binary functions c.clear_flags() - ans = str(x.compare(Decimal('Nan891287828'), context=None)) - self.assertEqual(ans, 'NaN1287828') + ans = str(x.compare(Decimal('Nan891287828'), context=None)) + self.assertEqual(ans, 'NaN1287828') self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.compare_signal(8224, context=None)) - self.assertEqual(ans, '-1') + ans = str(x.compare_signal(8224, context=None)) + self.assertEqual(ans, '-1') self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.logical_and(101, context=None)) - self.assertEqual(ans, '101') + ans = str(x.logical_and(101, context=None)) + self.assertEqual(ans, '101') self.assertRaises(InvalidOperation, x.logical_and, 123, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.logical_or(101, context=None)) - self.assertEqual(ans, '111') + ans = str(x.logical_or(101, context=None)) + self.assertEqual(ans, '111') self.assertRaises(InvalidOperation, x.logical_or, 123, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.logical_xor(101, context=None)) - self.assertEqual(ans, '10') + ans = str(x.logical_xor(101, context=None)) + self.assertEqual(ans, '10') self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.max(101, context=None)) - self.assertEqual(ans, '111') + ans = str(x.max(101, context=None)) + self.assertEqual(ans, '111') self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.max_mag(101, context=None)) - self.assertEqual(ans, '111') + ans = str(x.max_mag(101, context=None)) + self.assertEqual(ans, '111') self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.min(101, context=None)) - 
self.assertEqual(ans, '101') + ans = str(x.min(101, context=None)) + self.assertEqual(ans, '101') self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.min_mag(101, context=None)) - self.assertEqual(ans, '101') + ans = str(x.min_mag(101, context=None)) + self.assertEqual(ans, '101') self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.remainder_near(101, context=None)) - self.assertEqual(ans, '10') + ans = str(x.remainder_near(101, context=None)) + self.assertEqual(ans, '10') self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.rotate(2, context=None)) - self.assertEqual(ans, '11100') + ans = str(x.rotate(2, context=None)) + self.assertEqual(ans, '11100') self.assertRaises(InvalidOperation, x.rotate, 101, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.scaleb(7, context=None)) - self.assertEqual(ans, '1.11E+9') + ans = str(x.scaleb(7, context=None)) + self.assertEqual(ans, '1.11E+9') self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - ans = str(x.shift(2, context=None)) - self.assertEqual(ans, '11100') + ans = str(x.shift(2, context=None)) + self.assertEqual(ans, '11100') self.assertRaises(InvalidOperation, x.shift, 10000, context=None) self.assertTrue(c.flags[InvalidOperation]) ##### Ternary functions c.clear_flags() - ans = str(x.fma(2, 3, context=None)) - self.assertEqual(ans, '225') + ans = str(x.fma(2, 3, context=None)) + self.assertEqual(ans, '225') self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None) self.assertTrue(c.flags[Overflow]) ##### Special cases c.rounding = ROUND_HALF_EVEN - ans = str(Decimal('1.5').to_integral(rounding=None, context=None)) - self.assertEqual(ans, '2') + ans = str(Decimal('1.5').to_integral(rounding=None, context=None)) + self.assertEqual(ans, '2') c.rounding = ROUND_DOWN - ans = str(Decimal('1.5').to_integral(rounding=None, context=None)) - self.assertEqual(ans, '1') - ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None)) - self.assertEqual(ans, '2') + ans = str(Decimal('1.5').to_integral(rounding=None, context=None)) + self.assertEqual(ans, '1') + ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None)) + self.assertEqual(ans, '2') c.clear_flags() self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None) self.assertTrue(c.flags[InvalidOperation]) c.rounding = ROUND_HALF_EVEN - ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None)) - self.assertEqual(ans, '2') + ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None)) + self.assertEqual(ans, '2') c.rounding = ROUND_DOWN - ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None)) - self.assertEqual(ans, '1') - ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None)) - self.assertEqual(ans, '2') + ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None)) + self.assertEqual(ans, '1') + ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None)) + self.assertEqual(ans, '2') c.clear_flags() self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None) self.assertTrue(c.flags[InvalidOperation]) c.rounding = ROUND_HALF_EVEN - ans 
= str(Decimal('1.5').to_integral_exact(rounding=None, context=None)) - self.assertEqual(ans, '2') + ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None)) + self.assertEqual(ans, '2') c.rounding = ROUND_DOWN - ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None)) - self.assertEqual(ans, '1') - ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None)) - self.assertEqual(ans, '2') + ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None)) + self.assertEqual(ans, '1') + ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None)) + self.assertEqual(ans, '2') c.clear_flags() self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None) self.assertTrue(c.flags[InvalidOperation]) c.rounding = ROUND_UP - ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None)) - self.assertEqual(ans, '1.501') + ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None)) + self.assertEqual(ans, '1.501') c.rounding = ROUND_DOWN - ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None)) - self.assertEqual(ans, '1.500') - ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None)) - self.assertEqual(ans, '1.501') + ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None)) + self.assertEqual(ans, '1.500') + ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None)) + self.assertEqual(ans, '1.501') c.clear_flags() self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None) self.assertTrue(c.flags[InvalidOperation]) @@ -3911,7 +3911,7 @@ def raise_error(context, flag): for fn, args in operations: # find answer and flags raised using a clean context context.clear_flags() - ans = fn(*args) + ans = fn(*args) flags = [k for k, v in context.flags.items() if v] for extra_flags in flagsets: @@ -3932,9 +3932,9 @@ def raise_error(context, flag): new_flags = [k for k,v in context.flags.items() if v] new_flags.sort(key=id) - self.assertEqual(ans, new_ans, + self.assertEqual(ans, new_ans, "operation produces different answers depending on flags set: " + - "expected %s, got %s." % (ans, new_ans)) + "expected %s, got %s." 
% (ans, new_ans)) self.assertEqual(new_flags, expected_flags, "operation raises different flags depending on flags set: " + "expected %s, got %s" % (expected_flags, new_flags)) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py index 8da6647c3f71fc..8ef17086641468 100644 --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -3323,7 +3323,7 @@ class F(D, E): pass self.assertIs(x.__class__, cls2) x.__class__ = cls self.assertIs(x.__class__, cls) - def cant(x, C): + def cant(x, C): try: x.__class__ = C except TypeError: @@ -3336,18 +3336,18 @@ def cant(x, C): pass else: self.fail("shouldn't allow del %r.__class__" % x) - cant(C(), list) - cant(list(), C) - cant(C(), 1) - cant(C(), object) - cant(object(), list) - cant(list(), object) + cant(C(), list) + cant(list(), C) + cant(C(), 1) + cant(C(), object) + cant(object(), list) + cant(list(), object) class Int(int): __slots__ = [] - cant(True, int) - cant(2, bool) + cant(True, int) + cant(2, bool) o = object() - cant(o, int) - cant(o, type(None)) + cant(o, int) + cant(o, type(None)) del o class G(object): __slots__ = ["a", "b"] @@ -3387,7 +3387,7 @@ class R(J): for cls2 in G, J, K, L, M, N, P, R, list, Int: if cls is cls2: continue - cant(cls(), cls2) + cant(cls(), cls2) # Issue5283: when __class__ changes in __del__, the wrong # type gets DECREF'd. @@ -3405,16 +3405,16 @@ class C(object): pass a = C() a.__dict__ = {'b': 1} self.assertEqual(a.b, 1) - def cant(x, dict): + def cant(x, dict): try: x.__dict__ = dict except (AttributeError, TypeError): pass else: self.fail("shouldn't allow %r.__dict__ = %r" % (x, dict)) - cant(a, None) - cant(a, []) - cant(a, 1) + cant(a, None) + cant(a, []) + cant(a, 1) del a.__dict__ # Deleting __dict__ is allowed class Base(object): @@ -3423,7 +3423,7 @@ def verify_dict_readonly(x): """ x has to be an instance of a class inheriting from Base. """ - cant(x, {}) + cant(x, {}) try: del x.__dict__ except (AttributeError, TypeError): @@ -5282,7 +5282,7 @@ class Base2(object): bases_before = ",".join([c.__name__ for c in X.__bases__]) print(f"before={bases_before}") - # mykey is initially read from Base, however, the lookup will be perfomed + # mykey is initially read from Base, however, the lookup will be performed # again if specialization fails. The second lookup will use the new # mro set by __eq__. print(X.mykey) diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py index 60c62430370e96..4888bf10a983d5 100644 --- a/Lib/test/test_dict.py +++ b/Lib/test/test_dict.py @@ -1581,7 +1581,7 @@ def check_unhashable_key(): with check_unhashable_key(): d.get(key) - # Only TypeError exception is overriden, + # Only TypeError exception is overridden, # other exceptions are left unchanged. 
class HashError: def __hash__(self): diff --git a/Lib/test/test_difflib.py b/Lib/test/test_difflib.py index 6ac584a08d1e86..1766c9828aa4a2 100644 --- a/Lib/test/test_difflib.py +++ b/Lib/test/test_difflib.py @@ -556,7 +556,7 @@ def test_default_args(self): b[match.b: match.b + match.size]) self.assertFalse(self.longer_match_exists(a, b, match.size)) - match = sm.find_longest_match(alo=2, blo=4) + match = sm.find_longest_match(also=2, blo=4) self.assertEqual(match.a, 3) self.assertEqual(match.b, 7) self.assertEqual(match.size, 4) diff --git a/Lib/test/test_dis.py b/Lib/test/test_dis.py index 355990ed58ee09..fc3d53271d1c0b 100644 --- a/Lib/test/test_dis.py +++ b/Lib/test/test_dis.py @@ -1696,7 +1696,7 @@ def jumpy(): # code_object_inner before rerunning the tests def _stringify_instruction(instr): - # Since postions offsets change a lot for these test cases, ignore them. + # Since positions offsets change a lot for these test cases, ignore them. base = ( f" make_inst(opname={instr.opname!r}, arg={instr.arg!r}, argval={instr.argval!r}, " + f"argrepr={instr.argrepr!r}, offset={instr.offset}, start_offset={instr.start_offset}, " + diff --git a/Lib/test/test_email/test__header_value_parser.py b/Lib/test/test_email/test__header_value_parser.py index 179e236ecdfd7f..0cf6fb5b3d6bc3 100644 --- a/Lib/test/test_email/test__header_value_parser.py +++ b/Lib/test/test_email/test__header_value_parser.py @@ -2285,11 +2285,11 @@ def test_get_group_single_mailbox(self): def test_get_group_mixed_list(self): group = self._test_get_x(parser.get_group, ('Monty Python: "Fred A. Bear" ,' - '(foo) Roger , x@test.example.com;'), + '(foo) Roger , x@test.example.com;'), ('Monty Python: "Fred A. Bear" ,' - '(foo) Roger , x@test.example.com;'), + '(foo) Roger , x@test.example.com;'), ('Monty Python: "Fred A. Bear" ,' - ' Roger , x@test.example.com;'), + ' Roger , x@test.example.com;'), [], '') self.assertEqual(group.token_type, 'group') @@ -2306,11 +2306,11 @@ def test_get_group_mixed_list(self): def test_get_group_one_invalid(self): group = self._test_get_x(parser.get_group, ('Monty Python: "Fred A. Bear" ,' - '(foo) Roger ping@exampele.com, x@test.example.com;'), + '(foo) Roger ping@example.com, x@test.example.com;'), ('Monty Python: "Fred A. Bear" ,' - '(foo) Roger ping@exampele.com, x@test.example.com;'), + '(foo) Roger ping@example.com, x@test.example.com;'), ('Monty Python: "Fred A. Bear" ,' - ' Roger ping@exampele.com, x@test.example.com;'), + ' Roger ping@example.com, x@test.example.com;'), [errors.InvalidHeaderDefect, # non-angle addr makes local part invalid errors.InvalidHeaderDefect], # and its not obs-local either: no dots. '') @@ -2718,9 +2718,9 @@ def test_get_msg_id_empty(self): def test_get_msg_id_valid(self): msg_id = self._test_get_x( parser.get_msg_id, - "", - "", - "", + "", + "", + "", [], '', ) @@ -2729,9 +2729,9 @@ def test_get_msg_id_valid(self): def test_get_msg_id_obsolete_local(self): msg_id = self._test_get_x( parser.get_msg_id, - '<"simeple.local"@example.com>', - '<"simeple.local"@example.com>', - '', + '<"simple.local"@example.com>', + '<"simple.local"@example.com>', + '', [errors.ObsoleteHeaderDefect], '', ) diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py index 57d0656487d4db..687e45a764bd3d 100644 --- a/Lib/test/test_exceptions.py +++ b/Lib/test/test_exceptions.py @@ -1295,7 +1295,7 @@ def test_context_of_exception_in_else_and_finally(self): self.assertIs(exc.__context__, ve) def test_unicode_change_attributes(self): - # See issue 7309. This was a crasher. 
+ # See issue 7309. This was a crash. u = UnicodeEncodeError('baz', 'xxxxx', 1, 5, 'foo') self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: foo") diff --git a/Lib/test/test_fileio.py b/Lib/test/test_fileio.py index e3d54f6315aade..2e5b894fd5b063 100644 --- a/Lib/test/test_fileio.py +++ b/Lib/test/test_fileio.py @@ -388,7 +388,7 @@ def check_readall(name, code, prelude="", cleanup="", syscalls = strace_helper.filter_memory(syscalls) # The first call should be an open that returns a - # file descriptor (fd). Afer that calls may vary. Once the file + # file descriptor (fd). After that calls may vary. Once the file # is opened, check calls refer to it by fd as the filename # could be removed from the filesystem, renamed, etc. See: # Time-of-check time-of-use (TOCTOU) software bug class. diff --git a/Lib/test/test_float.py b/Lib/test/test_float.py index 00518abcb11b46..e39cba06d03c82 100644 --- a/Lib/test/test_float.py +++ b/Lib/test/test_float.py @@ -1071,7 +1071,7 @@ def test_inf_from_str(self): self.assertRaises(ValueError, float, "in") self.assertRaises(ValueError, float, "+in") self.assertRaises(ValueError, float, "-in") - self.assertRaises(ValueError, float, "infinit") + self.assertRaises(ValueError, float, "infinite") self.assertRaises(ValueError, float, "+Infin") self.assertRaises(ValueError, float, "-INFI") self.assertRaises(ValueError, float, "infinitys") diff --git a/Lib/test/test_fnmatch.py b/Lib/test/test_fnmatch.py index 5daaf3b3fddb9e..546682653a071e 100644 --- a/Lib/test/test_fnmatch.py +++ b/Lib/test/test_fnmatch.py @@ -96,22 +96,22 @@ def test_sep(self): def test_char_set(self): check = self.check_match - tescases = string.ascii_lowercase + string.digits + string.punctuation - for c in tescases: + testcases = string.ascii_lowercase + string.digits + string.punctuation + for c in testcases: check(c, '[az]', c in 'az') check(c, '[!az]', c not in 'az') # Case insensitive. - for c in tescases: + for c in testcases: check(c, '[AZ]', (c in 'az') and IGNORECASE) check(c, '[!AZ]', (c not in 'az') or not IGNORECASE) for c in string.ascii_uppercase: check(c, '[az]', (c in 'AZ') and IGNORECASE) check(c, '[!az]', (c not in 'AZ') or not IGNORECASE) # Repeated same character. - for c in tescases: + for c in testcases: check(c, '[aa]', c == 'a') # Special cases. - for c in tescases: + for c in testcases: check(c, '[^az]', c in '^az') check(c, '[[az]', c in '[az') check(c, r'[!]]', c != ']') @@ -122,24 +122,24 @@ def test_char_set(self): def test_range(self): check = self.check_match - tescases = string.ascii_lowercase + string.digits + string.punctuation - for c in tescases: + testcases = string.ascii_lowercase + string.digits + string.punctuation + for c in testcases: check(c, '[b-d]', c in 'bcd') check(c, '[!b-d]', c not in 'bcd') check(c, '[b-dx-z]', c in 'bcdxyz') check(c, '[!b-dx-z]', c not in 'bcdxyz') # Case insensitive. - for c in tescases: + for c in testcases: check(c, '[B-D]', (c in 'bcd') and IGNORECASE) check(c, '[!B-D]', (c not in 'bcd') or not IGNORECASE) for c in string.ascii_uppercase: check(c, '[b-d]', (c in 'BCD') and IGNORECASE) check(c, '[!b-d]', (c not in 'BCD') or not IGNORECASE) # Upper bound == lower bound. - for c in tescases: + for c in testcases: check(c, '[b-b]', c == 'b') # Special cases. 
- for c in tescases: + for c in testcases: check(c, '[!-#]', c not in '-#') check(c, '[!--.]', c not in '-.') check(c, '[^-`]', c in '^_`') @@ -153,7 +153,7 @@ def test_range(self): check(c, '[-]', c in '-') check(c, '[!-]', c not in '-') # Upper bound is less that lower bound: error in RE. - for c in tescases: + for c in testcases: check(c, '[d-b]', False) check(c, '[!d-b]', True) check(c, '[d-bx-z]', c in 'xyz') diff --git a/Lib/test/test_generators.py b/Lib/test/test_generators.py index 3e41c7b9663491..7ce84ca0a3bb42 100644 --- a/Lib/test/test_generators.py +++ b/Lib/test/test_generators.py @@ -2377,7 +2377,7 @@ def printsolution(self, x): """ weakref_tests = """\ -Generators are weakly referencable: +Generators are weakly referenceable: >>> import weakref >>> def gen(): @@ -2388,7 +2388,7 @@ def printsolution(self, x): True >>> p = weakref.proxy(gen) -Generator-iterators are weakly referencable as well: +Generator-iterators are weakly referenceable as well: >>> gi = gen() >>> wr = weakref.ref(gi) diff --git a/Lib/test/test_genexps.py b/Lib/test/test_genexps.py index fe5f18fa3f88a0..d819d1fd4b9f26 100644 --- a/Lib/test/test_genexps.py +++ b/Lib/test/test_genexps.py @@ -138,7 +138,7 @@ >>> list(g) [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)] -Verify re-use of tuples (a side benefit of using genexps over listcomps) +Verify reuse of tuples (a side benefit of using genexps over listcomps) >>> tupleids = list(map(id, ((i,i) for i in range(10)))) >>> int(max(tupleids) - min(tupleids)) @@ -256,7 +256,7 @@ >>> me.gi_running 0 -Verify that genexps are weakly referencable +Verify that genexps are weakly referenceable >>> import weakref >>> g = (i*i for i in range(4)) diff --git a/Lib/test/test_gzip.py b/Lib/test/test_gzip.py index a12ff5662a73db..df4748057ab254 100644 --- a/Lib/test/test_gzip.py +++ b/Lib/test/test_gzip.py @@ -144,7 +144,7 @@ def test_read1(self): self.assertEqual(b''.join(blocks), data1 * 50) def test_readinto(self): - # 10MB of uncompressible data to ensure multiple reads + # 10MB of incompressible data to ensure multiple reads large_data = os.urandom(10 * 2**20) with gzip.GzipFile(self.filename, 'wb') as f: f.write(large_data) @@ -156,7 +156,7 @@ def test_readinto(self): self.assertEqual(buf, large_data) def test_readinto1(self): - # 10MB of uncompressible data to ensure multiple reads + # 10MB of incompressible data to ensure multiple reads large_data = os.urandom(10 * 2**20) with gzip.GzipFile(self.filename, 'wb') as f: f.write(large_data) diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py index 2548a7c5f292f0..ff5184b396081d 100644 --- a/Lib/test/test_httpservers.py +++ b/Lib/test/test_httpservers.py @@ -853,17 +853,17 @@ def handle_expect_100(self): class AuditableBytesIO: def __init__(self): - self.datas = [] + self.data = [] def write(self, data): - self.datas.append(data) + self.data.append(data) def getData(self): - return b''.join(self.datas) + return b''.join(self.data) @property def numWrites(self): - return len(self.datas) + return len(self.data) class BaseHTTPRequestHandlerTestCase(unittest.TestCase): diff --git a/Lib/test/test_import/__init__.py b/Lib/test/test_import/__init__.py index 6e34094c5aa422..8a7fadcbee2d79 100644 --- a/Lib/test/test_import/__init__.py +++ b/Lib/test/test_import/__init__.py @@ -2879,7 +2879,7 @@ def check_direct(self, loaded): self.assertIs(loaded.snapshot.lookedup, loaded.module) def check_indirect(self, loaded, orig): - # The module re-uses another's PyModuleDef, 
with a different name. + # The module reuses another's PyModuleDef, with a different name. assert orig is not loaded.module assert orig.__name__ != loaded.name self.assertNotEqual(loaded.module.__name__, loaded.name) diff --git a/Lib/test/test_interpreters/test_api.py b/Lib/test/test_interpreters/test_api.py index a34b20beaca7a3..1566d7acc9b6ba 100644 --- a/Lib/test/test_interpreters/test_api.py +++ b/Lib/test/test_interpreters/test_api.py @@ -1217,7 +1217,7 @@ def test_stateless_func_returns_arg(self): # builtin exceptions Exception('uh-oh!'), ModuleNotFoundError('mymodule'), - # builtin fnctions + # builtin functions len, sys.exit, # user classes diff --git a/Lib/test/test_iterlen.py b/Lib/test/test_iterlen.py index 41c9752e557fb3..d527177215f107 100644 --- a/Lib/test/test_iterlen.py +++ b/Lib/test/test_iterlen.py @@ -63,7 +63,7 @@ def test_invariant(self): class TestTemporarilyImmutable(TestInvariantWithoutMutations): def test_immutable_during_iteration(self): - # objects such as deques, sets, and dictionaries enforce + # objects such as deques, sets, and dictionaries enforce # length immutability during iteration it = self.it diff --git a/Lib/test/test_itertools.py b/Lib/test/test_itertools.py index 61bea9dba07fec..e7da03cb9b3240 100644 --- a/Lib/test/test_itertools.py +++ b/Lib/test/test_itertools.py @@ -286,7 +286,7 @@ def test_combinations_overflow(self): with self.assertRaises((OverflowError, MemoryError)): combinations("AA", 2**29) - # Test implementation detail: tuple re-use + # Test implementation detail: tuple reuse @support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) @@ -361,7 +361,7 @@ def test_combinations_with_replacement_overflow(self): with self.assertRaises((OverflowError, MemoryError)): combinations_with_replacement("AA", 2**30) - # Test implementation detail: tuple re-use + # Test implementation detail: tuple reuse @support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): cwr = combinations_with_replacement @@ -745,17 +745,17 @@ def test_filter(self): self.assertRaises(TypeError, next, filter(range(6), range(6))) # check copy, deepcopy, pickle - ans = [0,2,4] + ans = [0,2,4] c = filter(isEven, range(6)) - self.assertEqual(list(copy.copy(c)), ans) + self.assertEqual(list(copy.copy(c)), ans) c = filter(isEven, range(6)) - self.assertEqual(list(copy.deepcopy(c)), ans) + self.assertEqual(list(copy.deepcopy(c)), ans) for proto in range(pickle.HIGHEST_PROTOCOL + 1): c = filter(isEven, range(6)) - self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans) + self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans) next(c) - self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans[1:]) + self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans[1:]) for proto in range(pickle.HIGHEST_PROTOCOL + 1): c = filter(isEven, range(6)) self.pickletest(proto, c) @@ -773,8 +773,8 @@ def test_filterfalse(self): def test_zip(self): # XXX This is rather silly now that builtin zip() calls zip()... 
- ans = [(x,y) for x, y in zip('abc',count())] - self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) + ans = [(x,y) for x, y in zip('abc',count())] + self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6))) self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3))) self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3))) @@ -1303,13 +1303,13 @@ def test_tee(self): support.gc_collect() # For PyPy or other GCs. self.assertRaises(ReferenceError, getattr, p, '__class__') - ans = list('abc') + ans = list('abc') long_ans = list(range(10000)) # check copy a, b = tee('abc') - self.assertEqual(list(copy.copy(a)), ans) - self.assertEqual(list(copy.copy(b)), ans) + self.assertEqual(list(copy.copy(a)), ans) + self.assertEqual(list(copy.copy(b)), ans) a, b = tee(list(range(10000))) self.assertEqual(list(copy.copy(a)), long_ans) self.assertEqual(list(copy.copy(b)), long_ans) @@ -1318,10 +1318,10 @@ def test_tee(self): a, b = tee('abc') take(2, a) take(1, b) - self.assertEqual(list(copy.copy(a)), ans[2:]) - self.assertEqual(list(copy.copy(b)), ans[1:]) - self.assertEqual(list(a), ans[2:]) - self.assertEqual(list(b), ans[1:]) + self.assertEqual(list(copy.copy(a)), ans[2:]) + self.assertEqual(list(copy.copy(b)), ans[1:]) + self.assertEqual(list(a), ans[2:]) + self.assertEqual(list(b), ans[1:]) a, b = tee(range(10000)) take(100, a) take(60, b) @@ -1905,7 +1905,7 @@ def __next__(self): t3 = tnew(t1) self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc')) - # test that tee objects are weak referencable + # test that tee objects are weak referenceable a, b = tee(range(10)) p = weakref.proxy(a) self.assertEqual(getattr(p, '__class__'), type(b)) @@ -1913,15 +1913,15 @@ def __next__(self): gc.collect() # For PyPy or other GCs. 
self.assertRaises(ReferenceError, getattr, p, '__class__') - ans = list('abc') + ans = list('abc') long_ans = list(range(10000)) # Tests not applicable to the tee() recipe if False: # check copy a, b = tee('abc') - self.assertEqual(list(copy.copy(a)), ans) - self.assertEqual(list(copy.copy(b)), ans) + self.assertEqual(list(copy.copy(a)), ans) + self.assertEqual(list(copy.copy(b)), ans) a, b = tee(list(range(10000))) self.assertEqual(list(copy.copy(a)), long_ans) self.assertEqual(list(copy.copy(b)), long_ans) @@ -1930,10 +1930,10 @@ def __next__(self): a, b = tee('abc') take(2, a) take(1, b) - self.assertEqual(list(copy.copy(a)), ans[2:]) - self.assertEqual(list(copy.copy(b)), ans[1:]) - self.assertEqual(list(a), ans[2:]) - self.assertEqual(list(b), ans[1:]) + self.assertEqual(list(copy.copy(a)), ans[2:]) + self.assertEqual(list(copy.copy(b)), ans[1:]) + self.assertEqual(list(a), ans[2:]) + self.assertEqual(list(b), ans[1:]) a, b = tee(range(10000)) take(100, a) take(60, b) diff --git a/Lib/test/test_json/test_dump.py b/Lib/test/test_json/test_dump.py index 39470754003bb6..64f3624f3fbb8e 100644 --- a/Lib/test/test_json/test_dump.py +++ b/Lib/test/test_json/test_dump.py @@ -41,9 +41,9 @@ def test_encode_truefalse(self): # Issue 16228: Crash on encoding resized list def test_encode_mutated(self): a = [object()] * 10 - def crasher(obj): + def crash(obj): del a[-1] - self.assertEqual(self.dumps(a, default=crasher), + self.assertEqual(self.dumps(a, default=crash), '[null, null, null, null, null]') # Issue 24094 diff --git a/Lib/test/test_locale.py b/Lib/test/test_locale.py index 55b502e52ca454..0f5858188124ef 100644 --- a/Lib/test/test_locale.py +++ b/Lib/test/test_locale.py @@ -537,7 +537,7 @@ def test_getpreferredencoding(self): codecs.lookup(enc) def test_strcoll_3303(self): - # test crasher from bug #3303 + # test crash from bug #3303 self.assertRaises(TypeError, locale.strcoll, "a", None) self.assertRaises(TypeError, locale.strcoll, b"a", None) @@ -549,7 +549,7 @@ def test_setlocale_category(self): locale.setlocale(locale.LC_MONETARY) locale.setlocale(locale.LC_NUMERIC) - # crasher from bug #7419 + # crash from bug #7419 self.assertRaises(locale.Error, locale.setlocale, 12345) def test_getsetlocale_issue1813(self): diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py index 275f7ce47d09b5..e82dc611baac61 100644 --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -2387,7 +2387,7 @@ def __getattr__(self, attribute): return getattr(queue, attribute) class CustomQueueFakeProtocol(CustomQueueProtocol): - # An object implementing the minimial Queue API for + # An object implementing the minimal Queue API for # the logging module but with incorrect signatures. # # The object will be considered a valid queue class since we diff --git a/Lib/test/test_long.py b/Lib/test/test_long.py index f336d49fa4f008..2e1f174c8fe456 100644 --- a/Lib/test/test_long.py +++ b/Lib/test/test_long.py @@ -938,9 +938,9 @@ def test_correctly_rounded_true_division(self): self.check_truediv(n, 2**1076) # largeish random divisions: a/b where |a| <= |b| <= - # 2*|a|; |ans| is between 0.5 and 1.0, so error should + # 2*|a|; |ans| is between 0.5 and 1.0, so error should # always be bounded by 2**-54 with equality possible only - # if the least significant bit of q=ans*2**53 is zero. + # if the least significant bit of q=ans*2**53 is zero. 
for M in [10**10, 10**100, 10**1000]: for i in range(1000): a = random.randrange(1, M) diff --git a/Lib/test/test_memoryview.py b/Lib/test/test_memoryview.py index 64f440f180bbf0..653b8e0eeba6bd 100644 --- a/Lib/test/test_memoryview.py +++ b/Lib/test/test_memoryview.py @@ -738,7 +738,7 @@ def test_picklebuffer_reference_loop(self): @support.requires_resource("cpu") class RacingTest(unittest.TestCase): def test_racing_getbuf_and_releasebuf(self): - """Repeatly access the memoryview for racing.""" + """Repeatedly access the memoryview for racing.""" try: from multiprocessing.managers import SharedMemoryManager except ImportError: diff --git a/Lib/test/test_ntpath.py b/Lib/test/test_ntpath.py index 22f6403d482bc4..80405563d54d1d 100644 --- a/Lib/test/test_ntpath.py +++ b/Lib/test/test_ntpath.py @@ -131,10 +131,10 @@ def test_splitdrive(self): def test_splitdrive_invalid_paths(self): splitdrive = ntpath.splitdrive - self.assertEqual(splitdrive('\\\\ser\x00ver\\sha\x00re\\di\x00r'), - ('\\\\ser\x00ver\\sha\x00re', '\\di\x00r')) - self.assertEqual(splitdrive(b'\\\\ser\x00ver\\sha\x00re\\di\x00r'), - (b'\\\\ser\x00ver\\sha\x00re', b'\\di\x00r')) + self.assertEqual(splitdrive('\\\\set\x00ver\\sha\x00re\\di\x00r'), + ('\\\\set\x00ver\\sha\x00re', '\\di\x00r')) + self.assertEqual(splitdrive(b'\\\\set\x00ver\\sha\x00re\\di\x00r'), + (b'\\\\set\x00ver\\sha\x00re', b'\\di\x00r')) self.assertEqual(splitdrive("\\\\\udfff\\\udffe\\\udffd"), ('\\\\\udfff\\\udffe', '\\\udffd')) if sys.platform == 'win32': @@ -237,10 +237,10 @@ def test_splitroot(self): def test_splitroot_invalid_paths(self): splitroot = ntpath.splitroot - self.assertEqual(splitroot('\\\\ser\x00ver\\sha\x00re\\di\x00r'), - ('\\\\ser\x00ver\\sha\x00re', '\\', 'di\x00r')) - self.assertEqual(splitroot(b'\\\\ser\x00ver\\sha\x00re\\di\x00r'), - (b'\\\\ser\x00ver\\sha\x00re', b'\\', b'di\x00r')) + self.assertEqual(splitroot('\\\\set\x00ver\\sha\x00re\\di\x00r'), + ('\\\\set\x00ver\\sha\x00re', '\\', 'di\x00r')) + self.assertEqual(splitroot(b'\\\\set\x00ver\\sha\x00re\\di\x00r'), + (b'\\\\set\x00ver\\sha\x00re', b'\\', b'di\x00r')) self.assertEqual(splitroot("\\\\\udfff\\\udffe\\\udffd"), ('\\\\\udfff\\\udffe', '\\', '\udffd')) if sys.platform == 'win32': diff --git a/Lib/test/test_opcache.py b/Lib/test/test_opcache.py index 30baa09048616c..4f4a9516d8411b 100644 --- a/Lib/test/test_opcache.py +++ b/Lib/test/test_opcache.py @@ -571,7 +571,7 @@ def test(default=None): def make_deferred_ref_count_obj(): """Create an object that uses deferred reference counting. - Only objects that use deferred refence counting may be stored in inline + Only objects that use deferred reference counting may be stored in inline caches in free-threaded builds. This constructs a new class named Foo, which uses deferred reference counting. 
""" diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py index de3a17fe893170..779c85400b477d 100644 --- a/Lib/test/test_os.py +++ b/Lib/test/test_os.py @@ -176,7 +176,7 @@ def test_getcwdb(self): # Tests creating TESTFN -class FileTests(unittest.TestCase): +class file tests(unittest.TestCase): def setUp(self): if os.path.lexists(os_helper.TESTFN): os.unlink(os_helper.TESTFN) diff --git a/Lib/test/test_pdb.py b/Lib/test/test_pdb.py index 6b74e21ad73d1a..33977bee188d9e 100644 --- a/Lib/test/test_pdb.py +++ b/Lib/test/test_pdb.py @@ -4835,7 +4835,7 @@ def test_convvar_completion(self): def test_local_namespace(self): script = textwrap.dedent(""" def f(): - original = "I live Pythin" + original = "I live Python" import pdb; pdb.Pdb().set_trace() f() """) diff --git a/Lib/test/test_peg_generator/test_c_parser.py b/Lib/test/test_peg_generator/test_c_parser.py index aa01a9b8f7ed87..4d97018937efa4 100644 --- a/Lib/test/test_peg_generator/test_c_parser.py +++ b/Lib/test/test_peg_generator/test_c_parser.py @@ -77,7 +77,7 @@ class TestCParser(unittest.TestCase): @classmethod def setUpClass(cls): if cls._has_run: - # Since gh-104798 (Use setuptools in peg-generator and reenable + # Since gh-104798 (Use setuptools in peg-generator and re-enable # tests), this test case has been producing ref leaks. Initial # debugging points to bug(s) in setuptools and/or importlib. # See gh-105063 for more info. @@ -92,7 +92,7 @@ def setUpClass(cls): cls.tmp_base = os.getcwd() if os.path.samefile(cls.tmp_base, os_helper.SAVEDCWD): cls.tmp_base = None - # Create a directory for the reuseable static library part of + # Create a directory for the reusable static library part of # the pegen extension build process. This greatly reduces the # runtime overhead of spawning compiler processes. cls.library_dir = tempfile.mkdtemp(dir=cls.tmp_base) diff --git a/Lib/test/test_plistlib.py b/Lib/test/test_plistlib.py index a0c76e5dec5ebe..5b420cb54a2e68 100644 --- a/Lib/test/test_plistlib.py +++ b/Lib/test/test_plistlib.py @@ -858,7 +858,7 @@ def test_load_aware_datetime(self): self.assertEqual(dt.tzinfo, datetime.UTC) @unittest.skipUnless("America/Los_Angeles" in zoneinfo.available_timezones(), - "Can't find timezone datebase") + "Can't find timezone database") def test_dump_aware_datetime(self): dt = datetime.datetime(2345, 6, 7, 8, 9, 10, tzinfo=zoneinfo.ZoneInfo("America/Los_Angeles")) @@ -877,7 +877,7 @@ def test_dump_utc_aware_datetime(self): self.assertEqual(loaded_dt, dt) @unittest.skipUnless("America/Los_Angeles" in zoneinfo.available_timezones(), - "Can't find timezone datebase") + "Can't find timezone database") def test_dump_aware_datetime_without_aware_datetime_option(self): dt = datetime.datetime(2345, 6, 7, 8, tzinfo=zoneinfo.ZoneInfo("America/Los_Angeles")) @@ -1032,7 +1032,7 @@ def test_load_aware_datetime(self): datetime.datetime(2345, 6, 7, 8, tzinfo=datetime.UTC)) @unittest.skipUnless("America/Los_Angeles" in zoneinfo.available_timezones(), - "Can't find timezone datebase") + "Can't find timezone database") def test_dump_aware_datetime_without_aware_datetime_option(self): dt = datetime.datetime(2345, 6, 7, 8, tzinfo=zoneinfo.ZoneInfo("America/Los_Angeles")) diff --git a/Lib/test/test_pty.py b/Lib/test/test_pty.py index 4836f38c388c05..9b2ffd1e375ab5 100644 --- a/Lib/test/test_pty.py +++ b/Lib/test/test_pty.py @@ -53,7 +53,7 @@ def normalize_output(data): # etc.) # This is about the best we can do without getting some feedback - # from someone more knowledgable. + # from someone more knowledgeable. 
# OSF/1 (Tru64) apparently turns \n into \r\r\n. if data.endswith(b'\r\r\n'): diff --git a/Lib/test/test_pyrepl/test_pyrepl.py b/Lib/test/test_pyrepl/test_pyrepl.py index 657a971f8769df..6997a9223f8b0d 100644 --- a/Lib/test/test_pyrepl/test_pyrepl.py +++ b/Lib/test/test_pyrepl/test_pyrepl.py @@ -1005,7 +1005,7 @@ def test_builtin_completion_top_level(self): # Make iter_modules() search only the standard library. # This makes the test more reliable in case there are # other user packages/scripts on PYTHONPATH which can - # intefere with the completions. + # interfere with the completions. lib_path = os.path.dirname(importlib.__path__[0]) sys.path = [lib_path] diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py index 5bc3c5924b07fb..c33c0fd6f971b6 100644 --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -1966,7 +1966,7 @@ def test_leak_tmp_file(self): import tempfile import unittest - class FileTests(unittest.TestCase): + class file tests(unittest.TestCase): def test_leak_tmp_file(self): filename = os.path.join(tempfile.gettempdir(), 'mytmpfile') with open(filename, "wb") as fp: @@ -2383,7 +2383,7 @@ def test_format_duration(self): def test_normalize_test_name(self): normalize = normalize_test_name - self.assertEqual(normalize('test_access (test.test_os.FileTests.test_access)'), + self.assertEqual(normalize('test_access (test.test_os.file tests.test_access)'), 'test_access') self.assertEqual(normalize('setUpClass (test.test_os.ChownFileTests)', is_error=True), 'ChownFileTests') @@ -2424,7 +2424,7 @@ def id(self): patterns = get_match_tests() self.addCleanup(set_match_tests, patterns) - test_access = Test('test.test_os.FileTests.test_access') + test_access = Test('test.test_os.file tests.test_access') test_chdir = Test('test.test_os.Win32ErrorTests.test_chdir') test_copy = Test('test.test_shutil.TestCopy.test_copy') diff --git a/Lib/test/test_richcmp.py b/Lib/test/test_richcmp.py index b967c7623c57b0..f1c726dd24c0e6 100644 --- a/Lib/test/test_richcmp.py +++ b/Lib/test/test_richcmp.py @@ -93,14 +93,14 @@ def checkfail(self, error, opname, *args): for op in opmap[opname]: self.assertRaises(error, op, *args) - def checkequal(self, opname, a, b, expres): + def checkequal(self, opname, a, b, express): for op in opmap[opname]: realres = op(a, b) - # can't use assertEqual(realres, expres) here - self.assertEqual(len(realres), len(expres)) + # can't use assertEqual(realres, express) here + self.assertEqual(len(realres), len(express)) for i in range(len(realres)): # results are bool, so we can use "is" here - self.assertTrue(realres[i] is expres[i]) + self.assertTrue(realres[i] is express[i]) def test_mixed(self): # check that comparisons involving Vector objects @@ -149,7 +149,7 @@ def test_basic(self): testoutcome = op(ta, tb) self.assertEqual(realoutcome, testoutcome) - def checkvalue(self, opname, a, b, expres): + def checkvalue(self, opname, a, b, express): for typea in (int, Number): for typeb in (int, Number): ta = typea(a) @@ -157,7 +157,7 @@ def checkvalue(self, opname, a, b, expres): for op in opmap[opname]: realres = op(ta, tb) realres = getattr(realres, "x", realres) - self.assertTrue(realres is expres) + self.assertTrue(realres is express) def test_values(self): # check all operators and all comparison results diff --git a/Lib/test/test_set.py b/Lib/test/test_set.py index c0df9507bd7f5e..491c9e65d7213f 100644 --- a/Lib/test/test_set.py +++ b/Lib/test/test_set.py @@ -124,8 +124,8 @@ def test_isdisjoint(self): def f(s1, s2): 'Pure python equivalent of 
isdisjoint()' return not set(s1).intersection(s2) - for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef': - s1 = self.thetype(larg) + for large in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef': + s1 = self.thetype(large) for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef': for C in set, frozenset, dict.fromkeys, str, list, tuple: s2 = C(rarg) @@ -661,7 +661,7 @@ def check_unhashable_element(): with check_unhashable_element(): myset.discard(elem) - # Only TypeError exception is overriden, + # Only TypeError exception is overridden, # other exceptions are left unchanged. class HashError: def __hash__(self): diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py index 3dd67b2a2aba97..0a9eb04ba166b2 100644 --- a/Lib/test/test_socket.py +++ b/Lib/test/test_socket.py @@ -7083,7 +7083,7 @@ def test_aead_aes_gcm(self): self.assertEqual(expected_ct, res[assoclen:-taglen]) self.assertEqual(expected_tag, res[-taglen:]) - # create anc data manually + # create ancillary data manually pack_uint32 = struct.Struct('I').pack op, _ = algo.accept() with op: diff --git a/Lib/test/test_sort.py b/Lib/test/test_sort.py index 2a7cfb7affaa21..9c12875357c725 100644 --- a/Lib/test/test_sort.py +++ b/Lib/test/test_sort.py @@ -154,7 +154,7 @@ def test_small_stability(self): class TestBugs(unittest.TestCase): def test_bug453523(self): - # bug 453523 -- list.sort() crasher. + # bug 453523 -- list.sort() crash. # If this fails, the most likely outcome is a core dump. # Mutations during a list sort should raise a ValueError. diff --git a/Lib/test/test_sqlite3/test_dbapi.py b/Lib/test/test_sqlite3/test_dbapi.py index 3602726437d8cf..6530342be04b95 100644 --- a/Lib/test/test_sqlite3/test_dbapi.py +++ b/Lib/test/test_sqlite3/test_dbapi.py @@ -460,8 +460,8 @@ def test_connection_init_bad_isolation_level(self): "BOGUS", " ", "DEFERRE", - "IMMEDIAT", - "EXCLUSIV", + "IMMEDIAT", + "EXCLUSIV", "DEFERREDS", "IMMEDIATES", "EXCLUSIVES", diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py index 9e519537ca5ed3..99d7aec925fcd5 100644 --- a/Lib/test/test_ssl.py +++ b/Lib/test/test_ssl.py @@ -4100,7 +4100,7 @@ def test_ecdh_curve(self): client_context, server_context, hostname = testing_context() server_context.set_ecdh_curve("secp384r1") - server_context.set_ciphers("ECDHE:!eNULL:!aNULL") + server_context.set_ciphers("ECDHE:!eNULL:!aNULL") server_context.minimum_version = ssl.TLSVersion.TLSv1_2 stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, @@ -4109,7 +4109,7 @@ def test_ecdh_curve(self): # server auto, client secp384r1 client_context, server_context, hostname = testing_context() client_context.set_ecdh_curve("secp384r1") - server_context.set_ciphers("ECDHE:!eNULL:!aNULL") + server_context.set_ciphers("ECDHE:!eNULL:!aNULL") server_context.minimum_version = ssl.TLSVersion.TLSv1_2 stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, @@ -4119,7 +4119,7 @@ def test_ecdh_curve(self): client_context, server_context, hostname = testing_context() client_context.set_ecdh_curve("prime256v1") server_context.set_ecdh_curve("secp384r1") - server_context.set_ciphers("ECDHE:!eNULL:!aNULL") + server_context.set_ciphers("ECDHE:!eNULL:!aNULL") server_context.minimum_version = ssl.TLSVersion.TLSv1_2 with self.assertRaises(ssl.SSLError): server_params_test(client_context, server_context, @@ -4441,7 +4441,7 @@ def test_session_handling(self): with 
client_context2.wrap_socket(socket.socket(), server_hostname=hostname) as s: - # cannot re-use session with a different SSLContext + # cannot reuse session with a different SSLContext with self.assertRaises(ValueError) as e: s.session = session s.connect((HOST, server.port)) diff --git a/Lib/test/test_stat.py b/Lib/test/test_stat.py index 5fd25d5012c425..99c28ac304bd10 100644 --- a/Lib/test/test_stat.py +++ b/Lib/test/test_stat.py @@ -207,7 +207,7 @@ def test_devices(self): self.assertEqual(modestr[0], 'c') self.assertS_IS("CHR", st_mode) # Linux block devices, BSD has no block devices anymore - for blockdev in ("/dev/sda", "/dev/hda"): + for blockdev in ("/dev/sda", "/dev/hda"): if os.path.exists(blockdev): st_mode, modestr = self.get_mode(blockdev, lstat=False) self.assertEqual(modestr[0], 'b') diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py index 8250b0aef09aec..6eb7f15cbfe3f3 100644 --- a/Lib/test/test_statistics.py +++ b/Lib/test/test_statistics.py @@ -2998,7 +2998,7 @@ def test_cdf(self): X = NormalDist(100, 15) cdfs = [X.cdf(x) for x in range(1, 200)] self.assertEqual(set(map(type, cdfs)), {float}) - # Verify montonic + # Verify monotonic self.assertEqual(cdfs, sorted(cdfs)) # Verify center (should be exact) self.assertEqual(X.cdf(100), 0.50) diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py index 0241e543cd7dde..0377b7baf63b16 100644 --- a/Lib/test/test_strptime.py +++ b/Lib/test/test_strptime.py @@ -117,15 +117,15 @@ def test_lang(self): class TimeRETests(unittest.TestCase): - """Tests for TimeRE.""" + """Tests for TimeRE.""" def setUp(self): - """Construct generic TimeRE object.""" - self.time_re = _strptime.TimeRE() + """Construct generic TimeRE object.""" + self.time_re = _strptime.TimeRE() self.locale_time = _strptime.LocaleTime() def test_pattern(self): - # Test TimeRE.pattern + # Test TimeRE.pattern pattern_string = self.time_re.pattern(r"%a %A %d %Y") self.assertTrue(pattern_string.find(self.locale_time.a_weekday[2]) != -1, "did not find abbreviated weekday in pattern string '%s'" % @@ -178,8 +178,8 @@ def test_blankpattern(self): # Fixes bug #661354 test_locale = _strptime.LocaleTime() test_locale.timezone = (frozenset(), frozenset()) - self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '', - "with timezone == ('',''), TimeRE().pattern('%Z') != ''") + self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '', + "with timezone == ('',''), TimeRE().pattern('%Z') != ''") def test_matching_with_escapes(self): # Make sure a format that requires escaping of characters works @@ -195,7 +195,7 @@ def test_locale_data_w_regex_metacharacters(self): locale_time.timezone = (frozenset(("utc", "gmt", "Tokyo (standard time)")), frozenset("Tokyo (daylight time)")) - time_re = _strptime.TimeRE(locale_time) + time_re = _strptime.TimeRE(locale_time) self.assertTrue(time_re.compile("%Z").match("Tokyo (standard time)"), "locale data that contains regex metacharacters is not" " properly escaped") @@ -832,7 +832,7 @@ def test_regex_cleanup(self): self.assertEqual(len(_strptime._regex_cache), 1) def test_new_localetime(self): - # A new LocaleTime instance should be created when a new TimeRE object + # A new LocaleTime instance should be created when a new TimeRE object # is created. 
locale_time_id = _strptime._TimeRE_cache.locale_time _strptime._TimeRE_cache.locale_time.lang = "Ni" @@ -840,7 +840,7 @@ def test_new_localetime(self): self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time) def test_TimeRE_recreation_locale(self): - # The TimeRE instance should be recreated upon changing the locale. + # The TimeRE instance should be recreated upon changing the locale. with support.run_with_locale('LC_TIME', 'en_US.UTF8'): _strptime._strptime_time('10 2004', '%d %Y') # Get id of current cache object. @@ -861,7 +861,7 @@ def test_TimeRE_recreation_locale(self): @support.run_with_tz('STD-1DST,M4.1.0,M10.1.0') def test_TimeRE_recreation_timezone(self): - # The TimeRE instance should be recreated upon changing the timezone. + # The TimeRE instance should be recreated upon changing the timezone. oldtzname = time.tzname tm = _strptime._strptime_time(time.tzname[0], '%Z') self.assertEqual(tm.tm_isdst, 0) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py index f0e350c71f60ea..4287765c60348d 100644 --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -3438,7 +3438,7 @@ def test_vfork_used_when_expected(self): # because libc tends to implement that internally using vfork. But # that'd just be testing a libc+kernel implementation detail. - # Are intersted in the system calls: + # Are interested in the system calls: # clone,clone2,clone3,fork,vfork,exit,exit_group # Unfortunately using `--trace` with that list to strace fails because # not all are supported on all platforms (ex. clone2 is ia64 only...) diff --git a/Lib/test/test_syntax.py b/Lib/test/test_syntax.py index c52d24219410c2..e7bb38484c6306 100644 --- a/Lib/test/test_syntax.py +++ b/Lib/test/test_syntax.py @@ -1844,21 +1844,21 @@ SyntaxError: invalid syntax. Did you mean 'def'? >>> def foo(): -... returm result +... returm result Traceback (most recent call last): SyntaxError: invalid syntax. Did you mean 'return'? ->>> lamda x: x ** 2 +>>> lamda x: x ** 2 Traceback (most recent call last): SyntaxError: invalid syntax. Did you mean 'lambda'? >>> def foo(): -... yeld i +... yeld i Traceback (most recent call last): SyntaxError: invalid syntax. Did you mean 'yield'? >>> def foo(): -... globel counter +... globel counter Traceback (most recent call last): SyntaxError: invalid syntax. Did you mean 'global'? 
diff --git a/Lib/test/test_sysconfig.py b/Lib/test/test_sysconfig.py index 2eb8de4b29fe96..f2c9848eefc45f 100644 --- a/Lib/test/test_sysconfig.py +++ b/Lib/test/test_sysconfig.py @@ -697,7 +697,7 @@ def test_sysconfigdata_json(self): # Keys dependent on uncontrollable external context ignore_keys = {'userbase'} - # Keys dependent on Python being run outside the build directrory + # Keys dependent on Python being run outside the build directory if sysconfig.is_python_build(): ignore_keys |= {'srcdir'} # Keys dependent on the executable location @@ -706,7 +706,7 @@ def test_sysconfigdata_json(self): # Keys dependent on the environment (different inside virtual environments) if sys.prefix != sys.base_prefix: ignore_keys |= {'prefix', 'exec_prefix', 'base', 'platbase'} - # Keys dependent on Python being run from the prefix targetted when building (different on relocatable installs) + # Keys dependent on Python being run from the prefix targeted when building (different on relocatable installs) if sysconfig._installation_is_relocated(): ignore_keys |= {'prefix', 'exec_prefix', 'base', 'platbase', 'installed_base', 'installed_platbase'} diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py index 7055e1ed147a9e..db4028a5d3677c 100644 --- a/Lib/test/test_tarfile.py +++ b/Lib/test/test_tarfile.py @@ -4112,7 +4112,7 @@ def test_sneaky_hardlink_fallback(self): arc.add("b/") # Point "c" to the bottom of the tree in "a" arc.add("c", symlink_to=os.path.join("a", "t")) - # link to non-existant location under "a" + # link to non-existent location under "a" arc.add("c/escape", symlink_to=os.path.join("..", "..", "link_here")) # Move "c" to point to "b" ("c/escape" no longer exists) diff --git a/Lib/test/test_tkinter/test_geometry_managers.py b/Lib/test/test_tkinter/test_geometry_managers.py index d71a634a767310..7337c4390d604b 100644 --- a/Lib/test/test_tkinter/test_geometry_managers.py +++ b/Lib/test/test_tkinter/test_geometry_managers.py @@ -604,7 +604,7 @@ def test_grid_configure_rownspan(self): def test_grid_configure_sticky(self): f = tkinter.Frame(self.root, bg='red') - with self.assertRaisesRegex(TclError, 'bad stickyness value "glue"'): + with self.assertRaisesRegex(TclError, 'bad stickyness value "glue"'): f.grid_configure(sticky='glue') f.grid_configure(sticky='ne') self.assertEqual(f.grid_info()['sticky'], 'ne') diff --git a/Lib/test/test_tkinter/test_widgets.py b/Lib/test/test_tkinter/test_widgets.py index ff3f92e9b5ef83..30d129e4205725 100644 --- a/Lib/test/test_tkinter/test_widgets.py +++ b/Lib/test/test_tkinter/test_widgets.py @@ -1440,7 +1440,7 @@ def test_paneconfigure_sticky(self): p, b, c = self.create2() self.check_paneconfigure(p, b, 'sticky', 'nsew', 'nesw') self.check_paneconfigure_bad(p, b, 'sticky', - 'bad stickyness value "badValue": must ' + 'bad stickyness value "badValue": must ' 'be a string containing zero or more of ' 'n, e, s, and w') diff --git a/Lib/test/test_traceback.py b/Lib/test/test_traceback.py index 74b979d009664d..904429dbd5a60a 100644 --- a/Lib/test/test_traceback.py +++ b/Lib/test/test_traceback.py @@ -4107,7 +4107,7 @@ class A: def test_getattr_error_bad_suggestions_do_not_trigger_for_small_names(self): class MyClass: - vvv = mom = w = id = pytho = None + vvv = mom = w = id = python = None for name in ("b", "v", "m", "py"): with self.subTest(name=name): @@ -4324,7 +4324,7 @@ def test_import_from_suggestions_do_not_trigger_for_long_attributes(self): self.assertNotIn("blech", actual) def 
test_import_from_error_bad_suggestions_do_not_trigger_for_small_names(self): - code = "vvv = mom = w = id = pytho = None" + code = "vvv = mom = w = id = pytho = None" for name in ("b", "v", "m", "py"): with self.subTest(name=name): @@ -4432,19 +4432,19 @@ def func(): def test_name_error_bad_suggestions_do_not_trigger_for_small_names(self): def f_b(): - vvv = mom = w = id = pytho = None + vvv = mom = w = id = pytho = None b def f_v(): - vvv = mom = w = id = pytho = None + vvv = mom = w = id = pytho = None v def f_m(): - vvv = mom = w = id = pytho = None + vvv = mom = w = id = pytho = None m def f_py(): - vvv = mom = w = id = pytho = None + vvv = mom = w = id = pytho = None py for name, func in (("b", f_b), ("v", f_v), ("m", f_m), ("py", f_py)): diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py index b1615bbff383c2..af7c7947da65e3 100644 --- a/Lib/test/test_typing.py +++ b/Lib/test/test_typing.py @@ -9497,7 +9497,7 @@ class FC: class ACF: x: Annotated[ClassVar[Final[int]], "a decoration"] - class CAF: + class CAF: x: ClassVar[Annotated[Final[int], "a decoration"]] class AFC: @@ -9509,7 +9509,7 @@ class FAC: self.assertEqual(get_type_hints(CF, globals())['x'], ClassVar[Final[int]]) self.assertEqual(get_type_hints(FC, globals())['x'], Final[ClassVar[int]]) self.assertEqual(get_type_hints(ACF, globals())['x'], ClassVar[Final[int]]) - self.assertEqual(get_type_hints(CAF, globals())['x'], ClassVar[Final[int]]) + self.assertEqual(get_type_hints(CAF, globals())['x'], ClassVar[Final[int]]) self.assertEqual(get_type_hints(AFC, globals())['x'], Final[ClassVar[int]]) self.assertEqual(get_type_hints(FAC, globals())['x'], Final[ClassVar[int]]) diff --git a/Lib/test/test_unittest/testmock/testpatch.py b/Lib/test/test_unittest/testmock/testpatch.py index bd85fdcfc472a6..abff8542dd2271 100644 --- a/Lib/test/test_unittest/testmock/testpatch.py +++ b/Lib/test/test_unittest/testmock/testpatch.py @@ -1544,22 +1544,22 @@ def test_new_callable_failure(self): original_g = Foo.g original_foo = Foo.foo - def crasher(): + def crasher(): raise NameError('crasher') @patch.object(Foo, 'g', 1) - @patch.object(Foo, 'foo', new_callable=crasher) + @patch.object(Foo, 'foo', new_callable=crasher) @patch.object(Foo, 'f', 1) def thing1(): pass - @patch.object(Foo, 'foo', new_callable=crasher) + @patch.object(Foo, 'foo', new_callable=crasher) @patch.object(Foo, 'g', 1) @patch.object(Foo, 'f', 1) def thing2(): pass @patch.object(Foo, 'g', 1) @patch.object(Foo, 'f', 1) - @patch.object(Foo, 'foo', new_callable=crasher) + @patch.object(Foo, 'foo', new_callable=crasher) def thing3(): pass for func in thing1, thing2, thing3: @@ -1582,8 +1582,8 @@ def test_patch_multiple_failure(self): bad = patch.object(Foo, 'missing', 1) bad.attribute_name = 'missing' - for additionals in [good, bad], [bad, good]: - patcher.additional_patchers = additionals + for additionals in [good, bad], [bad, good]: + patcher.additional_patchers = additionals @patcher def func(): pass @@ -1598,7 +1598,7 @@ def test_patch_multiple_new_callable_failure(self): original_g = Foo.g original_foo = Foo.foo - def crasher(): + def crasher(): raise NameError('crasher') patcher = patch.object(Foo, 'f', 1) @@ -1607,11 +1607,11 @@ def crasher(): good = patch.object(Foo, 'g', 1) good.attribute_name = 'g' - bad = patch.object(Foo, 'foo', new_callable=crasher) + bad = patch.object(Foo, 'foo', new_callable=crasher) bad.attribute_name = 'foo' - for additionals in [good, bad], [bad, good]: - patcher.additional_patchers = additionals + for additionals in [good, bad], [bad,
good]: + patcher.additional_patchers = additionals @patcher def func(): pass diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py index 7d7f2fa00d35b6..e22fa444c18556 100644 --- a/Lib/test/test_urllib2.py +++ b/Lib/test/test_urllib2.py @@ -1719,7 +1719,7 @@ def _test_basic_auth(self, opener, auth_handler, auth_header, realm, http_handler, password_manager, request_url, protected_url): import base64 - user, password = "wile", "coyote" + user, password = "wile", "coyote" # .add_password() fed through to password manager auth_handler.add_password(realm, request_url, user, password) @@ -1756,7 +1756,7 @@ def test_basic_prior_auth_auto_send(self): # Assume already authenticated if is_authenticated=True # for APIs like Github that don't return 401 - user, password = "wile", "coyote" + user, password = "wile", "coyote" request_url = "http://acme.example.com/protected" http_handler = MockHTTPHandlerCheckAuth(200) diff --git a/Lib/test/test_weakref.py b/Lib/test/test_weakref.py index 4c7c900eb56ae1..b071ba3f06bf9f 100644 --- a/Lib/test/test_weakref.py +++ b/Lib/test/test_weakref.py @@ -274,14 +274,14 @@ def test_ref_reuse(self): proxy = weakref.proxy(o) ref2 = weakref.ref(o) self.assertIs(ref1, ref2, - "reference object w/out callback should be re-used") + "reference object w/out callback should be reused") o = C() proxy = weakref.proxy(o) ref1 = weakref.ref(o) ref2 = weakref.ref(o) self.assertIs(ref1, ref2, - "reference object w/out callback should be re-used") + "reference object w/out callback should be reused") self.assertEqual(weakref.getweakrefcount(o), 2, "wrong weak ref count for object") del proxy @@ -295,7 +295,7 @@ def test_proxy_reuse(self): ref = weakref.ref(o) proxy2 = weakref.proxy(o) self.assertIs(proxy1, proxy2, - "proxy object w/out callback should have been re-used") + "proxy object w/out callback should have been reused") def test_basic_proxy(self): o = C() @@ -1857,7 +1857,7 @@ def test_weak_keyed_bad_delitem(self): self.assertRaises(KeyError, d.__delitem__, o) self.assertRaises(KeyError, d.__getitem__, o) - # If a key isn't of a weakly referencable type, __getitem__ and + # If a key isn't of a weakly referenceable type, __getitem__ and # __setitem__ raise TypeError. __delitem__ should too. self.assertRaises(TypeError, d.__delitem__, 13) self.assertRaises(TypeError, d.__getitem__, 13) @@ -2260,7 +2260,7 @@ def test_names(self): >>> class Dict(dict): ... pass ... ->>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable +>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referenceable >>> r = weakref.ref(obj) >>> print(r() is obj) True diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py index bf6d5074fdebd8..5e4b704f869e1f 100644 --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -2736,7 +2736,7 @@ def test_remove_with_clear_assume_existing(self): def do_test_remove_with_clear(self, *, raises): - # Until the discrepency between "del root[:]" and "root.clear()" is + # Until the discrepancy between "del root[:]" and "root.clear()" is # resolved, we need to keep two tests. Previously, using "del root[:]" # did not crash with the reproducer of gh-126033 while "root.clear()" # did.
diff --git a/Lib/test/test_zipfile/test_core.py b/Lib/test/test_zipfile/test_core.py index ada96813709aea..355a5af7911542 100644 --- a/Lib/test/test_zipfile/test_core.py +++ b/Lib/test/test_zipfile/test_core.py @@ -2378,20 +2378,20 @@ def test_open_conflicting_handles(self): def test_seek_tell(self): # Test seek functionality txt = b"Where's Bruce?" - bloc = txt.find(b"Bruce") + block = txt.find(b"Bruce") # Check seek on a file with zipfile.ZipFile(TESTFN, "w") as zipf: zipf.writestr("foo.txt", txt) with zipfile.ZipFile(TESTFN, "r") as zipf: with zipf.open("foo.txt", "r") as fp: - fp.seek(bloc, os.SEEK_SET) - self.assertEqual(fp.tell(), bloc) + fp.seek(block, os.SEEK_SET) + self.assertEqual(fp.tell(), block) fp.seek(-bloc, os.SEEK_CUR) self.assertEqual(fp.tell(), 0) - fp.seek(bloc, os.SEEK_CUR) - self.assertEqual(fp.tell(), bloc) - self.assertEqual(fp.read(5), txt[bloc:bloc+5]) - self.assertEqual(fp.tell(), bloc + 5) + fp.seek(block, os.SEEK_CUR) + self.assertEqual(fp.tell(), block) + self.assertEqual(fp.read(5), txt[block:block+5]) + self.assertEqual(fp.tell(), block + 5) fp.seek(0, os.SEEK_END) self.assertEqual(fp.tell(), len(txt)) fp.seek(0, os.SEEK_SET) @@ -2402,14 +2402,14 @@ def test_seek_tell(self): zipf.writestr("foo.txt", txt) with zipfile.ZipFile(data, mode="r") as zipf: with zipf.open("foo.txt", "r") as fp: - fp.seek(bloc, os.SEEK_SET) - self.assertEqual(fp.tell(), bloc) + fp.seek(block, os.SEEK_SET) + self.assertEqual(fp.tell(), block) fp.seek(-bloc, os.SEEK_CUR) self.assertEqual(fp.tell(), 0) - fp.seek(bloc, os.SEEK_CUR) - self.assertEqual(fp.tell(), bloc) - self.assertEqual(fp.read(5), txt[bloc:bloc+5]) - self.assertEqual(fp.tell(), bloc + 5) + fp.seek(block, os.SEEK_CUR) + self.assertEqual(fp.tell(), block) + self.assertEqual(fp.read(5), txt[block:block+5]) + self.assertEqual(fp.tell(), block + 5) fp.seek(0, os.SEEK_END) self.assertEqual(fp.tell(), len(txt)) fp.seek(0, os.SEEK_SET) @@ -2418,12 +2418,12 @@ def test_seek_tell(self): def test_read_after_seek(self): # Issue 102956: Make sure seek(x, os.SEEK_CUR) doesn't break read() txt = b"Charge men!" - bloc = txt.find(b"men") + block = txt.find(b"men") with zipfile.ZipFile(TESTFN, "w") as zipf: zipf.writestr("foo.txt", txt) with zipfile.ZipFile(TESTFN, mode="r") as zipf: with zipf.open("foo.txt", "r") as fp: - fp.seek(bloc, os.SEEK_CUR) + fp.seek(block, os.SEEK_CUR) self.assertEqual(fp.read(-1), b'men!') with zipfile.ZipFile(TESTFN, mode="r") as zipf: with zipf.open("foo.txt", "r") as fp: @@ -2802,16 +2802,16 @@ def test_seek_tell(self): self.zip.setpassword(b"python") txt = self.plain test_word = b'encryption' - bloc = txt.find(test_word) + block = txt.find(test_word) bloc_len = len(test_word) with self.zip.open("test.txt", "r") as fp: - fp.seek(bloc, os.SEEK_SET) - self.assertEqual(fp.tell(), bloc) + fp.seek(block, os.SEEK_SET) + self.assertEqual(fp.tell(), block) fp.seek(-bloc, os.SEEK_CUR) self.assertEqual(fp.tell(), 0) - fp.seek(bloc, os.SEEK_CUR) - self.assertEqual(fp.tell(), bloc) - self.assertEqual(fp.read(bloc_len), txt[bloc:bloc+bloc_len]) + fp.seek(block, os.SEEK_CUR) + self.assertEqual(fp.tell(), block) + self.assertEqual(fp.read(bloc_len), txt[block:block+bloc_len]) # Make sure that the second read after seeking back beyond # _readbuffer returns the same content (ie. 
rewind to the start of @@ -2822,8 +2822,8 @@ def test_seek_tell(self): fp._offset = 0 fp.seek(0, os.SEEK_SET) self.assertEqual(fp.tell(), 0) - fp.seek(bloc, os.SEEK_CUR) - self.assertEqual(fp.read(bloc_len), txt[bloc:bloc+bloc_len]) + fp.seek(block, os.SEEK_CUR) + self.assertEqual(fp.read(bloc_len), txt[block:block+bloc_len]) fp.MIN_READ_SIZE = old_read_size fp.seek(0, os.SEEK_END) diff --git a/Lib/tkinter/__init__.py b/Lib/tkinter/__init__.py index a693b04870b995..f47d2ae34531b3 100644 --- a/Lib/tkinter/__init__.py +++ b/Lib/tkinter/__init__.py @@ -3697,7 +3697,7 @@ def set(self, value): def coords(self, value=None): """Return a tuple (X,Y) of the point along the centerline of the - trough that corresponds to VALUE or the current value if None is + through that corresponds to VALUE or the current value if None is given.""" return self._getints(self.tk.call(self._w, 'coords', value)) diff --git a/Lib/tkinter/ttk.py b/Lib/tkinter/ttk.py index c0cf1e787fa9ad..4f92997ebd17d4 100644 --- a/Lib/tkinter/ttk.py +++ b/Lib/tkinter/ttk.py @@ -254,20 +254,20 @@ def _list_from_layouttuple(tk, ltuple): ltuple = tk.splitlist(ltuple) res = [] - indx = 0 - while indx < len(ltuple): - name = ltuple[indx] + index = 0 + while index < len(ltuple): + name = ltuple[index] opts = {} res.append((name, opts)) - indx += 1 + index += 1 - while indx < len(ltuple): # grab name's options - opt, val = ltuple[indx:indx + 2] + while index < len(ltuple): # grab name's options + opt, val = ltuple[index:index + 2] if not opt.startswith('-'): # found next name break opt = opt[1:] # remove the '-' from the option - indx += 2 + index += 2 if opt == 'children': val = _list_from_layouttuple(tk, val) diff --git a/Lib/turtle.py b/Lib/turtle.py index e88981d298ad52..2a47aa083b7fdd 100644 --- a/Lib/turtle.py +++ b/Lib/turtle.py @@ -1646,8 +1646,8 @@ def radians(self): def _go(self, distance): """move turtle forward by specified distance""" - ende = self._position + self._orient * distance - self._goto(ende) + end = self._position + self._orient * distance + self._goto(end) def _rotate(self, angle): """Turn turtle counterclockwise by specified angle if angle > 0.""" diff --git a/Lib/unittest/mock.py b/Lib/unittest/mock.py index e1dbfdacf56337..b49409150a8414 100644 --- a/Lib/unittest/mock.py +++ b/Lib/unittest/mock.py @@ -1289,7 +1289,7 @@ class or instance) that acts as the specification for the mock object. If `return_value` attribute. * `unsafe`: By default, accessing any attribute whose name starts with - *assert*, *assret*, *asert*, *aseert*, or *assrt* raises an AttributeError. + *assert*, *assret*, *assert*, *aseert*, or *assrt* raises an AttributeError. Additionally, an AttributeError is raised when accessing attributes that match the name of an assertion method without the prefix `assert_`, e.g. accessing `called_once` instead of `assert_called_once`. diff --git a/Lib/urllib/request.py b/Lib/urllib/request.py index 41dc5d7b35dedb..e79b8e3e40a2fd 100644 --- a/Lib/urllib/request.py +++ b/Lib/urllib/request.py @@ -922,7 +922,7 @@ class AbstractBasicAuthHandler: 'realm=(["\']?)([^"\']*)\\2', re.I) - # XXX could pre-emptively send auth info already accepted (RFC 2617, + # XXX could preemptively send auth info already accepted (RFC 2617, # end of section 2, and section 1.2 immediately after "credentials" # production). 
diff --git a/Lib/xml/dom/minidom.py b/Lib/xml/dom/minidom.py index db51f350ea0153..2204eae79c2237 100644 --- a/Lib/xml/dom/minidom.py +++ b/Lib/xml/dom/minidom.py @@ -1893,7 +1893,7 @@ def renameNode(self, n, namespaceURI, name): element.setIdAttributeNode(n) # It's not clear from a semantic perspective whether we should # call the user data handlers for the NODE_RENAMED event since - # we're re-using the existing node. The draft spec has been + # we're reusing the existing node. The draft spec has been # interpreted as meaning "no, don't call the handler unless a # new node is created." return n diff --git a/Mac/BuildScript/resources/Conclusion.rtf b/Mac/BuildScript/resources/Conclusion.rtf index 9e0fa9fa6eeb73..48923ab91d86a0 100644 --- a/Mac/BuildScript/resources/Conclusion.rtf +++ b/Mac/BuildScript/resources/Conclusion.rtf @@ -4,7 +4,7 @@ {\colortbl;\red255\green255\blue255;} {\*\expandedcolortbl;;} \margl1440\margr1440\vieww10540\viewh8400\viewkind0 -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f0\fs28 \cf0 Congratulations! \fs24 diff --git a/Mac/BuildScript/resources/License.rtf b/Mac/BuildScript/resources/License.rtf index b5cb8ec41c86e2..78bd476fd8443b 100644 --- a/Mac/BuildScript/resources/License.rtf +++ b/Mac/BuildScript/resources/License.rtf @@ -4,7 +4,7 @@ {\colortbl;\red255\green255\blue255;} {\*\expandedcolortbl;;} \margl1440\margr1440\vieww18500\viewh13520\viewkind0 -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f0\b\fs36 \cf0 \ul \ulc0 HISTORY AND LICENSE\ @@ -48,7 +48,7 @@ Thanks to the many outside volunteers who have worked under Guido's direction to \f0\b \ul TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\ \ -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f1\b0 \cf0 \ulnone Python software and documentation are licensed under the Python Software Foundation License Version 2.\ \ @@ -56,7 +56,7 @@ Starting with Python 3.8.6, examples, recipes, and other code in the documentati \ Some software incorporated into Python is under different licenses. 
The licenses are listed with code falling under that license.\ \ -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \cf0 \ \f0\b PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\ @@ -134,16 +134,16 @@ Permission to use, copy, modify, and distribute this software and its documentat STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\ \ \ -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f0\b \cf0 ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION\ -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f1\b0 \cf0 \ Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted.\ \ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\ -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \cf0 \ \ diff --git a/Mac/BuildScript/resources/ReadMe.rtf b/Mac/BuildScript/resources/ReadMe.rtf index ee5ba4707dfea4..5c47d37d92a860 100644 --- a/Mac/BuildScript/resources/ReadMe.rtf +++ b/Mac/BuildScript/resources/ReadMe.rtf @@ -4,17 +4,17 @@ {\colortbl;\red255\green255\blue255;} {\*\expandedcolortbl;;} \margl1440\margr1440\vieww13380\viewh14580\viewkind0 -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f0\fs24 \cf0 This package will install Python $FULL_VERSION for macOS $MACOSX_DEPLOYMENT_TARGET for the following architecture(s): $ARCHITECTURES.\ \ -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\partightenfactor0 +\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\partightenfactor0 \f1\b \cf0 NOTE: \f0\b0 This is a beta preview of Python 3.13.0, the next feature release of Python 3. 
It is not intended for production use.\ -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \cf0 \ -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f1\b \cf0 \ul \ulc0 Certificate verification and OpenSSL\ diff --git a/Mac/BuildScript/resources/Welcome.rtf b/Mac/BuildScript/resources/Welcome.rtf index 49d6e22286be26..f1a2a8183b9df6 100644 --- a/Mac/BuildScript/resources/Welcome.rtf +++ b/Mac/BuildScript/resources/Welcome.rtf @@ -4,7 +4,7 @@ {\colortbl;\red255\green255\blue255;} {\*\expandedcolortbl;;} \margl1440\margr1440\vieww12200\viewh10880\viewkind0 -\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\partightenfactor0 \f0\fs24 \cf0 This package will install \f1\b Python $FULL_VERSION diff --git a/Mac/PythonLauncher/English.lproj/Credits.rtf b/Mac/PythonLauncher/English.lproj/Credits.rtf index 930ca221a128b0..b2ec2771a90062 100644 --- a/Mac/PythonLauncher/English.lproj/Credits.rtf +++ b/Mac/PythonLauncher/English.lproj/Credits.rtf @@ -1,7 +1,7 @@ {\rtf1\mac\ansicpg10000\cocoartf100 {\fonttbl\f0\fswiss\fcharset77 Helvetica-Bold;\f1\fswiss\fcharset77 Helvetica;} {\colortbl;\red255\green255\blue255;} -\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural \f0\b\fs24 \cf0 Engineering: \f1\b0 \ diff --git a/Makefile.pre.in b/Makefile.pre.in index 7fea799c3912dd..b55239e020afdd 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -2560,7 +2560,7 @@ TESTSUBDIRS= idlelib/idle_test \ test/certdata/capath \ test/cjkencodings \ test/configdata \ - test/crashers \ + test/crashers \ test/data \ test/decimaltestdata \ test/dtracedata \ diff --git a/Misc/ACKS b/Misc/ACKS index fabd79b9f74210..5be30c7579c1fa 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -32,7 +32,7 @@ Farhan Ahmad Matthew Ahrens Nir Aides Akira -Ege Akman +Ege Akman Yaniv Aknin Jyrki Alakuijala Tatiana Al-Chueyr @@ -318,7 +318,7 @@ Brad Chapman Greg Chapman Mitch Chapman Matt Chaput -Willow Chargin +Willow Chargin Ben Chatterton Yogesh Chaudhari Gautam Chaudhuri @@ -551,7 +551,7 @@ Troy J. Farrell Jim Fasarakis-Hilliard Mark Favas Sergey Fedoseev -Boris Feld +Boris Feld M. Felt Thomas Fenzl Niels Ferguson @@ -973,7 +973,7 @@ Derek D. Kim Gihwan Kim Jan Kim Noah Kim -Taek Joo Kim +Taek Joo Kim Yeojin Kim Sam Kimbrel Tomohiko Kinebuchi @@ -1336,7 +1336,7 @@ Max Neunhöffer Anthon van der Neut George Neville-Neil Hieu Nguyen -Nam Nguyen +Nam Nguyen Johannes Nicolai Samuel Nicolary Jonathan Niehof @@ -1544,7 +1544,7 @@ Nikolaus Rath Sridhar Ratnakumar Ysj Ray Eric S. Raymond -Edward K. Ream +Edward K. Ream Chris Rebert Marc Recht John Redford diff --git a/Misc/HISTORY b/Misc/HISTORY index d68aaa066771fb..b0de1e3b670f91 100644 --- a/Misc/HISTORY +++ b/Misc/HISTORY @@ -168,7 +168,7 @@ Core and Builtins - Issue #24806: Prevent builtin types that are not allowed to be subclassed from being subclassed through multiple inheritance.
-- Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. +- Issue #24848: Fixed a number of bugs in UTF-7 decoding of malformed data. - Issue #25280: Import trace messages emitted in verbose (-v) mode are no longer formatted twice. @@ -802,7 +802,7 @@ IDLE - Issue #25198: Enhance the initial html viewer now used for Idle Help. * Properly indent fixed-pitch text (patch by Mark Roseman). * Give code snippet a very Sphinx-like light blueish-gray background. - * Re-use initial width and height set by users for shell and editor. + * Reuse initial width and height set by users for shell and editor. * When the Table of Contents (TOC) menu is used, put the section header at the top of the screen. @@ -5277,7 +5277,7 @@ Library - Issue #17018: Make Process.join() retry if os.waitpid() fails with EINTR. -- Issue #17223: array module: Fix a crasher when converting an array containing +- Issue #17223: array module: Fix a crash when converting an array containing invalid characters (outside range [U+0000; U+10ffff]) to Unicode: repr(array), str(array) and array.tounicode(). Patch written by Manuel Jacob. @@ -10477,7 +10477,7 @@ Tests - Issue #11577: improve test coverage of binhex.py. Patch by Arkady Koplyarov. -- New test_crashers added to exercise the scripts in the Lib/test/crashers +- New test_crashers added to exercise the scripts in the Lib/test/crashes directory and confirm they fail as expected - Issue #11578: added test for the timeit module. Patch by Michael Henry. @@ -13868,7 +13868,7 @@ Library - Add count() and reverse() methods to collections.deque(). -- Fix variations of extending deques: d.extend(d) d.extendleft(d) d+=d +- Fix variations of extending dequeues: d.extend(d) d.extendleft(d) d+=d - Issue #6986: Fix crash in the JSON C accelerator when called with the wrong parameter types. Patch by Victor Stinner. @@ -14105,7 +14105,7 @@ Library digits in input, as recommended by the standard. Previously it was restricted to accepting [0-9]. -- Issue #6106: telnetlib.Telnet.process_rawq doesn't handle default WILL/WONT +- Issue #6106: telnetlib.Telnet.process_rawq doesn't handle default WILL/WON'T DO/DONT correctly. - Issue #1424152: Fix for http.client, urllib.request to support SSL while @@ -17280,7 +17280,7 @@ Core and Builtins * nb_divide, nb_inplace_divide * operator.div, operator.idiv, operator.__div__, operator.__idiv__ (Only __truediv__ and __floordiv__ remain, not sure how to handle - them if we want to re-use __div__ and friends. If we do, it will + them if we want to reuse __div__ and friends. If we do, it will make it harder to write code for both 2.x and 3.x.) - 'as' and 'with' are keywords. @@ -17957,7 +17957,7 @@ Core and builtins attribute on objects until one without one is found. This leads to recursion when you take a class and set its __call__ attribute to an instance of the class. Originally fixed for classic classes, but this fix is for new-style. - Removes the infinite_rec_3 crasher. + Removes the infinite_rec_3 crash. - The string and unicode methods startswith() and endswith() now accept a tuple of prefixes/suffixes to look for. Implements RFE #1491485. @@ -20460,7 +20460,7 @@ Core and builtins the overallocation is no more than three elements -- this improves space utilization for applications that have large numbers of small lists. -- Most list bodies now get re-used rather than freed. Speeds up list +- Most list bodies now get reused rather than freed. Speeds up list instantiation and deletion by saving calls to malloc() and free(). 
- The dict.update() method now accepts all the same argument forms @@ -20892,7 +20892,7 @@ Library allow any iterable. - _strptime.py now has a behind-the-scenes caching mechanism for the most - recent TimeRE instance used along with the last five unique directive + recent TimeRE instance used along with the last five unique directive patterns. The overall module was also made more thread-safe. - random.cunifvariate() and random.stdgamma() were deprecated in Py2.3 @@ -21081,7 +21081,7 @@ Library - Lib/encodings/rot_13.py when used as a script, now more properly uses the first Python interpreter on your path. -- Removed caching of TimeRE (and thus LocaleTime) in _strptime.py to +- Removed caching of TimeRE (and thus LocaleTime) in _strptime.py to fix a locale related bug in the test suite. Although another patch was needed to actually fix the problem, the cache code was not restored. @@ -21342,7 +21342,7 @@ Core and builtins thread mutated the dict during __delitem__, or if a comparison function mutated it. It also neglected to raise KeyError when the key wasn't present; didn't raise TypeError when the key wasn't of a weakly - referencable type; and broke various more-or-less obscure dict + referenceable type; and broke various more-or-less obscure dict invariants by using a sequence of equality comparisons over the whole set of dict keys instead of computing the key's hash code to narrow the search to those keys with the same hash code. All of these are @@ -25106,8 +25106,8 @@ Python/C API - Extensions types which support weak references must now set the field allocated for the weak reference machinery to NULL themselves; this is done to avoid the cost of checking each object for having a - weakly referencable type in PyObject_INIT(), since most types are - not weakly referencable. + weakly referenceable type in PyObject_INIT(), since most types are + not weakly referenceable. - PyFrame_FastToLocals() and PyFrame_LocalsToFast() copy bindings for free variables and cell variables to and from the frame's f_locals. @@ -31115,7 +31115,7 @@ __del__" under certain circumstances have been fixed (mostly by changes elsewher in the interpreter). - In urlparse.py, there is a cache for results in urlparse.urlparse(); -its size limit is set to 20. Also, new URL schemes shttp, https, and +its size limit is set to 20. Also, new URL schemes shttp, https, and snews are "supported". - shelve.py: use cPickle and cStringIO when available. Also added diff --git a/Misc/NEWS.d/3.10.0a3.rst b/Misc/NEWS.d/3.10.0a3.rst index 6cf3db3eb43c8b..b143d5868d5f6b 100644 --- a/Misc/NEWS.d/3.10.0a3.rst +++ b/Misc/NEWS.d/3.10.0a3.rst @@ -636,7 +636,7 @@ find_module(), and load_module(). .. section: Library Mock objects which are not unsafe will now raise an AttributeError if an -attribute with the prefix asert, aseert, or assrt is accessed, in addition +attribute with the prefix asert, aseert, or assrt is accessed, in addition to this already happening for the prefixes assert or assret. .. diff --git a/Misc/NEWS.d/3.10.0a7.rst b/Misc/NEWS.d/3.10.0a7.rst index d866e805fd3a7e..d7ed53f8060c7f 100644 --- a/Misc/NEWS.d/3.10.0a7.rst +++ b/Misc/NEWS.d/3.10.0a7.rst @@ -534,7 +534,7 @@ during connection setup. .. nonce: cee_X5 .. section: Library -Improve performance of :class:`fractions.Fraction` arithmetics for large +Improve performance of :class:`fractions.Fraction` arithmetic for large components. Contributed by Sergey B. Kirpichev. ..
diff --git a/Misc/NEWS.d/3.12.0a4.rst b/Misc/NEWS.d/3.12.0a4.rst index 57fb2052764b6f..f49f09765f65df 100644 --- a/Misc/NEWS.d/3.12.0a4.rst +++ b/Misc/NEWS.d/3.12.0a4.rst @@ -429,7 +429,7 @@ different set of index file names instead of using ``__init__`` parameters. when accessing an attribute that matches the name of an assertion but without the prefix ``assert_``, e.g. accessing ``called_once`` instead of ``assert_called_once``. This is in addition to this already happening for -accessing attributes with prefixes ``assert``, ``assret``, ``asert``, +accessing attributes with prefixes ``assert``, ``assret``, ``asert``, ``aseert``, and ``assrt``. .. @@ -871,7 +871,7 @@ Fix reStructuredText syntax errors in docstrings in the :mod:`enum` module. .. nonce: Jd47V6 .. section: Library -Optimize the :class:`~fractions.Fraction` arithmetics for small components. +Optimize the :class:`~fractions.Fraction` arithmetic for small components. .. diff --git a/Misc/NEWS.d/3.13.0b1.rst b/Misc/NEWS.d/3.13.0b1.rst index 97731276679ba6..46879b1b0fb9f1 100644 --- a/Misc/NEWS.d/3.13.0b1.rst +++ b/Misc/NEWS.d/3.13.0b1.rst @@ -340,7 +340,7 @@ contain lambdas. .. nonce: 8LpZ6m .. section: Core and Builtins -Prevent ``agen.aclose()`` objects being re-used after ``.throw()``. +Prevent ``agen.aclose()`` objects being reused after ``.throw()``. .. diff --git a/Misc/NEWS.d/3.14.0a1.rst b/Misc/NEWS.d/3.14.0a1.rst index 67451a7e0087cb..54cbcd70b0bd2d 100644 --- a/Misc/NEWS.d/3.14.0a1.rst +++ b/Misc/NEWS.d/3.14.0a1.rst @@ -234,7 +234,7 @@ tool .. nonce: HW8CIS .. section: Tests -Update ``Lib/test/crashers/bogus_code_obj.py`` so that it crashes properly +Update ``Lib/test/crashers/bogus_code_obj.py`` so that it crashes properly again. .. @@ -5231,7 +5231,7 @@ both reference ``__class__``. .. nonce: D9EE-o .. section: Core and Builtins -JIT: Re-use trampolines on AArch64 when creating stencils. Patch by Diego +JIT: Reuse trampolines on AArch64 when creating stencils. Patch by Diego Russo .. diff --git a/Misc/NEWS.d/3.14.0a7.rst b/Misc/NEWS.d/3.14.0a7.rst index 35b96d33da4175..1eb3b43748ddd5 100644 --- a/Misc/NEWS.d/3.14.0a7.rst +++ b/Misc/NEWS.d/3.14.0a7.rst @@ -976,7 +976,7 @@ Fix mimalloc library builds for 32-bit ARM targets. .. nonce: 2BgHU5 .. section: Build -clang-cl on Windows needs option ``/EHa`` to support SEH (structured +clang-cl on Windows needs option ``/EHa`` to support SEH (structured exception handling) correctly. Fix by Chris Eibl. .. diff --git a/Misc/NEWS.d/3.14.0b1.rst b/Misc/NEWS.d/3.14.0b1.rst index 041fbaf2051719..02ceb82b556386 100644 --- a/Misc/NEWS.d/3.14.0b1.rst +++ b/Misc/NEWS.d/3.14.0b1.rst @@ -1756,7 +1756,7 @@ Add support for macOS multi-arch builds with the JIT enabled .. nonce: q9fvyM .. section: Core and Builtins -PyREPL now supports syntax highlighing. Contributed by Łukasz Langa. +PyREPL now supports syntax highlighting. Contributed by Łukasz Langa. .. @@ -1797,7 +1797,7 @@ non-``None`` ``closure``. Patch by Bartosz Sławecki. .. nonce: Uj7lyY .. section: Core and Builtins -Fix a bug that was allowing newlines inconsitently in format specifiers for +Fix a bug that was allowing newlines inconsistently in format specifiers for single-quoted f-strings. Patch by Pablo Galindo. .. diff --git a/Misc/NEWS.d/3.5.0a3.rst b/Misc/NEWS.d/3.5.0a3.rst index a81d67aea8663b..e62ceaa1335684 100644 --- a/Misc/NEWS.d/3.5.0a3.rst +++ b/Misc/NEWS.d/3.5.0a3.rst @@ -270,7 +270,7 @@ until the garbage collector cleans them up. Patch by Martin Panter. ..
section: Library collections.deque() objects now support methods for index(), insert(), and -copy(). This allows deques to be registered as a MutableSequence and it +copy(). This allows dequeues to be registered as a MutableSequence and it improves their substitutability for lists. .. diff --git a/Misc/NEWS.d/3.5.1rc1.rst b/Misc/NEWS.d/3.5.1rc1.rst index 05e1ecfaf6bc79..ec684bfb8b8d54 100644 --- a/Misc/NEWS.d/3.5.1rc1.rst +++ b/Misc/NEWS.d/3.5.1rc1.rst @@ -138,7 +138,7 @@ subclassed through multiple inheritance. .. nonce: HlUSuy .. section: Core and Builtins -Fixed a number of bugs in UTF-7 decoding of misformed data. +Fixed a number of bugs in UTF-7 decoding of malformed data. .. @@ -1085,7 +1085,7 @@ them a 'sheet'. Patch by Mark Roseman. Enhance the initial html viewer now used for Idle Help. Properly indent fixed-pitch text (patch by Mark Roseman). Give code snippet a very -Sphinx-like light blueish-gray background. Re-use initial width and height +Sphinx-like light blueish-gray background. Reuse initial width and height set by users for shell and editor. When the Table of Contents (TOC) menu is used, put the section header at the top of the screen. diff --git a/Misc/NEWS.d/3.5.2rc1.rst b/Misc/NEWS.d/3.5.2rc1.rst index f9409b62e352ac..a50edd49311e9f 100644 --- a/Misc/NEWS.d/3.5.2rc1.rst +++ b/Misc/NEWS.d/3.5.2rc1.rst @@ -262,7 +262,7 @@ compiler issues. .. nonce: j9zand .. section: Core and Builtins -Deque.insert() gave odd results for bounded deques that had reached their +Deque.insert() gave odd results for bounded dequeues that had reached their maximum size. Now an IndexError will be raised when attempting to insert into a full deque. diff --git a/Misc/NEWS.d/3.6.0a1.rst b/Misc/NEWS.d/3.6.0a1.rst index 803c9fc5925fa6..b628b63f99c0d9 100644 --- a/Misc/NEWS.d/3.6.0a1.rst +++ b/Misc/NEWS.d/3.6.0a1.rst @@ -751,7 +751,7 @@ The UTF-8 decoder is now up to 15 times as fast for error handlers: .. nonce: HlUSuy .. section: Core and Builtins -Fixed a number of bugs in UTF-7 decoding of misformed data. +Fixed a number of bugs in UTF-7 decoding of malformed data. .. @@ -3329,7 +3329,7 @@ them a 'sheet'. Patch by Mark Roseman. Enhance the initial html viewer now used for Idle Help. Properly indent fixed-pitch text (patch by Mark Roseman). Give code snippet a very -Sphinx-like light blueish-gray background. Re-use initial width and height set by +Sphinx-like light blueish-gray background. Reuse initial width and height set by users for shell and editor. When the Table of Contents (TOC) menu is used, put the section header at the top of the screen. diff --git a/Misc/NEWS.d/3.9.0a1.rst b/Misc/NEWS.d/3.9.0a1.rst index cc24bae5881df1..796322ae42cc4b 100644 --- a/Misc/NEWS.d/3.9.0a1.rst +++ b/Misc/NEWS.d/3.9.0a1.rst @@ -414,7 +414,7 @@ The select module is now PEP-384 compliant and no longer has static state .. nonce: yZXC3P .. section: Core and Builtins -ast module updated to PEP-384 and all statics removed +ast module updated to PEP-384 and all statistics removed .. diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst index 5c0813b1a0abda..767d7b97726971 100644 --- a/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst +++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst @@ -1 +1 @@ -Fix PyREPL syntax highlightning on match cases after multi-line case. Contributed by Olga Matoula. 
+Fix PyREPL syntax highlighting on match cases after multi-line case. Contributed by Olga Matoula. diff --git a/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst b/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst index 0a0d66ac0b8abf..ddc2310392fe92 100644 --- a/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst +++ b/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst @@ -1,2 +1,2 @@ -Fix pickling failures for protocols 0 and 1 for many objects realted to +Fix pickling failures for protocols 0 and 1 for many objects related to subinterpreters. diff --git a/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst b/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst index 25599a865b7246..ebe3ab0e7d1993 100644 --- a/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst +++ b/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst @@ -1,4 +1,4 @@ The cases generator no longer accepts type annotations on stack items. -Conversions to non-default types are now done explictly in bytecodes.c and +Conversions to non-default types are now done explicitly in bytecodes.c and optimizer_bytecodes.c. This will simplify code generation for top-of-stack caching and other future features. diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in index 86c8eb27c0a6c7..3c6ee659e744af 100644 --- a/Modules/Setup.stdlib.in +++ b/Modules/Setup.stdlib.in @@ -85,7 +85,7 @@ # # Since the compilation of the built-in cryptographic modules depends # on whether we are building on WASI or not, rules will be explicitly -# written. In the future, it should be preferrable to be able to setup +# written. In the future, it should be preferable to be able to setup # the relevant bits here instead of in Makefile.pre.in or configure.ac. # Hash functions can be disabled with --without-builtin-hashlib-hashes. diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c index 3ba48d5d9d3c64..e5086c922fa1b4 100644 --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -117,7 +117,7 @@ class dequeobject_converter(self_converter): * d.rightindex become indices into distinct blocks and either may * be larger than the other. * - * Empty deques have: + * Empty dequeues have: * d.len == 0 * d.leftblock == d.rightblock * d.leftindex == CENTER + 1 @@ -139,7 +139,7 @@ struct dequeobject { Py_ssize_t leftindex; /* 0 <= leftindex < BLOCKLEN */ Py_ssize_t rightindex; /* 0 <= rightindex < BLOCKLEN */ size_t state; /* incremented whenever the indices move */ - Py_ssize_t maxlen; /* maxlen is -1 for unbounded deques */ + Py_ssize_t maxlen; /* maxlen is -1 for unbounded dequeues */ Py_ssize_t numfreeblocks; block *freeblocks[MAXFREEBLOCKS]; PyObject *weakreflist; @@ -2551,7 +2551,7 @@ _collections__count_elements_impl(PyObject *module, PyObject *mapping, while (1) { /* Fast path advantages: 1. Eliminate double hashing - (by re-using the same hash for both the get and set) + (by reusing the same hash for both the get and set) 2. Avoid argument overhead of PyObject_CallFunctionObjArgs (argument tuple creation and parsing) 3. 
Avoid indirection through a bound method object diff --git a/Modules/_ctypes/ctypes.h b/Modules/_ctypes/ctypes.h index 5b4f97d43b8721..eef57a75d07160 100644 --- a/Modules/_ctypes/ctypes.h +++ b/Modules/_ctypes/ctypes.h @@ -68,7 +68,7 @@ #endif #ifdef MS_WIN32 -#include // for IUnknown interface +#include // for IUnknown interface #endif typedef struct { diff --git a/Modules/_datetimemodule.c b/Modules/_datetimemodule.c index 7a6426593d021f..11bf170b16ad81 100644 --- a/Modules/_datetimemodule.c +++ b/Modules/_datetimemodule.c @@ -478,7 +478,7 @@ days_before_year(int year) static void ord_to_ymd(int ordinal, int *year, int *month, int *day) { - int n, n1, n4, n100, n400, leapyear, preceding; + int n, n1, n4, n100, n400, leapyear, preceding; /* ordinal is a 1-based index, starting at 1-Jan-1. The pattern of * leap years repeats exactly every 400 years. The basic strategy is @@ -542,10 +542,10 @@ ord_to_ymd(int ordinal, int *year, int *month, int *day) * find the month via an estimate that's either exact or one too * large. */ - leapyear = n1 == 3 && (n4 != 24 || n100 == 3); - assert(leapyear == is_leap(*year)); + leapyear = n1 == 3 && (n4 != 24 || n100 == 3); + assert(leapyear == is_leap(*year)); *month = (n + 50) >> 5; - preceding = (_days_before_month[*month] + (*month > 2 && leapyear)); + preceding = (_days_before_month[*month] + (*month > 2 && leapyear)); if (preceding > n) { /* estimate is too large */ *month -= 1; diff --git a/Modules/_decimal/libmpdec/basearith.h b/Modules/_decimal/libmpdec/basearith.h index d35925aaddb48e..f5ad0b4f449fee 100644 --- a/Modules/_decimal/libmpdec/basearith.h +++ b/Modules/_decimal/libmpdec/basearith.h @@ -110,27 +110,27 @@ _mpd_div_words_r(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo) l = l + n_adj; if (l < n_adj) h++; t = h + hi; - /* At this point t == qest, with q == qest or q == qest+1: - * 1) 0 <= 2**64*hi + lo - qest*MPD_RADIX < 2*MPD_RADIX + /* At this point t == qest, with q == qest or q == qest+1: + * 1) 0 <= 2**64*hi + lo - qest*MPD_RADIX < 2*MPD_RADIX */ - /* t = 2**64-1 - qest = 2**64 - (qest+1) */ + /* t = 2**64-1 - qest = 2**64 - (qest+1) */ t = MPD_UINT_MAX - t; - /* (h, l) = 2**64*MPD_RADIX - (qest+1)*MPD_RADIX */ + /* (h, l) = 2**64*MPD_RADIX - (qest+1)*MPD_RADIX */ _mpd_mul_words(&h, &l, t, MPD_RADIX); l = l + lo; if (l < lo) h++; h += hi; h -= MPD_RADIX; - /* (h, l) = 2**64*hi + lo - (qest+1)*MPD_RADIX (mod 2**128) - * Case q == qest+1: + /* (h, l) = 2**64*hi + lo - (qest+1)*MPD_RADIX (mod 2**128) + * Case q == qest+1: * a) h == 0, l == r - * b) q := h - t == qest+1 + * b) q := h - t == qest+1 * c) r := l - * Case q == qest: + * Case q == qest: * a) h == MPD_UINT_MAX, l == 2**64-(MPD_RADIX-r) - * b) q := h - t == qest + * b) q := h - t == qest * c) r := l + MPD_RADIX = r */ diff --git a/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt b/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt index ba804e4b4e7864..11e4927b5ce637 100644 --- a/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt +++ b/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt @@ -54,29 +54,29 @@ relative error of 2**(1-F): Calculate an estimate for q = floor(n/p). The multiplication has another maximum relative error of 2**(1-F): - (9) qest = n * pinv + (9) qest = n * pinv -If we can show that q < qest < q+1, then trunc(qest) = q. It is then +If we can show that q < qest < q+1, then trunc(qest) = q. It is then easy to recover the remainder r.
The complete algorithm is: a) Set the control word to 64-bit precision and truncation mode. b) n = a * b # Calculate exact product. - c) qest = n * pinv # Calculate estimate for the quotient. + c) qest = n * pinv # Calculate estimate for the quotient. - d) q = (qest+2**63)-2**63 # Truncate qest to the exact quotient. + d) q = (qest+2**63)-2**63 # Truncate qest to the exact quotient. f) r = n - q * p # Calculate remainder. -Proof for q < qest < q+1: +Proof for q < qest < q+1: ------------------------- -Using the cumulative error, the error bounds for qest are: +Using the cumulative error, the error bounds for qest are: n n * (1 + 2**(1-F))**2 - (9) --------------------- <= qest <= --------------------- + (9) --------------------- <= qest <= --------------------- p * (1 + 2**(1-F))**2 p diff --git a/Modules/_decimal/libmpdec/umodarith.h b/Modules/_decimal/libmpdec/umodarith.h index d7dbbbe6a7331a..e4fe2dc35ba26b 100644 --- a/Modules/_decimal/libmpdec/umodarith.h +++ b/Modules/_decimal/libmpdec/umodarith.h @@ -380,8 +380,8 @@ std_powmod(mpd_uint_t base, mpd_uint_t exp, mpd_uint_t umod) * pinv := (long double)1.0 / p (precalculated) * * a) n = a * b # Calculate exact product. - * b) qest = n * pinv # Calculate estimate for q = n / p. - * c) q = (qest+2**63)-2**63 # Truncate qest to the exact quotient. + * b) qest = n * pinv # Calculate estimate for q = n / p. + * c) q = (qest+2**63)-2**63 # Truncate qest to the exact quotient. * d) r = n - q * p # Calculate remainder. * * Remarks: diff --git a/Modules/_decimal/tests/bench.py b/Modules/_decimal/tests/bench.py index 6605e9a92e2dde..ed562cb79b7c80 100644 --- a/Modules/_decimal/tests/bench.py +++ b/Modules/_decimal/tests/bench.py @@ -74,9 +74,9 @@ def _increase_int_max_str_digits(func, maxdigits=maxdigits): def wrapper(*args, **kwargs): previous_int_limit = sys.get_int_max_str_digits() sys.set_int_max_str_digits(maxdigits) - ans = func(*args, **kwargs) + ans = func(*args, **kwargs) sys.set_int_max_str_digits(previous_int_limit) - return ans + return ans return wrapper return _increase_int_max_str_digits diff --git a/Modules/_decimal/tests/bignum.py b/Modules/_decimal/tests/bignum.py index a67e161ddf098f..2d232a362536fd 100644 --- a/Modules/_decimal/tests/bignum.py +++ b/Modules/_decimal/tests/bignum.py @@ -27,8 +27,8 @@ def xhash(coeff, exp): else: exp_hash = pow(_PyHASH_10INV, -exp, _PyHASH_MODULUS) hash_ = coeff * exp_hash % _PyHASH_MODULUS - ans = hash_ if sign == 1 else -hash_ - return -2 if ans == -1 else ans + ans = hash_ if sign == 1 else -hash_ + return -2 if ans == -1 else ans x = mpz(10) ** 425000000 - 1 diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c index 1c888295cb07f1..e4d32415d2be0c 100644 --- a/Modules/_functoolsmodule.c +++ b/Modules/_functoolsmodule.c @@ -503,7 +503,7 @@ partial_vectorcall(PyObject *self, PyObject *const *args, assert(i == pto_nkwds); Py_XDECREF(pto_kw_merged); - /* Resize Stack if the removing overallocation saves some noticable memory + /* Resize Stack if the removing overallocation saves some noticeable memory * NOTE: This whole block can be removed without breaking anything */ Py_ssize_t noveralloc = n_merges + nkwds; if (stack != small_stack && noveralloc > 6 && noveralloc > init_stack_size / 10) { diff --git a/Modules/_pickle.c b/Modules/_pickle.c index cf3ceb43fb3f3f..1c6a5900ec1df4 100644 --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -4650,7 +4650,7 @@ Clears the pickler's "memo".
The memo is the data structure that remembers which objects the pickler has already seen, so that shared or recursive objects are pickled by reference and not by value. This method is useful when -re-using picklers. +reusing picklers. [clinic start generated code]*/ static PyObject * diff --git a/Modules/_ssl.c b/Modules/_ssl.c index 24c243e330d4bf..bd78ccb72db4f1 100644 --- a/Modules/_ssl.c +++ b/Modules/_ssl.c @@ -197,7 +197,7 @@ extern const SSL_METHOD *TLSv1_2_method(void); * ECDH+*: enable ephemeral elliptic curve Diffie-Hellman * DHE+*: fallback to ephemeral finite field Diffie-Hellman * encryption order: AES AEAD (GCM), ChaCha AEAD, AES CBC - * !aNULL:!eNULL: really no NULL ciphers + * !aNULL:!eNULL: really no NULL ciphers * !aDSS: no authentication with discrete logarithm DSA algorithm * !SHA1: no weak SHA1 MAC * !AESCCM: no CCM mode, it's uncommon and slow @@ -205,7 +205,7 @@ extern const SSL_METHOD *TLSv1_2_method(void); * Based on Hynek's excellent blog post (update 2021-02-11) * https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ */ - #define PY_SSL_DEFAULT_CIPHER_STRING "@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM" + #define PY_SSL_DEFAULT_CIPHER_STRING "@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM" #ifndef PY_SSL_MIN_PROTOCOL #define PY_SSL_MIN_PROTOCOL TLS1_2_VERSION #endif @@ -3450,7 +3450,7 @@ _ssl__SSLContext_impl(PyTypeObject *type, int proto_version) #endif } else { /* SSLv2 needs MD5 */ - result = SSL_CTX_set_cipher_list(ctx, "HIGH:!aNULL:!eNULL"); + result = SSL_CTX_set_cipher_list(ctx, "HIGH:!aNULL:!eNULL"); } if (result == 0) { ERR_clear_error(); diff --git a/Modules/_testinternalcapi.c b/Modules/_testinternalcapi.c index 533e7dd3a7ec00..b5eea0b6194f5d 100644 --- a/Modules/_testinternalcapi.c +++ b/Modules/_testinternalcapi.c @@ -497,7 +497,7 @@ test_bytes_find(PyObject *self, PyObject *Py_UNUSED(args)) CHECK("ython", "thon", 1, 2); CHECK("thon", "thon", 2, 2); CHECK("hon", "thon", 3, -1); - CHECK("Pytho", "zz", 0, -1); + CHECK("Pytho", "zz", 0, -1); CHECK("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "ab", 0, -1); CHECK("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "ba", 0, -1); CHECK("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bb", 0, -1); @@ -891,7 +891,7 @@ write_perf_map_entry(PyObject *self, PyObject *args) unsigned int code_size; const char *entry_name; - if (!PyArg_ParseTuple(args, "OIs", &code_addr_v, &code_size, &entry_name)) + if (!PyArg_ParseTuple(args, "OIs", &code_addr_v, &code_size, &entry_name)) return NULL; code_addr = PyLong_AsVoidPtr(code_addr_v); if (code_addr == NULL) { diff --git a/Modules/_zstd/_zstdmodule.c b/Modules/_zstd/_zstdmodule.c index d75c0779474a82..59aece588ce8ca 100644 --- a/Modules/_zstd/_zstdmodule.c +++ b/Modules/_zstd/_zstdmodule.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 @@ -497,7 +497,7 @@ _zstd.get_frame_info frame_buffer: Py_buffer A bytes-like object, containing the header of a Zstandard frame. -Get Zstandard frame infomation from a frame header. +Get Zstandard frame information from a frame header.
[clinic start generated code]*/ static PyObject * diff --git a/Modules/_zstd/_zstdmodule.h b/Modules/_zstd/_zstdmodule.h index 4e8f708f2232c7..82226ff8718e6b 100644 --- a/Modules/_zstd/_zstdmodule.h +++ b/Modules/_zstd/_zstdmodule.h @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* Declarations shared between different parts of the _zstd module*/ diff --git a/Modules/_zstd/buffer.h b/Modules/_zstd/buffer.h index 4c885fa0d720fd..0ac7bcb4ddc416 100644 --- a/Modules/_zstd/buffer.h +++ b/Modules/_zstd/buffer.h @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ #ifndef ZSTD_BUFFER_H #define ZSTD_BUFFER_H diff --git a/Modules/_zstd/clinic/_zstdmodule.c.h b/Modules/_zstd/clinic/_zstdmodule.c.h index 766e1cfa776767..081ea728001757 100644 --- a/Modules/_zstd/clinic/_zstdmodule.c.h +++ b/Modules/_zstd/clinic/_zstdmodule.c.h @@ -289,7 +289,7 @@ PyDoc_STRVAR(_zstd_get_frame_info__doc__, "get_frame_info($module, /, frame_buffer)\n" "--\n" "\n" -"Get Zstandard frame infomation from a frame header.\n" +"Get Zstandard frame information from a frame header.\n" "\n" " frame_buffer\n" " A bytes-like object, containing the header of a Zstandard frame."); diff --git a/Modules/_zstd/compressor.c b/Modules/_zstd/compressor.c index bc9e6eff89af68..dcd4513f9a7414 100644 --- a/Modules/_zstd/compressor.c +++ b/Modules/_zstd/compressor.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* ZstdCompressor class definitions */ @@ -713,7 +713,7 @@ _zstd_ZstdCompressor_set_pledged_input_size_impl(ZstdCompressor *self, unsigned long long size) /*[clinic end generated code: output=3a09e55cc0e3b4f9 input=afd8a7d78cff2eb5]*/ { - // Error occured while converting argument, should be unreachable + // Error occurred while converting argument, should be unreachable assert(size != ZSTD_CONTENTSIZE_ERROR); /* Thread-safe code */ diff --git a/Modules/_zstd/decompressor.c b/Modules/_zstd/decompressor.c index c53d6e4cb05cf0..b00ee05d2f51bf 100644 --- a/Modules/_zstd/decompressor.c +++ b/Modules/_zstd/decompressor.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* ZstdDecompressor class definition */ diff --git a/Modules/_zstd/zstddict.c b/Modules/_zstd/zstddict.c index 14f74aaed46ec5..35d6ca8e55a265 100644 --- a/Modules/_zstd/zstddict.c +++ b/Modules/_zstd/zstddict.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* ZstdDict class definitions */ diff --git a/Modules/_zstd/zstddict.h b/Modules/_zstd/zstddict.h index 4a403416dbd4a3..e0d3f46b2b14a6 100644 --- a/Modules/_zstd/zstddict.h +++ b/Modules/_zstd/zstddict.h @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. 
*/ #ifndef ZSTD_DICT_H #define ZSTD_DICT_H diff --git a/Modules/cjkcodecs/_codecs_iso2022.c b/Modules/cjkcodecs/_codecs_iso2022.c index ef6faeb71274e1..41466dd19acea8 100644 --- a/Modules/cjkcodecs/_codecs_iso2022.c +++ b/Modules/cjkcodecs/_codecs_iso2022.c @@ -140,15 +140,15 @@ struct iso2022_designation { struct iso2022_config { int flags; - const struct iso2022_designation *designations; /* non-ascii desigs */ + const struct iso2022_designation *designations; /* non-ascii desigs */ }; /*-*- iso-2022 codec implementation -*-*/ CODEC_INIT(iso2022) { - const struct iso2022_designation *desig; - for (desig = CONFIG_DESIGNATIONS; desig->mark; desig++) { + const struct iso2022_designation *desig; + for (desig = CONFIG_DESIGNATIONS; desig->mark; desig++) { if (desig->initializer != NULL && desig->initializer(codec) != 0) { return -1; } } diff --git a/Modules/clinic/_pickle.c.h b/Modules/clinic/_pickle.c.h index 213e817a50a287..94220e47198735 100644 --- a/Modules/clinic/_pickle.c.h +++ b/Modules/clinic/_pickle.c.h @@ -17,7 +17,7 @@ PyDoc_STRVAR(_pickle_Pickler_clear_memo__doc__, "The memo is the data structure that remembers which objects the\n" "pickler has already seen, so that shared or recursive objects are\n" "pickled by reference and not by value. This method is useful when\n" -"re-using picklers."); +"reusing picklers."); #define _PICKLE_PICKLER_CLEAR_MEMO_METHODDEF \ {"clear_memo", (PyCFunction)_pickle_Pickler_clear_memo, METH_NOARGS, _pickle_Pickler_clear_memo__doc__}, diff --git a/Modules/hmacmodule.c b/Modules/hmacmodule.c index 95e400231bb65c..b11e97e52dde8b 100644 --- a/Modules/hmacmodule.c +++ b/Modules/hmacmodule.c @@ -649,7 +649,7 @@ find_hash_info(hmacmodule_state *state, PyObject *hash_info_ref) { const py_hmac_hinfo *info = NULL; int rc = find_hash_info_impl(state, hash_info_ref, &info); - // The code below could be simplfied with only 'rc == 0' case, + // The code below could be simplified with only 'rc == 0' case, // but we are deliberately verbose to ease future improvements.
if (rc < 0) { return NULL; diff --git a/Modules/itertoolsmodule.c b/Modules/itertoolsmodule.c index cc1a558001563c..5806185db44e84 100644 --- a/Modules/itertoolsmodule.c +++ b/Modules/itertoolsmodule.c @@ -2128,7 +2128,7 @@ product_next_lock_held(PyObject *op) } else { Py_ssize_t *indices = lz->indices; - /* Copy the previous result tuple or re-use it if available */ + /* Copy the previous result tuple or reuse it if available */ if (Py_REFCNT(result) > 1) { PyObject *old_result = result; result = _PyTuple_FromArray(_PyTuple_ITEMS(old_result), npools); @@ -2367,7 +2367,7 @@ combinations_next_lock_held(PyObject *op) PyTuple_SET_ITEM(result, i, elem); } } else { - /* Copy the previous result tuple or re-use it if available */ + /* Copy the previous result tuple or reuse it if available */ if (Py_REFCNT(result) > 1) { PyObject *old_result = result; result = _PyTuple_FromArray(_PyTuple_ITEMS(old_result), r); @@ -2623,7 +2623,7 @@ cwr_next(PyObject *op) } } } else { - /* Copy the previous result tuple or re-use it if available */ + /* Copy the previous result tuple or reuse it if available */ if (Py_REFCNT(result) > 1) { PyObject *old_result = result; result = _PyTuple_FromArray(_PyTuple_ITEMS(old_result), r); @@ -2884,7 +2884,7 @@ permutations_next(PyObject *op) if (n == 0) goto empty; - /* Copy the previous result tuple or re-use it if available */ + /* Copy the previous result tuple or reuse it if available */ if (Py_REFCNT(result) > 1) { PyObject *old_result = result; result = _PyTuple_FromArray(_PyTuple_ITEMS(old_result), r); diff --git a/Modules/mathmodule.c b/Modules/mathmodule.c index 7c2a421dd6a450..ee94f031899238 100644 --- a/Modules/mathmodule.c +++ b/Modules/mathmodule.c @@ -2328,7 +2328,7 @@ static PyObject * math_log(PyObject *module, PyObject * const *args, Py_ssize_t nargs) { PyObject *num, *den; - PyObject *ans; + PyObject *ans; if (!_PyArg_CheckPositional("log", nargs, 1, 2)) return NULL; @@ -2343,10 +2343,10 @@ math_log(PyObject *module, PyObject * const *args, Py_ssize_t nargs) return NULL; } - ans = PyNumber_TrueDivide(num, den); + ans = PyNumber_TrueDivide(num, den); Py_DECREF(num); Py_DECREF(den); - return ans; + return ans; } PyDoc_STRVAR(math_log_doc, diff --git a/Modules/mmapmodule.c b/Modules/mmapmodule.c index 142ff1a21316ab..44899198a90c38 100644 --- a/Modules/mmapmodule.c +++ b/Modules/mmapmodule.c @@ -15,7 +15,7 @@ / This version of mmapmodule.c has been changed significantly / from the original mmapfile.c on which it was based. / The original version of mmapfile is maintained by Sam at - / ftp://squirl.nightmare.com/pub/python/python-ext. + / ftp://squirl.nightmare.com/pub/python/python-ext. */ #ifndef Py_BUILD_CORE_BUILTIN diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c index 47eaf5cd428a53..29ff229df718e3 100644 --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -5830,7 +5830,7 @@ os_nice_impl(PyObject *module, int increment) /* There are two flavours of 'nice': one that returns the new priority (as required by almost all standards out there) and the - Linux/FreeBSD one, which returns '0' on success and advices + Linux/FreeBSD one, which returns '0' on success and advises the use of getpriority() to get the new priority.
If we are of the nice family that returns the new priority, we diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c index f3ad01854de93b..f115c1bff136ca 100644 --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -6125,7 +6125,7 @@ gethost_common(socket_state *state, struct hostent *h, struct sockaddr *addr, name = sock_decode_hostname(h->h_name); if (name == NULL) goto err; - rtn_tuple = Py_BuildValue("NOO", name, name_list, addr_list); + rtn_tuple = Py_BuildValue("NO", name, name_list, addr_list); err: Py_XDECREF(name_list); diff --git a/Objects/clinic/unicodeobject.c.h b/Objects/clinic/unicodeobject.c.h index 1819fbaea220a3..162d7af6c9f9a0 100644 --- a/Objects/clinic/unicodeobject.c.h +++ b/Objects/clinic/unicodeobject.c.h @@ -604,9 +604,9 @@ PyDoc_STRVAR(unicode_isalnum__doc__, "isalnum($self, /)\n" "--\n" "\n" -"Return True if the string is an alpha-numeric string, False otherwise.\n" +"Return True if the string is an alphanumeric string, False otherwise.\n" "\n" -"A string is alpha-numeric if all characters in the string are alpha-numeric and\n" +"A string is alphanumeric if all characters in the string are alphanumeric and\n" "there is at least one character in the string."); #define UNICODE_ISALNUM_METHODDEF \ diff --git a/Objects/codeobject.c b/Objects/codeobject.c index 42e021679b583f..223bb747ca2de4 100644 --- a/Objects/codeobject.c +++ b/Objects/codeobject.c @@ -2012,7 +2012,7 @@ _PyCode_CheckNoExternalState(PyCodeObject *co, _PyCode_var_counts_t *counts, errmsg = "globals not supported"; } // Otherwise we don't check counts.unbound.globals.numunknown since we can't - // distinguish beween globals and builtins here. + // distinguish between globals and builtins here. if (errmsg != NULL) { if (p_errmsg != NULL) { @@ -2123,7 +2123,7 @@ code_returns_only_none(PyCodeObject *co) for (int i = 0; i < len; i += _PyInstruction_GetLength(co, i)) { _Py_CODEUNIT inst = _Py_GetBaseCodeUnit(co, i); if (IS_RETURN_OPCODE(inst.op.code)) { - // We alraedy know it isn't returning None. + // We already know it isn't returning None. return 0; } } diff --git a/Objects/dictnotes.txt b/Objects/dictnotes.txt index db6a3cf1d634b0..838c6b334ea330 100644 --- a/Objects/dictnotes.txt +++ b/Objects/dictnotes.txt @@ -56,7 +56,7 @@ Membership Testing Dynamic Mappings Characterized by deletions interspersed with adds and replacements. - Performance benefits greatly from the re-use of dummy entries. + Performance benefits greatly from the reuse of dummy entries. Data Layout ----------- diff --git a/Objects/dictobject.c b/Objects/dictobject.c index 0ed52ac5e87b6e..f41337c7b7666e 100644 --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -81,7 +81,7 @@ DK_ENTRIES(keys)[index] if index >= 0): Active upon key insertion. Dummy slots cannot be made Unused again else the probe sequence in case of collision would have no way to know they were once active. - In free-threaded builds dummy slots are not re-used to allow lock-free + In free-threaded builds dummy slots are not reused to allow lock-free lookups to proceed safely. 4. Pending. 
index >= 0, key != NULL, and value == NULL (split only) @@ -659,7 +659,7 @@ _PyDict_CheckConsistency(PyObject *op, int check_content) PyDictObject *mp = (PyDictObject *)op; PyDictKeysObject *keys = mp->ma_keys; - int splitted = _PyDict_HasSplitTable(mp); + int split = _PyDict_HasSplitTable(mp); Py_ssize_t usable = USABLE_FRACTION(DK_SIZE(keys)); // In the free-threaded build, shared keys may be concurrently modified, @@ -672,7 +672,7 @@ _PyDict_CheckConsistency(PyObject *op, int check_content) CHECK(0 <= dk_nentries && dk_nentries <= usable); CHECK(dk_usable + dk_nentries <= usable); - if (!splitted) { + if (!split) { /* combined table */ CHECK(keys->dk_kind != DICT_KEYS_SPLIT); CHECK(keys->dk_refcnt == 1 || keys == Py_EMPTY_KEYS); @@ -721,20 +721,20 @@ _PyDict_CheckConsistency(PyObject *op, int check_content) CHECK(PyUnicode_CheckExact(key)); Py_hash_t hash = unicode_get_hash(key); CHECK(hash != -1); - if (!splitted) { + if (!split) { CHECK(entry->me_value != NULL); } } - if (splitted) { + if (split) { CHECK(entry->me_value == NULL); } } } - if (splitted) { + if (split) { CHECK(mp->ma_used <= SHARED_KEYS_MAX_SIZE); - /* splitted table */ + /* split table */ int duplicate_check = 0; for (Py_ssize_t i=0; i < mp->ma_used; i++) { int index = get_index_from_order(mp, i); @@ -7218,7 +7218,7 @@ set_dict_inline_values(PyObject *obj, PyDictObject *new_dict) #ifdef Py_GIL_DISABLED -// Trys and sets the dictionary for an object in the easy case when our current +// Tries and sets the dictionary for an object in the easy case when our current // dictionary is either completely not materialized or is a dictionary which // does not point at the inline values. static bool diff --git a/Objects/exceptions.c b/Objects/exceptions.c index b17cac83551670..31d84e4ba6e94b 100644 --- a/Objects/exceptions.c +++ b/Objects/exceptions.c @@ -3698,7 +3698,7 @@ UnicodeDecodeError_init(PyObject *self, PyObject *args, PyObject *kwds) if (PyObject_GetBuffer(object, &view, PyBUF_SIMPLE) != 0) { return -1; } - // 'object' is borrowed, so we can re-use the variable + // 'object' is borrowed, so we can reuse the variable object = PyBytes_FromStringAndSize(view.buf, view.len); PyBuffer_Release(&view); if (object == NULL) { diff --git a/Objects/listobject.c b/Objects/listobject.c index 1b36f4c25abf4d..1d9041aa9ecc22 100644 --- a/Objects/listobject.c +++ b/Objects/listobject.c @@ -2304,7 +2304,7 @@ merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, min_gallop = ms->min_gallop; for (;;) { - Py_ssize_t acount = 0; /* # of times A won in a row */ + Py_ssize_t account = 0; /* # of times A won in a row */ Py_ssize_t bcount = 0; /* # of times B won in a row */ /* Do the straightforward thing until (if ever) one run @@ -2318,7 +2318,7 @@ merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, goto Fail; sortslice_copy_incr(&dest, &ssb); ++bcount; - acount = 0; + account = 0; --nb; if (nb == 0) goto Succeed; @@ -2327,12 +2327,12 @@ merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, } else { sortslice_copy_incr(&dest, &ssa); - ++acount; + ++account; bcount = 0; --na; if (na == 1) goto CopyB; - if (acount >= min_gallop) + if (account >= min_gallop) break; } } @@ -2348,7 +2348,7 @@ merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, min_gallop -= min_gallop > 1; ms->min_gallop = min_gallop; k = gallop_right(ms, ssb.keys[0], ssa.keys, na, 0); - acount = k; + account = k; if (k) { if (k < 0) goto Fail; @@ -2386,7 +2386,7 @@ merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, --na; if (na == 1) goto CopyB; - } while (acount >= 
MIN_GALLOP || bcount >= MIN_GALLOP); + } while (account >= MIN_GALLOP || bcount >= MIN_GALLOP); ++min_gallop; /* penalize it for leaving galloping mode */ ms->min_gallop = min_gallop; } @@ -2442,7 +2442,7 @@ merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na, min_gallop = ms->min_gallop; for (;;) { - Py_ssize_t acount = 0; /* # of times A won in a row */ + Py_ssize_t account = 0; /* # of times A won in a row */ Py_ssize_t bcount = 0; /* # of times B won in a row */ /* Do the straightforward thing until (if ever) one run @@ -2455,18 +2455,18 @@ merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na, if (k < 0) goto Fail; sortslice_copy_decr(&dest, &ssa); - ++acount; + ++account; bcount = 0; --na; if (na == 0) goto Succeed; - if (acount >= min_gallop) + if (account >= min_gallop) break; } else { sortslice_copy_decr(&dest, &ssb); ++bcount; - acount = 0; + account = 0; --nb; if (nb == 1) goto CopyA; @@ -2489,7 +2489,7 @@ merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na, if (k < 0) goto Fail; k = na - k; - acount = k; + account = k; if (k) { sortslice_advance(&dest, -k); sortslice_advance(&ssa, -k); @@ -2526,7 +2526,7 @@ merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na, --na; if (na == 0) goto Succeed; - } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP); + } while (account >= MIN_GALLOP || bcount >= MIN_GALLOP); ++min_gallop; /* penalize it for leaving galloping mode */ ms->min_gallop = min_gallop; } diff --git a/Objects/listsort.txt b/Objects/listsort.txt index 5b2fc7d50a25ca..4dd0955e35810b 100644 --- a/Objects/listsort.txt +++ b/Objects/listsort.txt @@ -721,7 +721,7 @@ wildly unbalanced runs already enjoys excellent performance. ~sort is a good example of when balanced runs could benefit from a better hint value: to the extent possible, this would like to use a starting -offset equal to the previous value of acount/bcount. Doing so saves about +offset equal to the previous value of account/bcount. Doing so saves about 10% of the compares in ~sort. However, doing so is also a mixed bag, hurting other cases. diff --git a/Objects/longobject.c b/Objects/longobject.c index 581db10b54ab57..1e73b6a442303c 100644 --- a/Objects/longobject.c +++ b/Objects/longobject.c @@ -2155,7 +2155,7 @@ long_to_decimal_string_internal(PyObject *aa, /* convert array of base _PyLong_BASE digits in pin to an array of base _PyLong_DECIMAL_BASE digits in pout, following Knuth (TAOCP, - Volume 2 (3rd edn), section 4.4, Method 1b). */ + Volume 2 (3rd end), section 4.4, Method 1b). */ pin = a->long_value.ob_digit; pout = scratch->long_value.ob_digit; size = 0; @@ -3312,7 +3312,7 @@ x_divrem(PyLongObject *v1, PyLongObject *w1, PyLongObject **prem) stwodigits z; /* We follow Knuth [The Art of Computer Programming, Vol. 2 (3rd - edn.), section 4.3.1, Algorithm D], except that we don't explicitly + end.), section 4.3.1, Algorithm D], except that we don't explicitly handle the special case when the initial estimate q for a quotient digit is >= PyLong_BASE: the max value for q is PyLong_BASE+1, and that won't overflow a digit. 
*/ diff --git a/Objects/mimalloc/arena.c b/Objects/mimalloc/arena.c index 5db5d950c43b68..858ff319fb2c70 100644 --- a/Objects/mimalloc/arena.c +++ b/Objects/mimalloc/arena.c @@ -644,11 +644,11 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi mi_assert_internal(arena->blocks_purge != NULL); if (!all_committed) { - // mark the entire range as no longer committed (so we recommit the full range when re-using) + // mark the entire range as no longer committed (so we recommit the full range when reusing) _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx); mi_track_mem_noaccess(p,size); if (committed_size > 0) { - // if partially committed, adjust the committed stats (is it will be recommitted when re-using) + // if partially committed, adjust the committed stats (is it will be recommitted when reusing) // in the delayed purge, we now need to not count a decommit if the range is not marked as committed. _mi_stat_decrease(&stats->committed, committed_size); } diff --git a/Objects/mimalloc/os.c b/Objects/mimalloc/os.c index c9103168c12507..cdd4a091d5d469 100644 --- a/Objects/mimalloc/os.c +++ b/Objects/mimalloc/os.c @@ -476,7 +476,7 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { // either resets or decommits memory, returns true if the memory needs -// to be recommitted if it is to be re-used later on. +// to be recommitted if it is to be reused later on. bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) { if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed? @@ -499,7 +499,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) } // either resets or decommits memory, returns true if the memory needs -// to be recommitted if it is to be re-used later on. +// to be recommitted if it is to be reused later on. bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) { return _mi_os_purge_ex(p, size, true, stats); } diff --git a/Objects/mimalloc/prim/windows/etw-mimalloc.wprp b/Objects/mimalloc/prim/windows/etw-mimalloc.wprp index b00cd7adf2285c..26d24d953199cd 100644 --- a/Objects/mimalloc/prim/windows/etw-mimalloc.wprp +++ b/Objects/mimalloc/prim/windows/etw-mimalloc.wprp @@ -14,7 +14,7 @@ - + diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index deb7fd957e57dd..92364137b9e347 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -128,7 +128,7 @@ _PyMem_mi_page_is_safe_to_free(mi_page_t *page) // If we are deferring collection of more than this amount of memory for // mimalloc pages, advance the write sequence. Advancing allows these -// pages to be re-used in a different thread or for a different size class. +// pages to be reused in a different thread or for a different size class. 
#define QSBR_PAGE_MEM_LIMIT 4096*20 // Return true if the global write sequence should be advanced for a mimalloc @@ -1233,7 +1233,7 @@ free_delayed(uintptr_t ptr, size_t size) struct _mem_work_chunk *buf = NULL; if (!llist_empty(head)) { - // Try to re-use the last buffer + // Try to reuse the last buffer buf = llist_data(head->prev, struct _mem_work_chunk, node); if (buf->wr_idx == WORK_ITEMS_PER_CHUNK) { // already full diff --git a/Objects/typeobject.c b/Objects/typeobject.c index 379c4d0467c487..ce7d6c6a9c093d 100644 --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -4880,7 +4880,7 @@ type_new_impl(type_new_ctx *ctx) assert(_PyType_CheckConsistency(type)); #if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4 - // After this point, other threads can potentally use this type. + // After this point, other threads can potentially use this type. ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG; #endif @@ -5597,7 +5597,7 @@ PyType_FromMetaclass( assert(_PyType_CheckConsistency(type)); #if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4 - // After this point, other threads can potentally use this type. + // After this point, other threads can potentially use this type. ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG; #endif diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c index 5c2308a012142a..22a095e05e363c 100644 --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -2918,7 +2918,7 @@ unicode_fromformat_arg(_PyUnicodeWriter *writer, { char buffer[MAX_INTMAX_CHARS]; - // Fill buffer using sprinf, with one of many possible format + // Fill buffer using sprintf, with one of many possible format // strings, like "%llX" for `long long` in hexadecimal. // The type/size is in `sizemod`; the format is in `*f`. @@ -7771,7 +7771,7 @@ decode_code_page_errors(UINT code_page, const char *in, const int size, const char *errors, int final) { - const char *startin = in; + const char *starting = in; const char *endin = in + size; DWORD flags = MB_ERR_INVALID_CHARS; /* Ideally, we should get reason from FormatMessage. This is the Windows @@ -7854,13 +7854,13 @@ decode_code_page_errors(UINT code_page, if (in + insize >= endin && !final) break; - startinpos = in - startin; + startinpos = in - starting; endinpos = startinpos + 1; outpos = out - *buf; if (unicode_decode_call_errorhandler_wchar( errors, &errorHandler, encoding, reason, - &startin, &endin, &startinpos, &endinpos, &exc, &in, + &starting, &endin, &startinpos, &endinpos, &exc, &in, buf, bufsize, &outpos)) { goto error; @@ -7877,8 +7877,8 @@ decode_code_page_errors(UINT code_page, /* Shrink the buffer */ assert(out - *buf <= *bufsize); *bufsize = out - *buf; - /* (in - startin) <= size and size is an int */ - ret = Py_SAFE_DOWNCAST(in - startin, Py_ssize_t, int); + /* (in - starting) <= size and size is an int */ + ret = Py_SAFE_DOWNCAST(in - starting, Py_ssize_t, int); error: Py_XDECREF(encoding_obj); @@ -12252,9 +12252,9 @@ unicode_isalpha_impl(PyObject *self) /*[clinic input] str.isalnum as unicode_isalnum -Return True if the string is an alpha-numeric string, False otherwise. +Return True if the string is an alphanumeric string, False otherwise. -A string is alpha-numeric if all characters in the string are alpha-numeric and +A string is alphanumeric if all characters in the string are alphanumeric and there is at least one character in the string. 
[clinic start generated code]*/ @@ -14289,7 +14289,7 @@ unicode_getnewargs(PyObject *v, PyObject *Py_UNUSED(ignored)) } /* -This function searchs the longest common leading whitespace +This function searches the longest common leading whitespace of all lines in the [src, end). It returns the length of the common leading whitespace and sets `output` to point to the beginning of the common leading whitespace if length > 0. diff --git a/PC/winreg.c b/PC/winreg.c index d1a1c3d1c97850..1c3145aaee2b7a 100644 --- a/PC/winreg.c +++ b/PC/winreg.c @@ -26,7 +26,7 @@ typedef struct { /* Forward declares */ -static BOOL PyHKEY_AsHKEY(winreg_state *st, PyObject *ob, HKEY *pRes, BOOL bNoneOK); +static BOOL PyHKEY_AsHKEY(winreg_state *st, PyObject *ob, HKEY *press, BOOL bNoneOK); static BOOL clinic_HKEY_converter(winreg_state *st, PyObject *ob, void *p); static PyObject *PyHKEY_FromHKEY(winreg_state *st, HKEY h); static BOOL PyHKEY_Close(winreg_state *st, PyObject *obHandle); diff --git a/PCbuild/pyproject-clangcl.props b/PCbuild/pyproject-clangcl.props index 70a81ca7da306c..6b2c663bd8cc5c 100644 --- a/PCbuild/pyproject-clangcl.props +++ b/PCbuild/pyproject-clangcl.props @@ -41,7 +41,7 @@ diff --git a/PCbuild/pyproject.props b/PCbuild/pyproject.props index cf35e705f355a7..f3fb0ad44c1705 100644 --- a/PCbuild/pyproject.props +++ b/PCbuild/pyproject.props @@ -127,7 +127,7 @@ diff --git a/Parser/lexer/lexer.c b/Parser/lexer/lexer.c index 81363cf8e810fe..ace1bced5bd733 100644 --- a/Parser/lexer/lexer.c +++ b/Parser/lexer/lexer.c @@ -137,7 +137,7 @@ set_ftstring_expr(struct tok_state* tok, struct token *token, char c) { // Handle quotes if (ch == '"' || ch == '\'') { - // The following if/else block works becase there is an off number + // The following if/else block works because there is an off number // of quotes in STRING tokens and the lexer only ever reaches this // function with valid STRING tokens. // For example: """hello""" diff --git a/Parser/pegen.c b/Parser/pegen.c index 50641de27d37fd..fc08425a93503c 100644 --- a/Parser/pegen.c +++ b/Parser/pegen.c @@ -346,7 +346,7 @@ _PyPegen_get_memo_statistics(void) #endif int // bool -_PyPegen_is_memoized(Parser *p, int type, void *pres) +_PyPegen_is_memoized(Parser *p, int type, void *press) { if (p->mark == p->fill) { if (_PyPegen_fill_token(p) < 0) { @@ -372,7 +372,7 @@ _PyPegen_is_memoized(Parser *p, int type, void *pres) } #endif p->mark = m->mark; - *(void **)(pres) = m->node; + *(void **)(press) = m->node; return 1; } } diff --git a/Parser/pegen.h b/Parser/pegen.h index 804f931871aec8..b5048583c44852 100644 --- a/Parser/pegen.h +++ b/Parser/pegen.h @@ -143,7 +143,7 @@ PyObject *_PyPegen_get_memo_statistics(void); int _PyPegen_insert_memo(Parser *p, int mark, int type, void *node); int _PyPegen_update_memo(Parser *p, int mark, int type, void *node); -int _PyPegen_is_memoized(Parser *p, int type, void *pres); +int _PyPegen_is_memoized(Parser *p, int type, void *press); int _PyPegen_lookahead(int, void *(func)(Parser *), Parser *); int _PyPegen_lookahead_for_expr(int, expr_ty (func)(Parser *), Parser *); diff --git a/Python/ceval.c b/Python/ceval.c index 291e753dec0ce5..5c21d7e9e49080 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -1371,9 +1371,9 @@ too_many_positional(PyThreadState *tstate, PyCodeObject *co, } Py_ssize_t defcount = defaults == NULL ? 
0 : PyTuple_GET_SIZE(defaults); if (defcount) { - Py_ssize_t atleast = co_argcount - defcount; + Py_ssize_t at least = co_argcount - defcount; plural = 1; - sig = PyUnicode_FromFormat("from %zd to %zd", atleast, co_argcount); + sig = PyUnicode_FromFormat("from %zd to %zd", at least, co_argcount); } else { plural = (co_argcount != 1); diff --git a/Python/codecs.c b/Python/codecs.c index caf8d9d5f3c188..451d83d4ba2f6e 100644 --- a/Python/codecs.c +++ b/Python/codecs.c @@ -926,17 +926,17 @@ PyObject *PyCodec_XMLCharRefReplaceErrors(PyObject *exc) slen = Py_MAX(0, end - start); } - Py_ssize_t ressize = 0; + Py_ssize_t resize = 0; for (Py_ssize_t i = start; i < end; ++i) { Py_UCS4 ch = PyUnicode_READ_CHAR(obj, i); int k = n_decimal_digits_for_codepoint(ch); assert(k != 0); assert(k <= 7); - ressize += 2 + k + 1; + resize += 2 + k + 1; } /* allocate replacement */ - PyObject *res = PyUnicode_New(ressize, 127); + PyObject *res = PyUnicode_New(resize, 127); if (res == NULL) { Py_DECREF(obj); return NULL; @@ -993,12 +993,12 @@ _PyCodec_BackslashReplaceUnicodeEncodeError(PyObject *exc) slen = Py_MAX(0, end - start); } - Py_ssize_t ressize = 0; + Py_ssize_t resize = 0; for (Py_ssize_t i = start; i < end; ++i) { Py_UCS4 c = PyUnicode_READ_CHAR(obj, i); - ressize += codec_handler_unicode_hex_width(c); + resize += codec_handler_unicode_hex_width(c); } - PyObject *res = PyUnicode_New(ressize, 127); + PyObject *res = PyUnicode_New(resize, 127); if (res == NULL) { Py_DECREF(obj); return NULL; @@ -1097,7 +1097,7 @@ PyObject *PyCodec_NameReplaceErrors(PyObject *exc) } char buffer[256]; /* NAME_MAXLEN in unicodename_db.h */ - Py_ssize_t imax = start, ressize = 0, replsize; + Py_ssize_t imax = start, resize = 0, replsize; for (; imax < end; ++imax) { Py_UCS4 c = PyUnicode_READ_CHAR(obj, imax); if (ucnhash_capi->getname(c, buffer, sizeof(buffer), 1)) { @@ -1109,13 +1109,13 @@ PyObject *PyCodec_NameReplaceErrors(PyObject *exc) else { replsize = codec_handler_unicode_hex_width(c); } - if (ressize > PY_SSIZE_T_MAX - replsize) { + if (resize > PY_SSIZE_T_MAX - replsize) { break; } - ressize += replsize; + resize += replsize; } - PyObject *res = PyUnicode_New(ressize, 127); + PyObject *res = PyUnicode_New(resize, 127); if (res == NULL) { Py_DECREF(obj); return NULL; @@ -1137,7 +1137,7 @@ PyObject *PyCodec_NameReplaceErrors(PyObject *exc) } } - assert(outp == PyUnicode_1BYTE_DATA(res) + ressize); + assert(outp == PyUnicode_1BYTE_DATA(res) + resize); assert(_PyUnicode_CheckConsistency(res, 1)); PyObject *restuple = Py_BuildValue("(Nn)", res, imax); Py_DECREF(obj); diff --git a/Python/crossinterp.c b/Python/crossinterp.c index 16a23f0351cd26..ce361ae363e5fb 100644 --- a/Python/crossinterp.c +++ b/Python/crossinterp.c @@ -772,7 +772,7 @@ _PyPickle_GetXIData(PyThreadState *tstate, PyObject *obj, _PyXIData_t *xidata) return -1; } - // If we had an "unwrapper" mechnanism, we could call + // If we had an "unwrapper" mechanism, we could call // _PyObject_GetXIData() on the bytes object directly and add // a simple unwrapper to call pickle.loads() on the bytes. size_t size = sizeof(struct _shared_pickle_data); @@ -3176,7 +3176,7 @@ _PyXI_InitTypes(PyInterpreterState *interp) "failed to initialize the cross-interpreter exception types"); } // We would initialize heap types here too but that leads to ref leaks. - // Instead, we intialize them in _PyXI_Init(). + // Instead, we initialize them in _PyXI_Init(). 
return _PyStatus_OK(); } diff --git a/Python/dynamic_annotations.c b/Python/dynamic_annotations.c index 7febaa09df1950..bca89a30535a7a 100644 --- a/Python/dynamic_annotations.c +++ b/Python/dynamic_annotations.c @@ -143,7 +143,7 @@ static int GetRunningOnValgrind(void) { /* See the comments in dynamic_annotations.h */ int RunningOnValgrind(void) { static volatile int running_on_valgrind = -1; - /* C doesn't have thread-safe initialization of statics, and we + /* C doesn't have thread-safe initialization of statistics, and we don't want to depend on pthread_once here, so hack it. */ int local_running_on_valgrind = running_on_valgrind; if (local_running_on_valgrind == -1) diff --git a/Python/gc.c b/Python/gc.c index 4160f68c27a3ef..d485a0d0b17e6f 100644 --- a/Python/gc.c +++ b/Python/gc.c @@ -1,6 +1,6 @@ // This implements the reference cycle garbage collector. // The Python module interface to the collector is in gcmodule.c. -// See InternalDocs/garbage_collector.md for more infromation. +// See InternalDocs/garbage_collector.md for more information. #include "Python.h" #include "pycore_ceval.h" // _Py_set_eval_breaker_bit() diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index 0b0ddf227e4952..9a9e28c5982fd6 100644 --- a/Python/gc_free_threading.c +++ b/Python/gc_free_threading.c @@ -529,7 +529,7 @@ static_assert(BUFFER_HI < BUFFER_SIZE && BUFFER_LO > 0, "Invalid prefetch buffer level settings."); -// Prefetch intructions will fetch the line of data from memory that +// Prefetch instructions will fetch the line of data from memory that // contains the byte specified with the source operand to a location in // the cache hierarchy specified by a locality hint. The instruction // is only a hint and the CPU is free to ignore it. Instructions and @@ -581,7 +581,7 @@ static_assert(BUFFER_HI < BUFFER_SIZE && #define prefetch(ptr) #endif -// a contigous sequence of PyObject pointers, can contain NULLs +// a contiguous sequence of PyObject pointers, can contain NULLs typedef struct { PyObject **start; PyObject **end; @@ -750,7 +750,7 @@ gc_mark_enqueue(PyObject *op, gc_mark_args_t *args) } } -// Called when we have a contigous sequence of PyObject pointers, either +// Called when we have a contiguous sequence of PyObject pointers, either // a tuple or list object. This will add the items to the buffer if there // is space for them all otherwise push a new "span" on the span stack. Using // spans has the advantage of not creating a deep _PyObjectStack stack when diff --git a/Python/import.c b/Python/import.c index 73b94d0dd2a1b1..6dfa2c135d95a8 100644 --- a/Python/import.c +++ b/Python/import.c @@ -2636,7 +2636,7 @@ module_dict_for_exec(PyThreadState *tstate, PyObject *name) if (m == NULL) return NULL; /* If the module is being reloaded, we get the old module back - and re-use its dict to exec the new code. */ + and reuse its dict to exec the new code. */ d = PyModule_GetDict(m); int r = PyDict_Contains(d, &_Py_ID(__builtins__)); if (r == 0) { diff --git a/Python/perf_trampoline.c b/Python/perf_trampoline.c index a2da3c7d56df50..edd02582f31d4f 100644 --- a/Python/perf_trampoline.c +++ b/Python/perf_trampoline.c @@ -110,7 +110,7 @@ achieve this we have a assembly template in Objects/asm_trampiline.S that is compiled into the Python executable/shared library. This template generates a symbol that maps the start of the assembly code and another that marks the end of the assembly code for the trampoline. 
Then, every time we need a unique -trampoline for a Python code object, we copy the assembly code into a mmaped +trampoline for a Python code object, we copy the assembly code into a mapped area that has executable permissions and we return the start of that area as our trampoline function. diff --git a/Python/pystate.c b/Python/pystate.c index 0d4c26f92cec90..2a948498ea9cd1 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -309,7 +309,7 @@ holds_gil(PyThreadState *tstate) /* Suppress deprecation warning for PyBytesObject.ob_shash */ _Py_COMP_DIAG_PUSH _Py_COMP_DIAG_IGNORE_DEPR_DECLS -/* We use "initial" if the runtime gets re-used +/* We use "initial" if the runtime gets reused (e.g. Py_Finalize() followed by Py_Initialize(). Note that we initialize "initial" relative to _PyRuntime, to ensure pre-initialized pointers point to the active @@ -543,7 +543,7 @@ init_interpreter(PyInterpreterState *interp, interp->threads.preallocated = &interp->_initial_thread; // We would call _PyObject_InitState() at this point - // if interp->feature_flags were alredy set. + // if interp->feature_flags were already set. _PyEval_InitState(interp); _PyGC_InitState(&interp->gc); diff --git a/Python/pytime.c b/Python/pytime.c index 67cf6437264490..7b9cd17dd58a7d 100644 --- a/Python/pytime.c +++ b/Python/pytime.c @@ -75,14 +75,14 @@ _PyTime_GCD(PyTime_t x, PyTime_t y) int -_PyTimeFraction_Set(_PyTimeFraction *frac, PyTime_t numer, PyTime_t denom) +_PyTimeFraction_Set(_PyTimeFraction *frac, PyTime_t number, PyTime_t denom) { - if (numer < 1 || denom < 1) { + if (number < 1 || denom < 1) { return -1; } - PyTime_t gcd = _PyTime_GCD(numer, denom); - frac->numer = numer / gcd; + PyTime_t gcd = _PyTime_GCD(number, denom); + frac->number = number / gcd; frac->denom = denom / gcd; return 0; } @@ -91,7 +91,7 @@ _PyTimeFraction_Set(_PyTimeFraction *frac, PyTime_t numer, PyTime_t denom) double _PyTimeFraction_Resolution(const _PyTimeFraction *frac) { - return (double)frac->numer / (double)frac->denom / 1e9; + return (double)frac->number / (double)frac->denom / 1e9; } @@ -179,7 +179,7 @@ _PyTime_Mul(PyTime_t t, PyTime_t k) PyTime_t _PyTimeFraction_Mul(PyTime_t ticks, const _PyTimeFraction *frac) { - const PyTime_t mul = frac->numer; + const PyTime_t mul = frac->number; const PyTime_t div = frac->denom; if (div == 1) { @@ -1099,13 +1099,13 @@ py_mach_timebase_info(_PyTimeFraction *base) // fail: https://developer.apple.com/library/mac/#qa/qa1398/ (void)mach_timebase_info(&timebase); - // Check that timebase.numer and timebase.denom can be casted to + // Check that timebase.number and timebase.denom can be casted to // PyTime_t. In practice, timebase uses uint32_t, so casting cannot // overflow. At the end, only make sure that the type is uint32_t // (PyTime_t is 64-bit long). 
- Py_BUILD_ASSERT(sizeof(timebase.numer) <= sizeof(PyTime_t)); + Py_BUILD_ASSERT(sizeof(timebase.number) <= sizeof(PyTime_t)); Py_BUILD_ASSERT(sizeof(timebase.denom) <= sizeof(PyTime_t)); - PyTime_t numer = (PyTime_t)timebase.numer; + PyTime_t number = (PyTime_t)timebase.number; PyTime_t denom = (PyTime_t)timebase.denom; // Known time bases: @@ -1113,7 +1113,7 @@ py_mach_timebase_info(_PyTimeFraction *base) // * (1, 1) on Intel: 1 ns // * (1000000000, 33333335) on PowerPC: ~30 ns // * (1000000000, 25000000) on PowerPC: 40 ns - if (_PyTimeFraction_Set(base, numer, denom) < 0) { + if (_PyTimeFraction_Set(base, number, denom) < 0) { return _PyStatus_ERR("invalid mach_timebase_info"); } return PyStatus_Ok(); diff --git a/Python/remote_debug.h b/Python/remote_debug.h index 5324a7aaa6f5e5..fa6e149e82f8ca 100644 --- a/Python/remote_debug.h +++ b/Python/remote_debug.h @@ -1119,7 +1119,7 @@ _Py_RemoteDebug_PagedReadRemoteMemory(proc_handle_t *handle, } if (_Py_RemoteDebug_ReadRemoteMemory(handle, page_base, page_size, entry->data) < 0) { - // Try to just copy the exact ammount as a fallback + // Try to just copy the exact amount as a fallback PyErr_Clear(); goto fallback; } diff --git a/Python/specialize.c b/Python/specialize.c index fe8d04cf3442f1..04f50c9ea8ee4c 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -2544,7 +2544,7 @@ static _PyBinaryOpSpecializationDescr binaryop_extend_descrs[] = { {NB_INPLACE_AND, compactlongs_guard, compactlongs_and}, {NB_INPLACE_XOR, compactlongs_guard, compactlongs_xor}, - /* float-long arithemetic */ + /* float-long arithmetic */ {NB_ADD, float_compactlong_guard, float_compactlong_add}, {NB_SUBTRACT, float_compactlong_guard, float_compactlong_subtract}, {NB_TRUE_DIVIDE, nonzero_float_compactlong_guard, float_compactlong_true_div}, diff --git a/Python/uniqueid.c b/Python/uniqueid.c index 64c3e6cfbbe825..cce847604e841d 100644 --- a/Python/uniqueid.c +++ b/Python/uniqueid.c @@ -7,7 +7,7 @@ #include "pycore_uniqueid.h" // This contains code for allocating unique ids for per-thread reference -// counting and re-using those ids when an object is deallocated. +// counting and reusing those ids when an object is deallocated. // // Currently, per-thread reference counting is only used for heap types. 
// diff --git a/Tools/build/deepfreeze.py b/Tools/build/deepfreeze.py index 2b9f03aebb6d7e..0f218942a74cdc 100644 --- a/Tools/build/deepfreeze.py +++ b/Tools/build/deepfreeze.py @@ -118,7 +118,7 @@ def __init__(self, file: TextIO) -> None: self.file = file self.cache: dict[tuple[type, object, str], str] = {} self.hits, self.misses = 0, 0 - self.finis: list[str] = [] + self.finish: list[str] = [] self.inits: list[str] = [] self.identifiers, self.strings = self.get_identifiers_and_strings() self.write('#include "Python.h"') @@ -316,7 +316,7 @@ def generate_code(self, name: str, code: types.CodeType) -> str: first_traceable += 1 self.write(f"._co_firsttraceable = {first_traceable},") name_as_code = f"(PyCodeObject *)&{name}" - self.finis.append(f"_PyStaticCode_Fini({name_as_code});") + self.finish.append(f"_PyStaticCode_Fini({name_as_code});") self.inits.append(f"_PyStaticCode_Init({name_as_code})") return f"& {name}.ob_base.ob_base" @@ -488,7 +488,7 @@ def generate(args: list[str], output: TextIO) -> None: code = compile(fd.read(), f"", "exec") printer.generate_file(modname, code) with printer.block(f"void\n_Py_Deepfreeze_Fini(void)"): - for p in printer.finis: + for p in printer.finish: printer.write(p) with printer.block(f"int\n_Py_Deepfreeze_Init(void)"): for p in printer.inits: diff --git a/Tools/build/freeze_modules.py b/Tools/build/freeze_modules.py index 3c43f7e3bbe8ca..b9acac12c98c70 100644 --- a/Tools/build/freeze_modules.py +++ b/Tools/build/freeze_modules.py @@ -36,7 +36,7 @@ TESTS_SECTION = 'Test module' FROZEN = [ # See parse_frozen_spec() for the format. - # In cases where the frozenid is duplicated, the first one is re-used. + # In cases where the frozenid is duplicated, the first one is reused. ('import system', [ # These frozen modules are necessary for bootstrapping # the import system. diff --git a/Tools/build/generate-build-details.py b/Tools/build/generate-build-details.py index 8cd23e2f54f529..55fc5cfac93735 100644 --- a/Tools/build/generate-build-details.py +++ b/Tools/build/generate-build-details.py @@ -155,7 +155,7 @@ def make_paths_relative(data: dict[str, Any], config_path: str | None = None) -> continue # Get the relative path new_path = os.path.relpath(current_path, data['base_prefix']) - # Join '.' so that the path is formated as './path' instead of 'path' + # Join '.' 
so that the path is formatted as './path' instead of 'path' new_path = os.path.join('.', new_path) container[child] = new_path diff --git a/Tools/build/parse_html5_entities.py b/Tools/build/parse_html5_entities.py index aca98497381a43..38ec8f191a624e 100755 --- a/Tools/build/parse_html5_entities.py +++ b/Tools/build/parse_html5_entities.py @@ -40,12 +40,12 @@ def compare_dicts(old, new): """Compare the old and new dicts and print the differences.""" added = new.keys() - old.keys() if added: - print(f'{len(added)} entitie(s) have been added:') + print(f'{len(added)} entity(s) have been added:') for name in sorted(added): print(f' {name!r}: {new[name]!r}') removed = old.keys() - new.keys() if removed: - print(f'{len(removed)} entitie(s) have been removed:') + print(f'{len(removed)} entity(s) have been removed:') for name in sorted(removed): print(f' {name!r}: {old[name]!r}') changed = set() @@ -53,7 +53,7 @@ def compare_dicts(old, new): if old[name] != new[name]: changed.add((name, old[name], new[name])) if changed: - print(f'{len(changed)} entitie(s) have been modified:') + print(f'{len(changed)} entity(s) have been modified:') for item in sorted(changed): print(' {!r}: {!r} -> {!r}'.format(*item)) diff --git a/Tools/cases_generator/stack.py b/Tools/cases_generator/stack.py index 3a0e7e5d0d5636..cd231b12c7f5e7 100644 --- a/Tools/cases_generator/stack.py +++ b/Tools/cases_generator/stack.py @@ -540,11 +540,11 @@ def for_uop(stack: Stack, uop: Uop, out: CWriter, check_liveness: bool = True) - inputs.reverse() peeks.reverse() offset = stack.logical_sp - stack.physical_sp - for ouput in uop.stack.outputs: - if ouput.is_array() and ouput.used and not ouput.peek: + for output in uop.stack.outputs: + if output.is_array() and output.used and not output.peek: c_offset = offset.to_c() - out.emit(f"{ouput.name} = &stack_pointer[{c_offset}];\n") - offset = offset.push(ouput) + out.emit(f"{output.name} = &stack_pointer[{c_offset}];\n") + offset = offset.push(output) for var in inputs: stack.push(var) outputs = peeks + [ Local.undefined(var) for var in uop.stack.outputs if not var.peek ] diff --git a/Tools/cases_generator/tier1_generator.py b/Tools/cases_generator/tier1_generator.py index 32dc346d5e891a..7cd95ed9b32691 100644 --- a/Tools/cases_generator/tier1_generator.py +++ b/Tools/cases_generator/tier1_generator.py @@ -201,7 +201,7 @@ def generate_tier1_labels( analysis: Analysis, emitter: Emitter ) -> None: emitter.emit("\n") - # Emit tail-callable labels as function defintions + # Emit tail-callable labels as function definitions for name, label in analysis.labels.items(): emitter.emit(f"LABEL({name})\n") storage = Storage(Stack(), [], [], 0, False) diff --git a/Tools/i18n/pygettext.py b/Tools/i18n/pygettext.py index f46b05067d7fde..b55b1db56ce875 100755 --- a/Tools/i18n/pygettext.py +++ b/Tools/i18n/pygettext.py @@ -687,7 +687,7 @@ def main(): try: opts, args = getopt.getopt( sys.argv[1:], - 'ac::d:DEhk:Kno:p:S:Vvw:x:X:', + 'ac::d:DEhk:Know:p:S:Vvw:x:X:', ['extract-all', 'add-comments=?', 'default-domain=', 'escape', 'help', 'keyword=', 'no-default-keywords', 'add-location', 'no-location', 'output=', 'output-dir=', diff --git a/Tools/peg_generator/pegen/grammar_visualizer.py b/Tools/peg_generator/pegen/grammar_visualizer.py index 11f784f45b66b8..0c48ce7bb7860a 100644 --- a/Tools/peg_generator/pegen/grammar_visualizer.py +++ b/Tools/peg_generator/pegen/grammar_visualizer.py @@ -33,15 +33,15 @@ def print_nodes_recursively(self, node: Rule, prefix: str = "", istail: bool = T value = self.name(node) 
line = prefix + ("└──" if istail else "├──") + value + "\n" - sufix = " " if istail else "│ " + suffix = " " if istail else "│ " if not children: return line *children, last = children for child in children: - line += self.print_nodes_recursively(child, prefix + sufix, False) - line += self.print_nodes_recursively(last, prefix + sufix, True) + line += self.print_nodes_recursively(child, prefix + suffix, False) + line += self.print_nodes_recursively(last, prefix + suffix, True) return line diff --git a/Tools/scripts/combinerefs.py b/Tools/scripts/combinerefs.py index 848bae5658ca3a..75d36f232f979b 100755 --- a/Tools/scripts/combinerefs.py +++ b/Tools/scripts/combinerefs.py @@ -39,10 +39,10 @@ repr is repr(object), extracted from the first PYTHONDUMPREFS output block. CAUTION: If object is a container type, it may not actually contain all the objects shown in the repr: the repr was captured from the first output block, -and some of the containees may have been released since then. For example, +and some of the containers may have been released since then. For example, it's common for the line showing the dict of interned strings to display strings that no longer exist at the end of Py_FinalizeEx; this can be recognized -(albeit painfully) because such containees don't have a line of their own. +(albeit painfully) because such containers don't have a line of their own. The objects are listed in allocation order, with most-recently allocated printed first, and the first object allocated printed last. diff --git a/Tools/unicode/python-mappings/GB2312.TXT b/Tools/unicode/python-mappings/GB2312.TXT index 334b4cdb94863d..1782946a76361b 100644 --- a/Tools/unicode/python-mappings/GB2312.TXT +++ b/Tools/unicode/python-mappings/GB2312.TXT @@ -507,7 +507,7 @@ 0x2628 0x0398 # GREEK CAPITAL LETTER THETA 0x2629 0x0399 # GREEK CAPITAL LETTER IOTA 0x262A 0x039A # GREEK CAPITAL LETTER KAPPA -0x262B 0x039B # GREEK CAPITAL LETTER LAMDA +0x262B 0x039B # GREEK CAPITAL LETTER LAMBDA 0x262C 0x039C # GREEK CAPITAL LETTER MU 0x262D 0x039D # GREEK CAPITAL LETTER NU 0x262E 0x039E # GREEK CAPITAL LETTER XI @@ -531,7 +531,7 @@ 0x2648 0x03B8 # GREEK SMALL LETTER THETA 0x2649 0x03B9 # GREEK SMALL LETTER IOTA 0x264A 0x03BA # GREEK SMALL LETTER KAPPA -0x264B 0x03BB # GREEK SMALL LETTER LAMDA +0x264B 0x03BB # GREEK SMALL LETTER LAMBDA 0x264C 0x03BC # GREEK SMALL LETTER MU 0x264D 0x03BD # GREEK SMALL LETTER NU 0x264E 0x03BE # GREEK SMALL LETTER XI diff --git a/Tools/unicode/python-mappings/jisx0213-2004-std.txt b/Tools/unicode/python-mappings/jisx0213-2004-std.txt index a302fa19ff9bd2..7649933aec5839 100644 --- a/Tools/unicode/python-mappings/jisx0213-2004-std.txt +++ b/Tools/unicode/python-mappings/jisx0213-2004-std.txt @@ -500,7 +500,7 @@ 3-2628 U+0398 # GREEK CAPITAL LETTER THETA 3-2629 U+0399 # GREEK CAPITAL LETTER IOTA 3-262A U+039A # GREEK CAPITAL LETTER KAPPA -3-262B U+039B # GREEK CAPITAL LETTER LAMDA +3-262B U+039B # GREEK CAPITAL LETTER LAMBDA 3-262C U+039C # GREEK CAPITAL LETTER MU 3-262D U+039D # GREEK CAPITAL LETTER NU 3-262E U+039E # GREEK CAPITAL LETTER XI @@ -532,7 +532,7 @@ 3-2648 U+03B8 # GREEK SMALL LETTER THETA 3-2649 U+03B9 # GREEK SMALL LETTER IOTA 3-264A U+03BA # GREEK SMALL LETTER KAPPA -3-264B U+03BB # GREEK SMALL LETTER LAMDA +3-264B U+03BB # GREEK SMALL LETTER LAMBDA 3-264C U+03BC # GREEK SMALL LETTER MU 3-264D U+03BD # GREEK SMALL LETTER NU 3-264E U+03BE # GREEK SMALL LETTER XI diff --git a/configure.ac b/configure.ac index 3566c4b9038c2b..6917f3e1014a33 100644 --- a/configure.ac +++ b/configure.ac 
@@ -6264,7 +6264,7 @@ if test "$ac_cv_sizeof_wchar_t" -ge 2 \ -a "$ac_cv_wchar_t_signed" = "no" then AC_DEFINE([HAVE_USABLE_WCHAR_T], [1], - [Define if you have a useable wchar_t type defined in wchar.h; useable + [Define if you have a usable wchar_t type defined in wchar.h; usable means wchar_t must be an unsigned type with at least 16 bits. (see Include/unicodeobject.h).]) AC_MSG_RESULT([yes]) diff --git a/pyconfig.h.in b/pyconfig.h.in index 1c533b2bfb7fb4..78036c2656fd59 100644 --- a/pyconfig.h.in +++ b/pyconfig.h.in @@ -1559,7 +1559,7 @@ /* Define to 1 if you have the 'unshare' function. */ #undef HAVE_UNSHARE -/* Define if you have a useable wchar_t type defined in wchar.h; useable means +/* Define if you have a usable wchar_t type defined in wchar.h; usable means wchar_t must be an unsigned type with at least 16 bits. (see Include/unicodeobject.h). */ #undef HAVE_USABLE_WCHAR_T From 7816b64238cd950a090d1f07da62e3475a121828 Mon Sep 17 00:00:00 2001 From: Cornelius Roemer Date: Sun, 20 Jul 2025 23:06:21 +0200 Subject: [PATCH 2/9] Revert "Fix various typos and spelling errors found by codespell." This reverts commit a93e9d6d8db5045480ae868b467038156d218082. --- Doc/README.rst | 4 +- Doc/extending/extending.rst | 2 +- Doc/howto/functional.rst | 4 +- Doc/howto/gdb_helpers.rst | 2 +- Doc/includes/email-read-alternative.py | 4 +- Doc/library/bz2.rst | 8 +- Doc/library/collections.rst | 18 +- Doc/library/datetime.rst | 4 +- Doc/library/difflib.rst | 12 +- Doc/library/index.rst | 2 +- Doc/library/pickle.rst | 2 +- Doc/library/re.rst | 2 +- Doc/library/shelve.rst | 2 +- Doc/library/ssl.rst | 2 +- Doc/library/subprocess.rst | 8 +- Doc/library/unittest.mock.rst | 2 +- Doc/library/unittest.rst | 2 +- Doc/library/urllib.parse.rst | 2 +- Doc/library/venv.rst | 2 +- Doc/library/weakref.rst | 2 +- Doc/library/xml.sax.handler.rst | 4 +- Doc/tutorial/introduction.rst | 6 +- Doc/using/windows.rst | 2 +- Doc/whatsnew/2.4.rst | 2 +- Doc/whatsnew/2.6.rst | 2 +- Doc/whatsnew/3.0.rst | 2 +- Doc/whatsnew/3.11.rst | 2 +- Doc/whatsnew/3.12.rst | 4 +- Doc/whatsnew/3.14.rst | 2 +- Doc/whatsnew/3.2.rst | 2 +- Doc/whatsnew/3.4.rst | 2 +- Doc/whatsnew/3.5.rst | 2 +- Doc/whatsnew/3.8.rst | 2 +- Include/cpython/critical_section.h | 4 +- Include/internal/mimalloc/mimalloc/internal.h | 2 +- Include/internal/pycore_crossinterp.h | 2 +- Include/internal/pycore_pymem.h | 2 +- Include/internal/pycore_runtime_structs.h | 2 +- Include/internal/pycore_time.h | 6 +- Include/modsupport.h | 2 +- Include/refcount.h | 2 +- Include/unicodeobject.h | 2 +- InternalDocs/asyncio.md | 2 +- InternalDocs/interpreter.md | 2 +- Lib/_pydatetime.py | 8 +- Lib/_pydecimal.py | 466 +++++++++--------- Lib/_strptime.py | 8 +- Lib/asyncio/graph.py | 2 +- Lib/email/charset.py | 4 +- Lib/encodings/cp1006.py | 6 +- Lib/encodings/cp1253.py | 4 +- Lib/encodings/cp1256.py | 4 +- Lib/encodings/cp720.py | 4 +- Lib/encodings/cp737.py | 12 +- Lib/encodings/cp864.py | 18 +- Lib/encodings/cp869.py | 12 +- Lib/encodings/cp875.py | 4 +- Lib/encodings/iso8859_6.py | 4 +- Lib/encodings/iso8859_7.py | 4 +- Lib/encodings/mac_arabic.py | 12 +- Lib/encodings/mac_farsi.py | 4 +- Lib/encodings/mac_greek.py | 4 +- Lib/http/cookies.py | 2 +- Lib/idlelib/CREDITS.txt | 2 +- Lib/idlelib/News3.txt | 2 +- Lib/idlelib/editor.py | 4 +- Lib/idlelib/idle_test/test_editmenu.py | 18 +- Lib/idlelib/run.py | 6 +- Lib/idlelib/searchbase.py | 4 +- Lib/imaplib.py | 2 +- Lib/inspect.py | 8 +- Lib/locale.py | 2 +- Lib/logging/__init__.py | 2 +- Lib/logging/handlers.py | 2 +- 
Lib/multiprocessing/resource_tracker.py | 2 +- Lib/pickle.py | 2 +- Lib/sysconfig/__init__.py | 2 +- Lib/test/bisect_cmd.py | 2 +- Lib/test/configdata/cfgparser.2 | 8 +- Lib/test/crashers/README | 2 +- Lib/test/crashers/infinite_loop_re.py | 2 +- Lib/test/decimaltestdata/base.decTest | 2 +- Lib/test/decimaltestdata/ddBase.decTest | 2 +- Lib/test/decimaltestdata/dqBase.decTest | 2 +- Lib/test/decimaltestdata/dsBase.decTest | 2 +- Lib/test/encoded_modules/__init__.py | 2 +- Lib/test/encoded_modules/module_iso_8859_1.py | 2 +- Lib/test/libregrtest/cmdline.py | 4 +- Lib/test/libregrtest/filter.py | 6 +- Lib/test/libregrtest/findtests.py | 8 +- Lib/test/mime.types | 6 +- Lib/test/multibytecodec_support.py | 22 +- Lib/test/pickletester.py | 2 +- Lib/test/support/asyncore.py | 4 +- Lib/test/support/os_helper.py | 2 +- Lib/test/support/smtpd.py | 2 +- Lib/test/test_asyncio/test_sslproto.py | 2 +- Lib/test/test_asyncio/test_tasks.py | 6 +- Lib/test/test_buffer.py | 2 +- Lib/test/test_build_details.py | 4 +- Lib/test/test_bytes.py | 2 +- Lib/test/test_capi/test_tuple.py | 2 +- Lib/test/test_capi/test_type.py | 2 +- Lib/test/test_cmd_line.py | 2 +- Lib/test/test_codecs.py | 2 +- Lib/test/test_ctypes/test_win32.py | 4 +- .../test_win32_com_foreign_func.py | 6 +- Lib/test/test_decimal.py | 116 ++--- Lib/test/test_descr.py | 36 +- Lib/test/test_dict.py | 2 +- Lib/test/test_difflib.py | 2 +- Lib/test/test_dis.py | 2 +- .../test_email/test__header_value_parser.py | 24 +- Lib/test/test_exceptions.py | 2 +- Lib/test/test_fileio.py | 2 +- Lib/test/test_float.py | 2 +- Lib/test/test_fnmatch.py | 22 +- Lib/test/test_generators.py | 4 +- Lib/test/test_genexps.py | 4 +- Lib/test/test_gzip.py | 4 +- Lib/test/test_httpservers.py | 8 +- Lib/test/test_import/__init__.py | 2 +- Lib/test/test_interpreters/test_api.py | 2 +- Lib/test/test_iterlen.py | 2 +- Lib/test/test_itertools.py | 48 +- Lib/test/test_json/test_dump.py | 4 +- Lib/test/test_locale.py | 4 +- Lib/test/test_logging.py | 2 +- Lib/test/test_long.py | 4 +- Lib/test/test_memoryview.py | 2 +- Lib/test/test_ntpath.py | 16 +- Lib/test/test_opcache.py | 2 +- Lib/test/test_os.py | 2 +- Lib/test/test_pdb.py | 2 +- Lib/test/test_peg_generator/test_c_parser.py | 4 +- Lib/test/test_plistlib.py | 6 +- Lib/test/test_pty.py | 2 +- Lib/test/test_pyrepl/test_pyrepl.py | 2 +- Lib/test/test_regrtest.py | 6 +- Lib/test/test_richcmp.py | 12 +- Lib/test/test_set.py | 6 +- Lib/test/test_socket.py | 2 +- Lib/test/test_sort.py | 2 +- Lib/test/test_sqlite3/test_dbapi.py | 4 +- Lib/test/test_ssl.py | 8 +- Lib/test/test_stat.py | 2 +- Lib/test/test_statistics.py | 2 +- Lib/test/test_strptime.py | 20 +- Lib/test/test_subprocess.py | 2 +- Lib/test/test_syntax.py | 8 +- Lib/test/test_sysconfig.py | 4 +- Lib/test/test_tarfile.py | 2 +- .../test_tkinter/test_geometry_managers.py | 2 +- Lib/test/test_tkinter/test_widgets.py | 2 +- Lib/test/test_traceback.py | 12 +- Lib/test/test_typing.py | 4 +- Lib/test/test_unittest/testmock/testpatch.py | 20 +- Lib/test/test_urllib2.py | 4 +- Lib/test/test_weakref.py | 10 +- Lib/test/test_xml_etree.py | 2 +- Lib/test/test_zipfile/test_core.py | 46 +- Lib/tkinter/__init__.py | 2 +- Lib/tkinter/ttk.py | 14 +- Lib/turtle.py | 4 +- Lib/unittest/mock.py | 2 +- Lib/urllib/request.py | 2 +- Lib/xml/dom/minidom.py | 2 +- Mac/BuildScript/resources/Conclusion.rtf | 2 +- Mac/BuildScript/resources/License.rtf | 12 +- Mac/BuildScript/resources/ReadMe.rtf | 8 +- Mac/BuildScript/resources/Welcome.rtf | 2 +- Mac/PythonLauncher/English.lproj/Credits.rtf | 2 +- 
Makefile.pre.in | 2 +- Misc/ACKS | 12 +- Misc/HISTORY | 30 +- Misc/NEWS.d/3.10.0a3.rst | 2 +- Misc/NEWS.d/3.10.0a7.rst | 2 +- Misc/NEWS.d/3.12.0a4.rst | 4 +- Misc/NEWS.d/3.13.0b1.rst | 2 +- Misc/NEWS.d/3.14.0a1.rst | 4 +- Misc/NEWS.d/3.14.0a7.rst | 2 +- Misc/NEWS.d/3.14.0b1.rst | 4 +- Misc/NEWS.d/3.5.0a3.rst | 2 +- Misc/NEWS.d/3.5.1rc1.rst | 4 +- Misc/NEWS.d/3.5.2rc1.rst | 2 +- Misc/NEWS.d/3.6.0a1.rst | 4 +- Misc/NEWS.d/3.9.0a1.rst | 2 +- ...-07-19-12-37-05.gh-issue-136801.XU_tF2.rst | 2 +- ...-07-05-09-45-04.gh-issue-136286.N67Amr.rst | 2 +- ...-06-11-12-14-06.gh-issue-135379.25ttXq.rst | 2 +- Modules/Setup.stdlib.in | 2 +- Modules/_collectionsmodule.c | 6 +- Modules/_ctypes/ctypes.h | 2 +- Modules/_datetimemodule.c | 8 +- Modules/_decimal/libmpdec/basearith.h | 18 +- .../libmpdec/literature/mulmod-ppro.txt | 14 +- Modules/_decimal/libmpdec/umodarith.h | 4 +- Modules/_decimal/tests/bench.py | 4 +- Modules/_decimal/tests/bignum.py | 4 +- Modules/_functoolsmodule.c | 2 +- Modules/_pickle.c | 2 +- Modules/_ssl.c | 6 +- Modules/_testinternalcapi.c | 4 +- Modules/_zstd/_zstdmodule.c | 4 +- Modules/_zstd/_zstdmodule.h | 2 +- Modules/_zstd/buffer.h | 2 +- Modules/_zstd/clinic/_zstdmodule.c.h | 2 +- Modules/_zstd/compressor.c | 4 +- Modules/_zstd/decompressor.c | 2 +- Modules/_zstd/zstddict.c | 2 +- Modules/_zstd/zstddict.h | 2 +- Modules/cjkcodecs/_codecs_iso2022.c | 6 +- Modules/clinic/_pickle.c.h | 2 +- Modules/hmacmodule.c | 2 +- Modules/itertoolsmodule.c | 8 +- Modules/mathmodule.c | 6 +- Modules/mmapmodule.c | 2 +- Modules/posixmodule.c | 2 +- Modules/socketmodule.c | 2 +- Objects/clinic/unicodeobject.c.h | 4 +- Objects/codeobject.c | 4 +- Objects/dictnotes.txt | 2 +- Objects/dictobject.c | 16 +- Objects/exceptions.c | 2 +- Objects/listobject.c | 24 +- Objects/listsort.txt | 2 +- Objects/longobject.c | 4 +- Objects/mimalloc/arena.c | 4 +- Objects/mimalloc/os.c | 4 +- .../mimalloc/prim/windows/etw-mimalloc.wprp | 2 +- Objects/obmalloc.c | 4 +- Objects/typeobject.c | 4 +- Objects/unicodeobject.c | 18 +- PC/winreg.c | 2 +- PCbuild/pyproject-clangcl.props | 2 +- PCbuild/pyproject.props | 2 +- Parser/lexer/lexer.c | 2 +- Parser/pegen.c | 4 +- Parser/pegen.h | 2 +- Python/ceval.c | 4 +- Python/codecs.c | 22 +- Python/crossinterp.c | 4 +- Python/dynamic_annotations.c | 2 +- Python/gc.c | 2 +- Python/gc_free_threading.c | 6 +- Python/import.c | 2 +- Python/perf_trampoline.c | 2 +- Python/pystate.c | 4 +- Python/pytime.c | 20 +- Python/remote_debug.h | 2 +- Python/specialize.c | 2 +- Python/uniqueid.c | 2 +- Tools/build/deepfreeze.py | 6 +- Tools/build/freeze_modules.py | 2 +- Tools/build/generate-build-details.py | 2 +- Tools/build/parse_html5_entities.py | 6 +- Tools/cases_generator/stack.py | 8 +- Tools/cases_generator/tier1_generator.py | 2 +- Tools/i18n/pygettext.py | 2 +- .../peg_generator/pegen/grammar_visualizer.py | 6 +- Tools/scripts/combinerefs.py | 4 +- Tools/unicode/python-mappings/GB2312.TXT | 4 +- .../python-mappings/jisx0213-2004-std.txt | 4 +- configure.ac | 2 +- pyconfig.h.in | 2 +- 265 files changed, 958 insertions(+), 958 deletions(-) diff --git a/Doc/README.rst b/Doc/README.rst index 1ac4995ba05a6a..2d1148753e0c6b 100644 --- a/Doc/README.rst +++ b/Doc/README.rst @@ -59,10 +59,10 @@ Available make targets are: * "html", which builds standalone HTML files for offline viewing. -* "htmlview", which reuses the "html" builder, but then opens the main page +* "htmlview", which re-uses the "html" builder, but then opens the main page in your default web browser. 
-* "htmllive", which reuses the "html" builder, rebuilds the docs, +* "htmllive", which re-uses the "html" builder, rebuilds the docs, starts a local server, and automatically reloads the page in your browser when you make changes to reST files (Unix only). diff --git a/Doc/extending/extending.rst b/Doc/extending/extending.rst index a89a69043c0f9f..fd63495674651b 100644 --- a/Doc/extending/extending.rst +++ b/Doc/extending/extending.rst @@ -214,7 +214,7 @@ and initialize it by calling :c:func:`PyErr_NewException` in the module's SpamError = PyErr_NewException("spam.error", NULL, NULL); -Since :c:data:`!SpamError` is a global variable, it will be overwritten every time +Since :c:data:`!SpamError` is a global variable, it will be overwitten every time the module is reinitialized, when the :c:data:`Py_mod_exec` function is called. For now, let's avoid the issue: we will block repeated initialization by raising an diff --git a/Doc/howto/functional.rst b/Doc/howto/functional.rst index eb251df088a34b..053558e389030a 100644 --- a/Doc/howto/functional.rst +++ b/Doc/howto/functional.rst @@ -375,7 +375,7 @@ have the form:: if condition3 ... for exprN in sequenceN - if condition ) + if conditionN ) Again, for a list comprehension only the outside brackets are different (square brackets instead of parentheses). @@ -407,7 +407,7 @@ equivalent to the following Python code:: continue # Skip this element ... for exprN in sequenceN: - if not (condition): + if not (conditionN): continue # Skip this element # Output the value of diff --git a/Doc/howto/gdb_helpers.rst b/Doc/howto/gdb_helpers.rst index b0d56a6bfb891f..98ce813ca4ab02 100644 --- a/Doc/howto/gdb_helpers.rst +++ b/Doc/howto/gdb_helpers.rst @@ -136,7 +136,7 @@ enabled:: at Objects/unicodeobject.c:551 #7 0x0000000000440d94 in PyUnicodeUCS2_FromString (u=0x5c2b8d "__lltrace__") at Objects/unicodeobject.c:569 #8 0x0000000000584abd in PyDict_GetItemString (v= - {'Yuck': , '__builtins__': , '__file__': 'Lib/test/crashes/nasty_eq_vs_dict.py', '__package__': None, 'y': , 'dict': {0: 0, 1: 1, 2: 2, 3: 3}, '__cached__': None, '__name__': '__main__', 'z': , '__doc__': None}, key= + {'Yuck': , '__builtins__': , '__file__': 'Lib/test/crashers/nasty_eq_vs_dict.py', '__package__': None, 'y': , 'dict': {0: 0, 1: 1, 2: 2, 3: 3}, '__cached__': None, '__name__': '__main__', 'z': , '__doc__': None}, key= 0x5c2b8d "__lltrace__") at Objects/dictobject.c:2171 Notice how the dictionary argument to ``PyDict_GetItemString`` is displayed diff --git a/Doc/includes/email-read-alternative.py b/Doc/includes/email-read-alternative.py index 579390a18e9211..8d0b4e6eb6b6b5 100644 --- a/Doc/includes/email-read-alternative.py +++ b/Doc/includes/email-read-alternative.py @@ -36,8 +36,8 @@ def magic_html_parser(html_text, partfiles): print() print(''.join(simplest.get_content().splitlines(keepends=True)[:3])) -and = input("View full message?") -if and.lower()[0] == 'n': +ans = input("View full message?") +if ans.lower()[0] == 'n': sys.exit() # We can extract the richest alternative in order to display it: diff --git a/Doc/library/bz2.rst b/Doc/library/bz2.rst index 3aa83ab368d35d..ebe2e43febaefa 100644 --- a/Doc/library/bz2.rst +++ b/Doc/library/bz2.rst @@ -322,9 +322,9 @@ Using :func:`compress` and :func:`decompress` to demonstrate round-trip compress >>> import bz2 >>> data = b"""\ ... Donec rhoncus quis sapien sit amet molestie. Fusce scelerisque vel augue - ... nec ullamcorper. Name rutrum pretium placerat. Aliquam vel tristique lorem, + ... nec ullamcorper. 
Nam rutrum pretium placerat. Aliquam vel tristique lorem, ... sit amet cursus ante. In interdum laoreet mi, sit amet ultrices purus - ... pulvinar a. Name gravida euismod magna, non various justo tincidunt feugiat. + ... pulvinar a. Nam gravida euismod magna, non varius justo tincidunt feugiat. ... Aliquam pharetra lacus non risus vehicula rutrum. Maecenas aliquam leo ... felis. Pellentesque semper nunc sit amet nibh ullamcorper, ac elementum ... dolor luctus. Curabitur lacinia mi ornare consectetur vestibulum.""" @@ -362,9 +362,9 @@ Writing and reading a bzip2-compressed file in binary mode: >>> import bz2 >>> data = b"""\ ... Donec rhoncus quis sapien sit amet molestie. Fusce scelerisque vel augue - ... nec ullamcorper. Name rutrum pretium placerat. Aliquam vel tristique lorem, + ... nec ullamcorper. Nam rutrum pretium placerat. Aliquam vel tristique lorem, ... sit amet cursus ante. In interdum laoreet mi, sit amet ultrices purus - ... pulvinar a. Name gravida euismod magna, non various justo tincidunt feugiat. + ... pulvinar a. Nam gravida euismod magna, non varius justo tincidunt feugiat. ... Aliquam pharetra lacus non risus vehicula rutrum. Maecenas aliquam leo ... felis. Pellentesque semper nunc sit amet nibh ullamcorper, ac elementum ... dolor luctus. Curabitur lacinia mi ornare consectetur vestibulum.""" diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst index 0f778f4db2ce4b..5fbdb12f40cafa 100644 --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -455,8 +455,8 @@ or subtracting from an empty counter. Returns a new deque object initialized left-to-right (using :meth:`append`) with data from *iterable*. If *iterable* is not specified, the new deque is empty. - Dequeues are a generalization of stacks and queues (the name is pronounced "deck" - and is short for "double-ended queue"). Dequeues support thread-safe, memory + Deques are a generalization of stacks and queues (the name is pronounced "deck" + and is short for "double-ended queue"). Deques support thread-safe, memory efficient appends and pops from either side of the deque with approximately the same *O*\ (1) performance in either direction. @@ -466,11 +466,11 @@ or subtracting from an empty counter. position of the underlying data representation. - If *maxlen* is not specified or is ``None``, dequeues may grow to an + If *maxlen* is not specified or is ``None``, deques may grow to an arbitrary length. Otherwise, the deque is bounded to the specified maximum length. Once a bounded length deque is full, when new items are added, a corresponding number of items are discarded from the opposite end. Bounded - length dequeues provide functionality similar to the ``tail`` filter in + length deques provide functionality similar to the ``tail`` filter in Unix. They are also useful for tracking transactions and other pools of data where only the most recent activity is of interest. @@ -582,13 +582,13 @@ or subtracting from an empty counter. .. versionadded:: 3.1 -In addition to the above, dequeues support iteration, pickling, ``len(d)``, +In addition to the above, deques support iteration, pickling, ``len(d)``, ``reversed(d)``, ``copy.copy(d)``, ``copy.deepcopy(d)``, membership testing with the :keyword:`in` operator, and subscript references such as ``d[0]`` to access the first element. Indexed access is *O*\ (1) at both ends but slows to *O*\ (*n*) in the middle. For fast random access, use lists instead. 
-Starting in version 3.5, dequeues support ``__add__()``, ``__mul__()``, +Starting in version 3.5, deques support ``__add__()``, ``__mul__()``, and ``__imul__()``. Example: @@ -650,9 +650,9 @@ Example: :class:`deque` Recipes ^^^^^^^^^^^^^^^^^^^^^^ -This section shows various approaches to working with dequeues. +This section shows various approaches to working with deques. -Bounded length dequeues provide functionality similar to the ``tail`` filter +Bounded length deques provide functionality similar to the ``tail`` filter in Unix:: def tail(filename, n=10): @@ -660,7 +660,7 @@ in Unix:: with open(filename) as f: return deque(f, n) -Another approach to using dequeues is to maintain a sequence of recently +Another approach to using deques is to maintain a sequence of recently added elements by appending to the right and popping to the left:: def moving_average(iterable, n=3): diff --git a/Doc/library/datetime.rst b/Doc/library/datetime.rst index 14f5abb6fa351b..16ed3215bc2c1a 100644 --- a/Doc/library/datetime.rst +++ b/Doc/library/datetime.rst @@ -2515,8 +2515,8 @@ requires, and these work on all platforms with a standard C implementation. +-----------+--------------------------------+------------------------+-------+ | ``%B`` | Month as locale's full name. || January, February, | \(1) | | | | ..., December (en_US);| | -| | || January, February, ..., | | -| | | December (de_DE) | | +| | || Januar, Februar, ..., | | +| | | Dezember (de_DE) | | +-----------+--------------------------------+------------------------+-------+ | ``%m`` | Month as a zero-padded | 01, 02, ..., 12 | \(9) | | | decimal number. | | | diff --git a/Doc/library/difflib.rst b/Doc/library/difflib.rst index 455ea27cb2c3e1..ce948a6860f02c 100644 --- a/Doc/library/difflib.rst +++ b/Doc/library/difflib.rst @@ -246,7 +246,7 @@ diffs. For comparing directories and files, see also, the :mod:`filecmp` module. >>> print(''.join(diff), end="") - one ? ^ - + or + + ore ? ^ - two - three @@ -273,7 +273,7 @@ diffs. For comparing directories and files, see also, the :mod:`filecmp` module. two three >>> print(''.join(restore(diff, 2)), end="") - or + ore tree emu @@ -420,12 +420,12 @@ The :class:`SequenceMatcher` class has this constructor: is not changed. - .. method:: find_longest_match(also=0, ahi=None, blo=0, bhi=None) + .. method:: find_longest_match(alo=0, ahi=None, blo=0, bhi=None) - Find longest matching block in ``a[also:ahi]`` and ``b[blo:bhi]``. + Find longest matching block in ``a[alo:ahi]`` and ``b[blo:bhi]``. If *isjunk* was omitted or ``None``, :meth:`find_longest_match` returns - ``(i, j, k)`` such that ``a[i:i+k]`` is equal to ``b[j:j+k]``, where ``also + ``(i, j, k)`` such that ``a[i:i+k]`` is equal to ``b[j:j+k]``, where ``alo <= i <= i+k <= ahi`` and ``blo <= j <= j+k <= bhi``. For all ``(i', j', k')`` meeting those conditions, the additional conditions ``k >= k'``, ``i <= i'``, and if ``i == i'``, ``j <= j'`` are also met. In other words, of @@ -453,7 +453,7 @@ The :class:`SequenceMatcher` class has this constructor: >>> s.find_longest_match(0, 5, 0, 9) Match(a=1, b=0, size=4) - If no blocks match, this returns ``(also, blo, 0)``. + If no blocks match, this returns ``(alo, blo, 0)``. This method returns a :term:`named tuple` ``Match(a, b, size)``. diff --git a/Doc/library/index.rst b/Doc/library/index.rst index 781264cdfecba8..44b218948d07e1 100644 --- a/Doc/library/index.rst +++ b/Doc/library/index.rst @@ -54,7 +54,7 @@ the `Python Package Index `_. 
archiving.rst fileformats.rst crypto.rst - allows.rst + allos.rst cmdlinelibs.rst concurrency.rst ipc.rst diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst index adf37408c8645d..007c9fe1b950cf 100644 --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -408,7 +408,7 @@ The :mod:`pickle` module exports three classes, :class:`Pickler`, The memo is the data structure that remembers which objects the pickler has already seen, so that shared or recursive objects are pickled by reference and not by value. This method is - useful when reusing picklers. + useful when re-using picklers. .. class:: Unpickler(file, *, fix_imports=True, encoding="ASCII", errors="strict", buffers=None) diff --git a/Doc/library/re.rst b/Doc/library/re.rst index 3ae2099e7fe701..75ebbf11c8e47c 100644 --- a/Doc/library/re.rst +++ b/Doc/library/re.rst @@ -1811,7 +1811,7 @@ in each word of a sentence except for the first and last characters:: >>> re.sub(r"(\w)(\w+)(\w)", repl, text) 'Poefsrosr Aealmlobdk, pslaee reorpt your abnseces plmrptoy.' >>> re.sub(r"(\w)(\w+)(\w)", repl, text) - 'Pofsroser Aodlambelk, plasee report your asnebces potlmrpy.' + 'Pofsroser Aodlambelk, plasee reoprt yuor asnebces potlmrpy.' Finding all Adverbs diff --git a/Doc/library/shelve.rst b/Doc/library/shelve.rst index b88fe4157bdc29..23808619524056 100644 --- a/Doc/library/shelve.rst +++ b/Doc/library/shelve.rst @@ -144,7 +144,7 @@ Restrictions which can cause hard crashes when trying to read from the database. * :meth:`Shelf.reorganize` may not be available for all database packages and - may temporarely increase resource usage (especially disk space) when called. + may temporarily increase resource usage (especially disk space) when called. Additionally, it will never run automatically and instead needs to be called explicitly. diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst index f4e874b89a0f5c..a9930183f9a400 100644 --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -2285,7 +2285,7 @@ something like the following:: Country Name (2 letter code) [AU]:US State or Province Name (full name) [Some-State]:MyState Locality Name (eg, city) []:Some City - Organization Name (eg, company) [Internet Widgets Pty Ltd]:My Organization, Inc. + Organization Name (eg, company) [Internet Widgits Pty Ltd]:My Organization, Inc. Organizational Unit Name (eg, section) []:My Group Common Name (eg, YOUR name) []:myserver.mygroup.myorganization.com Email Address []:ops@myserver.mygroup.myorganization.com diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst index aa74e0f57a39fa..028a7861f36798 100644 --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -1359,12 +1359,12 @@ Replacing shell pipeline .. code-block:: bash - output=$(dmesg | grep had) + output=$(dmesg | grep hda) becomes:: p1 = Popen(["dmesg"], stdout=PIPE) - p2 = Popen(["grep", "had"], stdin=p1.stdout, stdout=PIPE) + p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE) p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. output = p2.communicate()[0] @@ -1376,11 +1376,11 @@ be used directly: ..
code-block:: bash - output=$(dmesg | grep had) + output=$(dmesg | grep hda) becomes:: - output = check_output("dmesg | grep had", shell=True) + output = check_output("dmesg | grep hda", shell=True) Replacing :func:`os.system` diff --git a/Doc/library/unittest.mock.rst b/Doc/library/unittest.mock.rst index b98848a25cfa70..091562cc9aef98 100644 --- a/Doc/library/unittest.mock.rst +++ b/Doc/library/unittest.mock.rst @@ -265,7 +265,7 @@ the *new_callable* argument to :func:`patch`. :attr:`return_value` attribute. * *unsafe*: By default, accessing any attribute whose name starts with - *assert*, *assret*, *assert*, *aseert* or *assrt* will raise an + *assert*, *assret*, *asert*, *aseert* or *assrt* will raise an :exc:`AttributeError`. Passing ``unsafe=True`` will allow access to these attributes. diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst index 86b215775c6148..ec96e8416120fa 100644 --- a/Doc/library/unittest.rst +++ b/Doc/library/unittest.rst @@ -485,7 +485,7 @@ advantages to placing the test code in a separate module, such as .. _legacy-unit-tests: -Reusing old test code +Re-using old test code ---------------------- Some users will find that they have existing test code that they would like to diff --git a/Doc/library/urllib.parse.rst b/Doc/library/urllib.parse.rst index bcee8ccaa1f5cb..44a9c79cba2216 100644 --- a/Doc/library/urllib.parse.rst +++ b/Doc/library/urllib.parse.rst @@ -24,7 +24,7 @@ The module has been designed to match the internet RFC on Relative Uniform Resource Locators. It supports the following URL schemes: ``file``, ``ftp``, ``gopher``, ``hdl``, ``http``, ``https``, ``imap``, ``itms-services``, ``mailto``, ``mms``, ``news``, ``nntp``, ``prospero``, ``rsync``, ``rtsp``, ``rtsps``, ``rtspu``, -``sftp``, ``https``, ``sip``, ``sips``, ``snews``, ``svn``, ``svn+ssh``, +``sftp``, ``shttp``, ``sip``, ``sips``, ``snews``, ``svn``, ``svn+ssh``, ``telnet``, ``wais``, ``ws``, ``wss``. .. impl-detail:: diff --git a/Doc/library/venv.rst b/Doc/library/venv.rst index 3d04e130eba25a..de427fbafe71dc 100644 --- a/Doc/library/venv.rst +++ b/Doc/library/venv.rst @@ -79,7 +79,7 @@ containing a copy or symlink of the Python executable (as appropriate for the platform or arguments used at environment creation time). It also creates a :file:`lib/pythonX.Y/site-packages` subdirectory (on Windows, this is :file:`Lib\site-packages`). -If an existing directory is specified, it will be reused. +If an existing directory is specified, it will be re-used. .. versionchanged:: 3.5 The use of ``venv`` is now recommended for creating virtual environments. diff --git a/Doc/library/weakref.rst b/Doc/library/weakref.rst index 43e7b2ab33f03f..2a25ed045c68bd 100644 --- a/Doc/library/weakref.rst +++ b/Doc/library/weakref.rst @@ -68,7 +68,7 @@ exposed by the :mod:`weakref` module for the benefit of advanced uses. Not all objects can be weakly referenced. Objects which support weak references include class instances, functions written in Python (but not in C), instance methods, sets, frozensets, some :term:`file objects `, :term:`generators `, -type objects, sockets, arrays, dequeues, regular expression pattern objects, and code +type objects, sockets, arrays, deques, regular expression pattern objects, and code objects. .. 
versionchanged:: 3.2 diff --git a/Doc/library/xml.sax.handler.rst b/Doc/library/xml.sax.handler.rst index fb7bfaa0bf1777..c2c9d6424b5072 100644 --- a/Doc/library/xml.sax.handler.rst +++ b/Doc/library/xml.sax.handler.rst @@ -250,7 +250,7 @@ events in the input document: string and the *attrs* parameter holds an object of the :class:`~xml.sax.xmlreader.Attributes` interface (see :ref:`attributes-objects`) containing the attributes of - the element. The object passed as *attrs* may be reused by the parser; holding + the element. The object passed as *attrs* may be re-used by the parser; holding on to a reference to it is not a reliable way to keep a copy of the attributes. To keep a copy of the attributes, use the :meth:`copy` method of the *attrs* object. @@ -275,7 +275,7 @@ events in the input document: :ref:`attributes-ns-objects`) containing the attributes of the element. If no namespace is associated with the element, the *uri* component of *name* will be ``None``. The object passed - as *attrs* may be reused by the parser; holding on to a reference to it is not + as *attrs* may be re-used by the parser; holding on to a reference to it is not a reliable way to keep a copy of the attributes. To keep a copy of the attributes, use the :meth:`copy` method of the *attrs* object. diff --git a/Doc/tutorial/introduction.rst b/Doc/tutorial/introduction.rst index 53735eed311454..9e06e03991bc96 100644 --- a/Doc/tutorial/introduction.rst +++ b/Doc/tutorial/introduction.rst @@ -436,9 +436,9 @@ through all other variables that refer to it.:: >>> rgba = rgb >>> id(rgb) == id(rgba) # they reference the same object True - >>> rgba.append("Alpha") + >>> rgba.append("Alph") >>> rgb - ["Red", "Green", "Blue", "Alpha"] + ["Red", "Green", "Blue", "Alph"] All slice operations return a new list containing the requested elements. This means that the following slice returns a @@ -449,7 +449,7 @@ means that the following slice returns a >>> correct_rgba ["Red", "Green", "Blue", "Alpha"] >>> rgba - ["Red", "Green", "Blue", "Alpha"] + ["Red", "Green", "Blue", "Alph"] Assignment to slices is also possible, and this can even change the size of the list or clear it entirely:: diff --git a/Doc/using/windows.rst b/Doc/using/windows.rst index 2eb756a9881ae9..7cc50bccb3724a 100644 --- a/Doc/using/windows.rst +++ b/Doc/using/windows.rst @@ -1514,7 +1514,7 @@ free-threaded binaries at this time. To specify the install option at the command line, use ``Include_freethreaded=1``. See :ref:`install-layout-option` for instructions on -preemptively downloading the additional binaries for offline install. The +pre-emptively downloading the additional binaries for offline install. The options to include debug symbols and binaries also apply to the free-threaded builds. diff --git a/Doc/whatsnew/2.4.rst b/Doc/whatsnew/2.4.rst index 31b2c9581153b0..7628cfefe0ec96 100644 --- a/Doc/whatsnew/2.4.rst +++ b/Doc/whatsnew/2.4.rst @@ -1304,7 +1304,7 @@ complete list of changes, or look through the CVS logs for all the details. comparable. (Contributed by Raymond Hettinger.) * The :mod:`weakref` module now supports a wider variety of objects including - Python functions, class instances, sets, frozensets, dequeues, arrays, files, + Python functions, class instances, sets, frozensets, deques, arrays, files, sockets, and regular expression pattern objects. (Contributed by Raymond Hettinger.) 
diff --git a/Doc/whatsnew/2.6.rst b/Doc/whatsnew/2.6.rst index 9baf6a37fae23d..0803eba99e6d17 100644 --- a/Doc/whatsnew/2.6.rst +++ b/Doc/whatsnew/2.6.rst @@ -3093,7 +3093,7 @@ Changes to Python's build process and to the C API include: (Contributed by Collin Winter; :issue:`1530959`.) * Several basic data types, such as integers and strings, maintain - internal free lists of objects that can be reused. The data + internal free lists of objects that can be re-used. The data structures for these free lists now follow a naming convention: the variable is always named ``free_list``, the counter is always named ``numfree``, and a macro ``Py_MAXFREELIST`` is diff --git a/Doc/whatsnew/3.0.rst b/Doc/whatsnew/3.0.rst index 05e776aecab97f..d858586138e9ae 100644 --- a/Doc/whatsnew/3.0.rst +++ b/Doc/whatsnew/3.0.rst @@ -782,7 +782,7 @@ Operators And Special Methods * The function attributes named :attr:`!func_X` have been renamed to use the :attr:`!__X__` form, freeing up these names in the function - attribute namespace for user-defined attributes. To with, + attribute namespace for user-defined attributes. To wit, :attr:`!func_closure`, :attr:`!func_code`, :attr:`!func_defaults`, :attr:`!func_dict`, :attr:`!func_doc`, :attr:`!func_globals`, :attr:`!func_name` were renamed to :attr:`~function.__closure__`, diff --git a/Doc/whatsnew/3.11.rst b/Doc/whatsnew/3.11.rst index 07aa1ec76adae7..abf9677fd9cac5 100644 --- a/Doc/whatsnew/3.11.rst +++ b/Doc/whatsnew/3.11.rst @@ -1389,7 +1389,7 @@ are created whenever Python calls a Python function. The following are new frame optimizations: - Streamlined the frame creation process. -- Avoided memory allocation by generously reusing frame space on the C stack. +- Avoided memory allocation by generously re-using frame space on the C stack. - Streamlined the internal frame struct to contain only essential information. Frames previously held extra debugging and memory management information. diff --git a/Doc/whatsnew/3.12.rst b/Doc/whatsnew/3.12.rst index 25e972962dcd59..7cfdc287b7fad7 100644 --- a/Doc/whatsnew/3.12.rst +++ b/Doc/whatsnew/3.12.rst @@ -447,12 +447,12 @@ Improved Error Messages ... self.blech = 1 ... ... def foo(self): - ... something = blech + ... somethin = blech ... >>> A().foo() Traceback (most recent call last): File "", line 1 - something = blech + somethin = blech ^^^^^ NameError: name 'blech' is not defined. Did you mean: 'self.blech'? diff --git a/Doc/whatsnew/3.14.rst b/Doc/whatsnew/3.14.rst index bf17f417a5980e..c108a94692dca7 100644 --- a/Doc/whatsnew/3.14.rst +++ b/Doc/whatsnew/3.14.rst @@ -1051,7 +1051,7 @@ Concurrent safe warnings control The :class:`warnings.catch_warnings` context manager will now optionally use a context variable for warning filters. This is enabled by setting the :data:`~sys.flags.context_aware_warnings` flag, either with the ``-X`` -command-line option or an environment variable. This gives predicable +command-line option or an environment variable. This gives predictable warnings control when using :class:`~warnings.catch_warnings` combined with multiple threads or asynchronous tasks. The flag defaults to true for the free-threaded build and false for the GIL-enabled build.
diff --git a/Doc/whatsnew/3.2.rst b/Doc/whatsnew/3.2.rst index 24db1c9400ed97..7104904c956a7a 100644 --- a/Doc/whatsnew/3.2.rst +++ b/Doc/whatsnew/3.2.rst @@ -167,7 +167,7 @@ each with their own argument patterns and help displays:: $ ./helm.py --help # top level help (launch and move) $ ./helm.py launch --help # help for launch options - $ ./helm.py launch --missiles # set missiles=True and torpedos=False + $ ./helm.py launch --missiles # set missiles=True and torpedoes=False $ ./helm.py steer --course 180 --speed 5 # set movement parameters .. seealso:: diff --git a/Doc/whatsnew/3.4.rst b/Doc/whatsnew/3.4.rst index 7e1a46e1afe81c..e4f602a17ee968 100644 --- a/Doc/whatsnew/3.4.rst +++ b/Doc/whatsnew/3.4.rst @@ -2414,7 +2414,7 @@ Changes in the Python API * Because :mod:`unittest.TestSuite` now drops references to tests after they are run, test harnesses that reuse a :class:`~unittest.TestSuite` to re-run - a set of tests may fail. Test suites should not be reused in this fashion + a set of tests may fail. Test suites should not be re-used in this fashion since it means state is retained between test runs, breaking the test isolation that :mod:`unittest` is designed to provide. However, if the lack of isolation is considered acceptable, the old behavior can be restored by diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst index 2c60efb75736e5..db3f1db3bd74ad 100644 --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -914,7 +914,7 @@ makes it 4 to 100 times faster. (Contributed by Eric Snow in :issue:`16991`.) The :class:`~collections.deque` class now defines :meth:`~collections.deque.index`, :meth:`~collections.deque.insert`, and :meth:`~collections.deque.copy`, and supports the ``+`` and ``*`` operators. -This allows dequeues to be recognized as a :class:`~collections.abc.MutableSequence` +This allows deques to be recognized as a :class:`~collections.abc.MutableSequence` and improves their substitutability for lists. (Contributed by Raymond Hettinger in :issue:`23704`.) diff --git a/Doc/whatsnew/3.8.rst b/Doc/whatsnew/3.8.rst index e2b6bf229d545d..bc2eb1d0e263f0 100644 --- a/Doc/whatsnew/3.8.rst +++ b/Doc/whatsnew/3.8.rst @@ -431,7 +431,7 @@ Other Language Changes ... lastname, *members = family.split() ... return lastname.upper(), *members ... - >>> parse('simpsons homer merge bart lisa maggie') + >>> parse('simpsons homer marge bart lisa maggie') ('SIMPSONS', 'homer', 'marge', 'bart', 'lisa', 'maggie') (Contributed by David Cuthbert and Jordan Chapman in :issue:`32117`.) diff --git a/Include/cpython/critical_section.h b/Include/cpython/critical_section.h index 4d48ba13451304..35db3fb6a59ce6 100644 --- a/Include/cpython/critical_section.h +++ b/Include/cpython/critical_section.h @@ -93,7 +93,7 @@ PyCriticalSection2_End(PyCriticalSection2 *c); } #else /* !Py_GIL_DISABLED */ -// NOTE: the contents of this struct are private and may change betweeen +// NOTE: the contents of this struct are private and may change between // Python releases without a deprecation period. struct PyCriticalSection { // Tagged pointer to an outer active critical section (or 0). @@ -105,7 +105,7 @@ struct PyCriticalSection { // A critical section protected by two mutexes. Use // Py_BEGIN_CRITICAL_SECTION2 and Py_END_CRITICAL_SECTION2. -// NOTE: the contents of this struct are private and may change betweeen +// NOTE: the contents of this struct are private and may change between // Python releases without a deprecation period.
struct PyCriticalSection2 { PyCriticalSection _cs_base; diff --git a/Include/internal/mimalloc/mimalloc/internal.h b/Include/internal/mimalloc/mimalloc/internal.h index e367af348dafab..a7daa3a40a4c0b 100644 --- a/Include/internal/mimalloc/mimalloc/internal.h +++ b/Include/internal/mimalloc/mimalloc/internal.h @@ -591,7 +591,7 @@ for the read case we can subtract two entries to discard the `+k1` term, but that leads to `((p1^k2)<<> 5 - preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leap year) + preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear) if preceding > n: # estimate is too large month -= 1 - preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leap year) + preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear) n -= preceding assert 0 <= n < _days_in_month(year, month) diff --git a/Lib/_pydecimal.py b/Lib/_pydecimal.py index de8247c23df143..9b8e42a2342536 100644 --- a/Lib/_pydecimal.py +++ b/Lib/_pydecimal.py @@ -154,8 +154,8 @@ class InvalidOperation(DecimalException): """ def handle(self, context, *args): if args: - and = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True) - return and._fix_nan(context) + ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True) + return ans._fix_nan(context) return _NaN class ConversionSyntax(InvalidOperation): @@ -841,8 +841,8 @@ def __lt__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - and = self._compare_check_nans(other, context) - if and: + ans = self._compare_check_nans(other, context) + if ans: return False return self._cmp(other) < 0 @@ -850,8 +850,8 @@ def __le__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - and = self._compare_check_nans(other, context) - if and: + ans = self._compare_check_nans(other, context) + if ans: return False return self._cmp(other) <= 0 @@ -859,8 +859,8 @@ def __gt__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - and = self._compare_check_nans(other, context) - if and: + ans = self._compare_check_nans(other, context) + if ans: return False return self._cmp(other) > 0 @@ -868,8 +868,8 @@ def __ge__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other - and = self._compare_check_nans(other, context) - if and: + ans = self._compare_check_nans(other, context) + if ans: return False return self._cmp(other) >= 0 @@ -885,9 +885,9 @@ def compare(self, other, context=None): # Compare(NaN, NaN) = NaN if (self._is_special or other and other._is_special): - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans return Decimal(self._cmp(other)) @@ -914,8 +914,8 @@ def __hash__(self): else: exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS) hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS - and = hash_ if self >= 0 else -hash_ - return -2 if and == -1 else and + ans = hash_ if self >= 0 else -hash_ + return -2 if ans == -1 else ans def as_tuple(self): """Represents the number as a triple tuple. @@ -1046,9 +1046,9 @@ def __neg__(self, context=None): Rounds, if it has reason. 
""" if self._is_special: - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans if context is None: context = getcontext() @@ -1056,11 +1056,11 @@ def __neg__(self, context=None): if not self and context.rounding != ROUND_FLOOR: # -Decimal('0') is Decimal('0'), not Decimal('-0'), except # in ROUND_FLOOR rounding mode. - and = self.copy_abs() + ans = self.copy_abs() else: - and = self.copy_negate() + ans = self.copy_negate() - return and._fix(context) + return ans._fix(context) def __pos__(self, context=None): """Returns a copy, unless it is a sNaN. @@ -1068,20 +1068,20 @@ def __pos__(self, context=None): Rounds the number (if more than precision digits) """ if self._is_special: - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans if context is None: context = getcontext() if not self and context.rounding != ROUND_FLOOR: # + (-0) = 0, except in ROUND_FLOOR rounding mode. - and = self.copy_abs() + ans = self.copy_abs() else: - and = Decimal(self) + ans = Decimal(self) - return and._fix(context) + return ans._fix(context) def __abs__(self, round=True, context=None): """Returns the absolute value of self. @@ -1094,16 +1094,16 @@ def __abs__(self, round=True, context=None): return self.copy_abs() if self._is_special: - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans if self._sign: - and = self.__neg__(context=context) + ans = self.__neg__(context=context) else: - and = self.__pos__(context=context) + ans = self.__pos__(context=context) - return and + return ans def __add__(self, other, context=None): """Returns self + other. @@ -1118,9 +1118,9 @@ def __add__(self, other, context=None): context = getcontext() if self._is_special or other._is_special: - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans if self._isinfinity(): # If both INF, same sign => same as both, opposite => error. 
@@ -1140,19 +1140,19 @@ def __add__(self, other, context=None): sign = min(self._sign, other._sign) if negativezero: sign = 1 - and = _dec_from_triple(sign, '0', exp) - and = and._fix(context) - return and + ans = _dec_from_triple(sign, '0', exp) + ans = ans._fix(context) + return ans if not self: exp = max(exp, other._exp - context.prec-1) - and = other._rescale(exp, context.rounding) - and = and._fix(context) - return and + ans = other._rescale(exp, context.rounding) + ans = ans._fix(context) + return ans if not other: exp = max(exp, self._exp - context.prec-1) - and = self._rescale(exp, context.rounding) - and = and._fix(context) - return and + ans = self._rescale(exp, context.rounding) + ans = ans._fix(context) + return ans op1 = _WorkRep(self) op2 = _WorkRep(other) @@ -1162,9 +1162,9 @@ def __add__(self, other, context=None): if op1.sign != op2.sign: # Equal and opposite if op1.int == op2.int: - and = _dec_from_triple(negativezero, '0', exp) - and = and._fix(context) - return and + ans = _dec_from_triple(negativezero, '0', exp) + ans = ans._fix(context) + return ans if op1.int < op2.int: op1, op2 = op2, op1 # OK, now abs(op1) > abs(op2) @@ -1187,9 +1187,9 @@ def __add__(self, other, context=None): result.int = op1.int - op2.int result.exp = op1.exp - and = Decimal(result) - and = and._fix(context) - return and + ans = Decimal(result) + ans = ans._fix(context) + return ans __radd__ = __add__ @@ -1200,9 +1200,9 @@ def __sub__(self, other, context=None): return other if self._is_special or other._is_special: - and = self._check_nans(other, context=context) - if and: - return and + ans = self._check_nans(other, context=context) + if ans: + return ans # self - other is computed as self + other.copy_negate() return self.__add__(other.copy_negate(), context=context) @@ -1230,9 +1230,9 @@ def __mul__(self, other, context=None): resultsign = self._sign ^ other._sign if self._is_special or other._is_special: - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans if self._isinfinity(): if not other: @@ -1248,28 +1248,28 @@ def __mul__(self, other, context=None): # Special case for multiplying by zero if not self or not other: - and = _dec_from_triple(resultsign, '0', resultexp) + ans = _dec_from_triple(resultsign, '0', resultexp) # Fixing in case the exponent is out of bounds - and = and._fix(context) - return and + ans = ans._fix(context) + return ans # Special case for multiplying by power of 10 if self._int == '1': - and = _dec_from_triple(resultsign, other._int, resultexp) - and = and._fix(context) - return and + ans = _dec_from_triple(resultsign, other._int, resultexp) + ans = ans._fix(context) + return ans if other._int == '1': - and = _dec_from_triple(resultsign, self._int, resultexp) - and = and._fix(context) - return and + ans = _dec_from_triple(resultsign, self._int, resultexp) + ans = ans._fix(context) + return ans op1 = _WorkRep(self) op2 = _WorkRep(other) - and = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp) - and = and._fix(context) + ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp) + ans = ans._fix(context) - return and + return ans __rmul__ = __mul__ def __truediv__(self, other, context=None): @@ -1284,9 +1284,9 @@ def __truediv__(self, other, context=None): sign = self._sign ^ other._sign if self._is_special or other._is_special: - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans if 
self._isinfinity() and other._isinfinity(): return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF') @@ -1328,8 +1328,8 @@ def __truediv__(self, other, context=None): coeff //= 10 exp += 1 - and = _dec_from_triple(sign, str(coeff), exp) - return and._fix(context) + ans = _dec_from_triple(sign, str(coeff), exp) + return ans._fix(context) def _divide(self, other, context): """Return (self // other, self % other), to context.prec precision. @@ -1360,9 +1360,9 @@ def _divide(self, other, context): _dec_from_triple(self._sign, str(r), ideal_exp)) # Here the quotient is too large to be representable - and = context._raise_error(DivisionImpossible, + ans = context._raise_error(DivisionImpossible, 'quotient too large in //, % or divmod') - return and, and + return ans, ans def __rtruediv__(self, other, context=None): """Swaps self/other and returns __truediv__.""" @@ -1382,23 +1382,23 @@ def __divmod__(self, other, context=None): if context is None: context = getcontext() - and = self._check_nans(other, context) - if and: - return (and, and) + ans = self._check_nans(other, context) + if ans: + return (ans, ans) sign = self._sign ^ other._sign if self._isinfinity(): if other._isinfinity(): - and = context._raise_error(InvalidOperation, 'divmod(INF, INF)') - return and, and + ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)') + return ans, ans else: return (_SignedInfinity[sign], context._raise_error(InvalidOperation, 'INF % x')) if not other: if not self: - and = context._raise_error(DivisionUndefined, 'divmod(0, 0)') - return and, and + ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)') + return ans, ans else: return (context._raise_error(DivisionByZero, 'x // 0', sign), context._raise_error(InvalidOperation, 'x % 0')) @@ -1425,9 +1425,9 @@ def __mod__(self, other, context=None): if context is None: context = getcontext() - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans if self._isinfinity(): return context._raise_error(InvalidOperation, 'INF % x') @@ -1457,9 +1457,9 @@ def remainder_near(self, other, context=None): other = _convert_other(other, raiseit=True) - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans # self == +/-infinity -> InvalidOperation if self._isinfinity(): @@ -1477,14 +1477,14 @@ def remainder_near(self, other, context=None): # other = +/-infinity -> remainder = self if other._isinfinity(): - and = Decimal(self) - return and._fix(context) + ans = Decimal(self) + return ans._fix(context) # self = 0 -> remainder = self, with ideal exponent ideal_exponent = min(self._exp, other._exp) if not self: - and = _dec_from_triple(self._sign, '0', ideal_exponent) - return and._fix(context) + ans = _dec_from_triple(self._sign, '0', ideal_exponent) + return ans._fix(context) # catch most cases of large or small quotient expdiff = self.adjusted() - other.adjusted() @@ -1493,8 +1493,8 @@ def remainder_near(self, other, context=None): return context._raise_error(DivisionImpossible) if expdiff <= -2: # expdiff <= -2 => abs(self/other) < 0.1 - and = self._rescale(ideal_exponent, context.rounding) - return and._fix(context) + ans = self._rescale(ideal_exponent, context.rounding) + return ans._fix(context) # adjust both arguments to have the same exponent, then divide op1 = _WorkRep(self) @@ -1520,8 +1520,8 @@ def remainder_near(self, other, context=None): sign = 1-sign r = -r - and = _dec_from_triple(sign, str(r), 
ideal_exponent) - return and._fix(context) + ans = _dec_from_triple(sign, str(r), ideal_exponent) + return ans._fix(context) def __floordiv__(self, other, context=None): """self // other""" @@ -1532,9 +1532,9 @@ def __floordiv__(self, other, context=None): if context is None: context = getcontext() - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans if self._isinfinity(): if other._isinfinity(): @@ -1645,10 +1645,10 @@ def _fix(self, context): exp_min = len(self._int) + self._exp - context.prec if exp_min > Etop: # overflow: exp_min > Etop iff self.adjusted() > Emax - and = context._raise_error(Overflow, 'above Emax', self._sign) + ans = context._raise_error(Overflow, 'above Emax', self._sign) context._raise_error(Inexact) context._raise_error(Rounded) - return and + return ans self_is_subnormal = exp_min < Etiny if self_is_subnormal: @@ -1671,9 +1671,9 @@ def _fix(self, context): # check whether the rounding pushed the exponent out of range if exp_min > Etop: - and = context._raise_error(Overflow, 'above Emax', self._sign) + ans = context._raise_error(Overflow, 'above Emax', self._sign) else: - and = _dec_from_triple(self._sign, coeff, exp_min) + ans = _dec_from_triple(self._sign, coeff, exp_min) # raise the appropriate signals, taking care to respect # the precedence described in the specification @@ -1684,10 +1684,10 @@ def _fix(self, context): if changed: context._raise_error(Inexact) context._raise_error(Rounded) - if not and: + if not ans: # raise Clamped on underflow to 0 context._raise_error(Clamped) - return and + return ans if self_is_subnormal: context._raise_error(Subnormal) @@ -2282,9 +2282,9 @@ def __pow__(self, other, modulo=None, context=None): context = getcontext() # either argument is a NaN => result is NaN - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans # 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity) if not other: @@ -2362,7 +2362,7 @@ def __pow__(self, other, modulo=None, context=None): # from here on, the result always goes through the call # to _fix at the end of this function. - and = None + ans = None exact = False # crude test to catch cases of extreme overflow/underflow. 
If @@ -2375,24 +2375,24 @@ def __pow__(self, other, modulo=None, context=None): # self > 1 and other +ve, or self < 1 and other -ve # possibility of overflow if bound >= len(str(context.Emax)): - and = _dec_from_triple(result_sign, '1', context.Emax+1) + ans = _dec_from_triple(result_sign, '1', context.Emax+1) else: # self > 1 and other -ve, or self < 1 and other +ve # possibility of underflow to 0 Etiny = context.Etiny() if bound >= len(str(-Etiny)): - and = _dec_from_triple(result_sign, '1', Etiny-1) + ans = _dec_from_triple(result_sign, '1', Etiny-1) # try for an exact result with precision +1 - if and is None: - and = self._power_exact(other, context.prec + 1) - if and is not None: + if ans is None: + ans = self._power_exact(other, context.prec + 1) + if ans is not None: if result_sign == 1: - and = _dec_from_triple(1, and._int, and._exp) + ans = _dec_from_triple(1, ans._int, ans._exp) exact = True # usual case: inexact result, x**y computed directly as exp(y*log(x)) - if and is None: + if ans is None: p = context.prec x = _WorkRep(self) xc, xe = x.int, x.exp @@ -2410,7 +2410,7 @@ def __pow__(self, other, modulo=None, context=None): break extra += 3 - and = _dec_from_triple(result_sign, str(coeff), exp) + ans = _dec_from_triple(result_sign, str(coeff), exp) # unlike exp, ln and log10, the power function respects the # rounding mode; no need to switch to ROUND_HALF_EVEN here @@ -2428,10 +2428,10 @@ def __pow__(self, other, modulo=None, context=None): if exact and not other._isinteger(): # pad with zeros up to length context.prec+1 if necessary; this # ensures that the Rounded signal will be raised. - if len(and._int) <= context.prec: - expdiff = context.prec + 1 - len(and._int) - and = _dec_from_triple(and._sign, and._int+'0'*expdiff, - and._exp-expdiff) + if len(ans._int) <= context.prec: + expdiff = context.prec + 1 - len(ans._int) + ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff, + ans._exp-expdiff) # create a copy of the current context, with cleared flags/traps newcontext = context.copy() @@ -2440,7 +2440,7 @@ def __pow__(self, other, modulo=None, context=None): newcontext.traps[exception] = 0 # round in the new context - and = and._fix(newcontext) + ans = ans._fix(newcontext) # raise Inexact, and if necessary, Underflow newcontext._raise_error(Inexact) @@ -2453,15 +2453,15 @@ def __pow__(self, other, modulo=None, context=None): # arguments. Note that the order of the exceptions is # important here. 
if newcontext.flags[Overflow]: - context._raise_error(Overflow, 'above Emax', and._sign) + context._raise_error(Overflow, 'above Emax', ans._sign) for exception in Underflow, Subnormal, Inexact, Rounded, Clamped: if newcontext.flags[exception]: context._raise_error(exception) else: - and = and._fix(context) + ans = ans._fix(context) - return and + return ans def __rpow__(self, other, modulo=None, context=None): """Swaps self/other and returns __pow__.""" @@ -2477,9 +2477,9 @@ def normalize(self, context=None): context = getcontext() if self._is_special: - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans dup = self._fix(context) if dup._isinfinity(): @@ -2508,9 +2508,9 @@ def quantize(self, exp, rounding=None, context=None): rounding = context.rounding if self._is_special or exp._is_special: - and = self._check_nans(exp, context) - if and: - return and + ans = self._check_nans(exp, context) + if ans: + return ans if exp._isinfinity() or self._isinfinity(): if exp._isinfinity() and self._isinfinity(): @@ -2524,8 +2524,8 @@ def quantize(self, exp, rounding=None, context=None): 'target exponent out of bounds in quantize') if not self: - and = _dec_from_triple(self._sign, '0', exp._exp) - return and._fix(context) + ans = _dec_from_triple(self._sign, '0', exp._exp) + return ans._fix(context) self_adjusted = self.adjusted() if self_adjusted > context.Emax: @@ -2535,26 +2535,26 @@ def quantize(self, exp, rounding=None, context=None): return context._raise_error(InvalidOperation, 'quantize result has too many digits for current context') - and = self._rescale(exp._exp, rounding) - if and.adjusted() > context.Emax: + ans = self._rescale(exp._exp, rounding) + if ans.adjusted() > context.Emax: return context._raise_error(InvalidOperation, 'exponent of quantize result too large for current context') - if len(and._int) > context.prec: + if len(ans._int) > context.prec: return context._raise_error(InvalidOperation, 'quantize result has too many digits for current context') # raise appropriate flags - if and and and.adjusted() < context.Emin: + if ans and ans.adjusted() < context.Emin: context._raise_error(Subnormal) - if and._exp > self._exp: - if and != self: + if ans._exp > self._exp: + if ans != self: context._raise_error(Inexact) context._raise_error(Rounded) # call to fix takes care of any necessary folddown, and # signals Clamped if necessary - and = and._fix(context) - return and + ans = ans._fix(context) + return ans def same_quantum(self, other, context=None): """Return True if self and other have the same exponent; otherwise @@ -2619,14 +2619,14 @@ def _round(self, places, rounding): raise ValueError("argument should be at least 1 in _round") if self._is_special or not self: return Decimal(self) - and = self._rescale(self.adjusted()+1-places, rounding) + ans = self._rescale(self.adjusted()+1-places, rounding) # it can happen that the rescale alters the adjusted exponent; # for example when rounding 99.97 to 3 significant figures. # When this happens we end up with an extra 0 at the end of # the number; a second rescale fixes this. - if and.adjusted() != self.adjusted(): - and = and._rescale(and.adjusted()+1-places, rounding) - return and + if ans.adjusted() != self.adjusted(): + ans = ans._rescale(ans.adjusted()+1-places, rounding) + return ans def to_integral_exact(self, rounding=None, context=None): """Rounds to a nearby integer. 
@@ -2639,9 +2639,9 @@ def to_integral_exact(self, rounding=None, context=None): this method except that it doesn't raise Inexact or Rounded. """ if self._is_special: - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans return Decimal(self) if self._exp >= 0: return Decimal(self) @@ -2651,11 +2651,11 @@ def to_integral_exact(self, rounding=None, context=None): context = getcontext() if rounding is None: rounding = context.rounding - and = self._rescale(0, rounding) - if and != self: + ans = self._rescale(0, rounding) + if ans != self: context._raise_error(Inexact) context._raise_error(Rounded) - return and + return ans def to_integral_value(self, rounding=None, context=None): """Rounds to the nearest integer, without raising inexact, rounded.""" @@ -2664,9 +2664,9 @@ def to_integral_value(self, rounding=None, context=None): if rounding is None: rounding = context.rounding if self._is_special: - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans return Decimal(self) if self._exp >= 0: return Decimal(self) @@ -2682,17 +2682,17 @@ def sqrt(self, context=None): context = getcontext() if self._is_special: - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans if self._isinfinity() and self._sign == 0: return Decimal(self) if not self: # exponent = self._exp // 2. sqrt(-0) = -0 - and = _dec_from_triple(self._sign, '0', self._exp // 2) - return and._fix(context) + ans = _dec_from_triple(self._sign, '0', self._exp // 2) + return ans._fix(context) if self._sign == 1: return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0') @@ -2765,15 +2765,15 @@ def sqrt(self, context=None): if n % 5 == 0: n += 1 - and = _dec_from_triple(0, str(n), e) + ans = _dec_from_triple(0, str(n), e) # round, and fit to current context context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) - and = and._fix(context) + ans = ans._fix(context) context.rounding = rounding - return and + return ans def max(self, other, context=None): """Returns the larger value. @@ -2811,11 +2811,11 @@ def max(self, other, context=None): c = self.compare_total(other) if c == -1: - and = other + ans = other else: - and = self + ans = self - return and._fix(context) + return ans._fix(context) def min(self, other, context=None): """Returns the smaller value. @@ -2845,11 +2845,11 @@ def min(self, other, context=None): c = self.compare_total(other) if c == -1: - and = self + ans = self else: - and = other + ans = other - return and._fix(context) + return ans._fix(context) def _isinteger(self): """Returns whether self is an integer""" @@ -2889,9 +2889,9 @@ def compare_signal(self, other, context=None): NaNs taking precedence over quiet NaNs. 
""" other = _convert_other(other, raiseit = True) - and = self._compare_check_nans(other, context) - if and: - return and + ans = self._compare_check_nans(other, context) + if ans: + return ans return self.compare(other, context=context) def compare_total(self, other, context=None): @@ -3002,9 +3002,9 @@ def exp(self, context=None): context = getcontext() # exp(NaN) = NaN - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans # exp(-Infinity) = 0 if self._isinfinity() == -1: @@ -3032,16 +3032,16 @@ def exp(self, context=None): # larger exponent the result either overflows or underflows. if self._sign == 0 and adj > len(str((context.Emax+1)*3)): # overflow - and = _dec_from_triple(0, '1', context.Emax+1) + ans = _dec_from_triple(0, '1', context.Emax+1) elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)): # underflow to 0 - and = _dec_from_triple(0, '1', context.Etiny()-1) + ans = _dec_from_triple(0, '1', context.Etiny()-1) elif self._sign == 0 and adj < -p: # p+1 digits; final round will raise correct flags - and = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p) + ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p) elif self._sign == 1 and adj < -p-1: # p+1 digits; final round will raise correct flags - and = _dec_from_triple(0, '9'*(p+1), -p-1) + ans = _dec_from_triple(0, '9'*(p+1), -p-1) # general case else: op = _WorkRep(self) @@ -3059,16 +3059,16 @@ def exp(self, context=None): break extra += 3 - and = _dec_from_triple(0, str(coeff), exp) + ans = _dec_from_triple(0, str(coeff), exp) - # at this stage, and should round correctly with *any* + # at this stage, ans should round correctly with *any* # rounding mode, not just with ROUND_HALF_EVEN context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) - and = and._fix(context) + ans = ans._fix(context) context.rounding = rounding - return and + return ans def is_canonical(self): """Return True if self is canonical; otherwise return False. @@ -3158,9 +3158,9 @@ def ln(self, context=None): context = getcontext() # ln(NaN) = NaN - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans # ln(0.0) == -Infinity if not self: @@ -3193,13 +3193,13 @@ def ln(self, context=None): if coeff % (5*10**(len(str(abs(coeff)))-p-1)): break places += 3 - and = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) + ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) - and = and._fix(context) + ans = ans._fix(context) context.rounding = rounding - return and + return ans def _log10_exp_bound(self): """Compute a lower bound for the adjusted exponent of self.log10(). 
@@ -3238,9 +3238,9 @@ def log10(self, context=None): context = getcontext() # log10(NaN) = NaN - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans # log10(0.0) == -Infinity if not self: @@ -3258,7 +3258,7 @@ def log10(self, context=None): # log10(10**n) = n if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1): # answer may need rounding - and = Decimal(self._exp + len(self._int) - 1) + ans = Decimal(self._exp + len(self._int) - 1) else: # result is irrational, so necessarily inexact op = _WorkRep(self) @@ -3274,13 +3274,13 @@ def log10(self, context=None): if coeff % (5*10**(len(str(abs(coeff)))-p-1)): break places += 3 - and = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) + ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) - and = and._fix(context) + ans = ans._fix(context) context.rounding = rounding - return and + return ans def logb(self, context=None): """ Returns the exponent of the magnitude of self's MSD. @@ -3291,9 +3291,9 @@ def logb(self, context=None): without limiting the resulting exponent). """ # logb(NaN) = NaN - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans if context is None: context = getcontext() @@ -3309,8 +3309,8 @@ def logb(self, context=None): # otherwise, simply return the adjusted exponent of self, as a # Decimal. Note that no attempt is made to fit the result # into the current context. - and = Decimal(self.adjusted()) - return and._fix(context) + ans = Decimal(self.adjusted()) + return ans._fix(context) def _islogical(self): """Return True if self is a logical operand. 
@@ -3421,11 +3421,11 @@ def max_mag(self, other, context=None): c = self.compare_total(other) if c == -1: - and = other + ans = other else: - and = self + ans = self - return and._fix(context) + return ans._fix(context) def min_mag(self, other, context=None): """Compares the values numerically with their sign ignored.""" @@ -3451,20 +3451,20 @@ def min_mag(self, other, context=None): c = self.compare_total(other) if c == -1: - and = self + ans = self else: - and = other + ans = other - return and._fix(context) + return ans._fix(context) def next_minus(self, context=None): """Returns the largest representable number smaller than itself.""" if context is None: context = getcontext() - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans if self._isinfinity() == -1: return _NegativeInfinity @@ -3485,9 +3485,9 @@ def next_plus(self, context=None): if context is None: context = getcontext() - and = self._check_nans(context=context) - if and: - return and + ans = self._check_nans(context=context) + if ans: + return ans if self._isinfinity() == 1: return _Infinity @@ -3517,37 +3517,37 @@ def next_toward(self, other, context=None): if context is None: context = getcontext() - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans comparison = self._cmp(other) if comparison == 0: return self.copy_sign(other) if comparison == -1: - and = self.next_plus(context) + ans = self.next_plus(context) else: # comparison == 1 - and = self.next_minus(context) + ans = self.next_minus(context) - # decide which flags to raise using value of and - if and._isinfinity(): + # decide which flags to raise using value of ans + if ans._isinfinity(): context._raise_error(Overflow, 'Infinite result from next_toward', - and._sign) + ans._sign) context._raise_error(Inexact) context._raise_error(Rounded) - elif and.adjusted() < context.Emin: + elif ans.adjusted() < context.Emin: context._raise_error(Underflow) context._raise_error(Subnormal) context._raise_error(Inexact) context._raise_error(Rounded) # if precision == 1 then we don't raise Clamped for a # result 0E-Etiny. - if not and: + if not ans: context._raise_error(Clamped) - return and + return ans def number_class(self, context=None): """Returns an indication of the class of self. 
@@ -3602,9 +3602,9 @@ def rotate(self, other, context=None): other = _convert_other(other, raiseit=True) - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans if other._exp != 0: return context._raise_error(InvalidOperation) @@ -3635,9 +3635,9 @@ def scaleb(self, other, context=None): other = _convert_other(other, raiseit=True) - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans if other._exp != 0: return context._raise_error(InvalidOperation) @@ -3660,9 +3660,9 @@ def shift(self, other, context=None): other = _convert_other(other, raiseit=True) - and = self._check_nans(other, context) - if and: - return and + ans = self._check_nans(other, context) + if ans: + return ans if other._exp != 0: return context._raise_error(InvalidOperation) diff --git a/Lib/_strptime.py b/Lib/_strptime.py index d3f3382c64a4b8..cdc55e8daaffa6 100644 --- a/Lib/_strptime.py +++ b/Lib/_strptime.py @@ -2,7 +2,7 @@ CLASSES: LocaleTime -- Discovers and stores locale-specific time information - timer -- Creates regexes for pattern matching a string of text containing + TimeRE -- Creates regexes for pattern matching a string of text containing time information FUNCTIONS: @@ -337,7 +337,7 @@ def __calc_timezone(self): self.timezone = (no_saving, has_saving) -class timer(dict): +class TimeRE(dict): """Handle conversion from format directives to regexes.""" def __init__(self, locale_time=None): @@ -488,7 +488,7 @@ def compile(self, format): _cache_lock = _thread_allocate_lock() # DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock # first! -_TimeRE_cache = timer() +_TimeRE_cache = TimeRE() _CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache _regex_cache = {} @@ -529,7 +529,7 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): if (_getlang() != locale_time.lang or time.tzname != locale_time.tzname or time.daylight != locale_time.daylight): - _TimeRE_cache = timer() + _TimeRE_cache = TimeRE() _regex_cache.clear() locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: diff --git a/Lib/asyncio/graph.py b/Lib/asyncio/graph.py index 6db47eda0400b7..b5bfeb1630a159 100644 --- a/Lib/asyncio/graph.py +++ b/Lib/asyncio/graph.py @@ -17,7 +17,7 @@ 'FutureCallGraph', ) -# Sadly, we can't reuse the traceback module's datastructures as those +# Sadly, we can't re-use the traceback module's datastructures as those # are tailored for error reporting, whereas we need to represent an # async call graph. # diff --git a/Lib/email/charset.py b/Lib/email/charset.py index 761430671a2259..5036c3f58a5633 100644 --- a/Lib/email/charset.py +++ b/Lib/email/charset.py @@ -221,12 +221,12 @@ def __init__(self, input_charset=DEFAULT_CHARSET): # We can try to guess which encoding and conversion to use by the # charset_map dictionary. Try that first, but let the user override # it. - hence, benc, conv = CHARSETS.get(self.input_charset, + henc, benc, conv = CHARSETS.get(self.input_charset, (SHORTEST, BASE64, None)) if not conv: conv = self.input_charset # Set the attributes, allowing the arguments to override the default. - self.header_encoding = hence + self.header_encoding = henc self.body_encoding = benc self.output_charset = ALIASES.get(conv, conv) # Now set the codecs. 
If one isn't defined for input_charset, diff --git a/Lib/encodings/cp1006.py b/Lib/encodings/cp1006.py index 827b92e51af6f9..a1221c3ef1ce52 100644 --- a/Lib/encodings/cp1006.py +++ b/Lib/encodings/cp1006.py @@ -228,9 +228,9 @@ def getregentry(): '\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM '\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM '\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM - '\ufe93' # 0xB7 -> ARABIC LETTER THE MARBUTA ISOLATED FORM - '\ufe95' # 0xB8 -> ARABIC LETTER THE ISOLATED FORM - '\ufe97' # 0xB9 -> ARABIC LETTER THE INITIAL FORM + '\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM + '\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM + '\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM '\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM '\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM '\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM diff --git a/Lib/encodings/cp1253.py b/Lib/encodings/cp1253.py index 5a1b148a60c942..ec9c0972d10d72 100644 --- a/Lib/encodings/cp1253.py +++ b/Lib/encodings/cp1253.py @@ -248,7 +248,7 @@ def getregentry(): '\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA '\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA '\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA - '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMBDA + '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA '\u039c' # 0xCC -> GREEK CAPITAL LETTER MU '\u039d' # 0xCD -> GREEK CAPITAL LETTER NU '\u039e' # 0xCE -> GREEK CAPITAL LETTER XI @@ -280,7 +280,7 @@ def getregentry(): '\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA '\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA '\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMBDA + '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA '\u03bc' # 0xEC -> GREEK SMALL LETTER MU '\u03bd' # 0xED -> GREEK SMALL LETTER NU '\u03be' # 0xEE -> GREEK SMALL LETTER XI diff --git a/Lib/encodings/cp1256.py b/Lib/encodings/cp1256.py index a105efb84b30f1..fd6afab52c634c 100644 --- a/Lib/encodings/cp1256.py +++ b/Lib/encodings/cp1256.py @@ -246,8 +246,8 @@ def getregentry(): '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE '\u0627' # 0xC7 -> ARABIC LETTER ALEF '\u0628' # 0xC8 -> ARABIC LETTER BEH - '\u0629' # 0xC9 -> ARABIC LETTER THE MARBUTA - '\u062a' # 0xCA -> ARABIC LETTER THE + '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA + '\u062a' # 0xCA -> ARABIC LETTER TEH '\u062b' # 0xCB -> ARABIC LETTER THEH '\u062c' # 0xCC -> ARABIC LETTER JEEM '\u062d' # 0xCD -> ARABIC LETTER HAH diff --git a/Lib/encodings/cp720.py b/Lib/encodings/cp720.py index 8bdf6cd2405c2d..96d609616c4d28 100644 --- a/Lib/encodings/cp720.py +++ b/Lib/encodings/cp720.py @@ -208,8 +208,8 @@ def getregentry(): '\u0626' # 0x9E -> ARABIC LETTER YEH WITH HAMZA ABOVE '\u0627' # 0x9F -> ARABIC LETTER ALEF '\u0628' # 0xA0 -> ARABIC LETTER BEH - '\u0629' # 0xA1 -> ARABIC LETTER THE MARBUTA - '\u062a' # 0xA2 -> ARABIC LETTER THE + '\u0629' # 0xA1 -> ARABIC LETTER TEH MARBUTA + '\u062a' # 0xA2 -> ARABIC LETTER TEH '\u062b' # 0xA3 -> ARABIC LETTER THEH '\u062c' # 0xA4 -> ARABIC LETTER JEEM '\u062d' # 0xA5 -> ARABIC LETTER HAH diff --git a/Lib/encodings/cp737.py b/Lib/encodings/cp737.py index 26ca2c97f7883f..9685bae75b36cc 100644 --- a/Lib/encodings/cp737.py +++ b/Lib/encodings/cp737.py @@ -55,7 +55,7 @@ def getregentry(): 0x0087: 0x0398, # GREEK CAPITAL LETTER THETA 0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA 0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA - 0x008a: 0x039b, # GREEK CAPITAL LETTER LAMBDA + 0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA 0x008b: 0x039c, # GREEK CAPITAL LETTER MU 0x008c: 
0x039d, # GREEK CAPITAL LETTER NU 0x008d: 0x039e, # GREEK CAPITAL LETTER XI @@ -79,7 +79,7 @@ def getregentry(): 0x009f: 0x03b8, # GREEK SMALL LETTER THETA 0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA 0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA - 0x00a2: 0x03bb, # GREEK SMALL LETTER LAMBDA + 0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA 0x00a3: 0x03bc, # GREEK SMALL LETTER MU 0x00a4: 0x03bd, # GREEK SMALL LETTER NU 0x00a5: 0x03be, # GREEK SMALL LETTER XI @@ -316,7 +316,7 @@ def getregentry(): '\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA '\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA '\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA - '\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMBDA + '\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA '\u039c' # 0x008b -> GREEK CAPITAL LETTER MU '\u039d' # 0x008c -> GREEK CAPITAL LETTER NU '\u039e' # 0x008d -> GREEK CAPITAL LETTER XI @@ -340,7 +340,7 @@ def getregentry(): '\u03b8' # 0x009f -> GREEK SMALL LETTER THETA '\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA '\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMBDA + '\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA '\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU '\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU '\u03be' # 0x00a5 -> GREEK SMALL LETTER XI @@ -590,7 +590,7 @@ def getregentry(): 0x0398: 0x0087, # GREEK CAPITAL LETTER THETA 0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA 0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA - 0x039b: 0x008a, # GREEK CAPITAL LETTER LAMBDA + 0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA 0x039c: 0x008b, # GREEK CAPITAL LETTER MU 0x039d: 0x008c, # GREEK CAPITAL LETTER NU 0x039e: 0x008d, # GREEK CAPITAL LETTER XI @@ -620,7 +620,7 @@ def getregentry(): 0x03b8: 0x009f, # GREEK SMALL LETTER THETA 0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA 0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA - 0x03bb: 0x00a2, # GREEK SMALL LETTER LAMBDA + 0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA 0x03bc: 0x00a3, # GREEK SMALL LETTER MU 0x03bd: 0x00a4, # GREEK SMALL LETTER NU 0x03be: 0x00a5, # GREEK SMALL LETTER XI diff --git a/Lib/encodings/cp864.py b/Lib/encodings/cp864.py index 9d950dff5902f5..53df482dcd617a 100644 --- a/Lib/encodings/cp864.py +++ b/Lib/encodings/cp864.py @@ -85,7 +85,7 @@ def getregentry(): 0x00a7: None, # UNDEFINED 0x00a8: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM 0x00a9: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM - 0x00aa: 0xfe95, # ARABIC LETTER THE ISOLATED FORM + 0x00aa: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM 0x00ab: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM 0x00ac: 0x060c, # ARABIC COMMA 0x00ad: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM @@ -116,8 +116,8 @@ def getregentry(): 0x00c6: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM 0x00c7: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM 0x00c8: 0xfe91, # ARABIC LETTER BEH INITIAL FORM - 0x00c9: 0xfe93, # ARABIC LETTER THE MARBUTA ISOLATED FORM - 0x00ca: 0xfe97, # ARABIC LETTER THE INITIAL FORM + 0x00c9: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM + 0x00ca: 0xfe97, # ARABIC LETTER TEH INITIAL FORM 0x00cb: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM 0x00cc: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM 0x00cd: 0xfea3, # ARABIC LETTER HAH INITIAL FORM @@ -346,7 +346,7 @@ def getregentry(): '\ufffe' # 0x00a7 -> UNDEFINED '\ufe8e' # 0x00a8 -> ARABIC LETTER ALEF FINAL FORM '\ufe8f' # 0x00a9 -> ARABIC LETTER BEH ISOLATED FORM - '\ufe95' # 0x00aa -> ARABIC LETTER THE ISOLATED FORM + '\ufe95' # 0x00aa -> ARABIC LETTER TEH ISOLATED FORM '\ufe99' # 0x00ab -> ARABIC LETTER THEH ISOLATED FORM '\u060c' # 0x00ac -> ARABIC COMMA 
'\ufe9d' # 0x00ad -> ARABIC LETTER JEEM ISOLATED FORM @@ -377,8 +377,8 @@ def getregentry(): '\ufe8b' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM '\ufe8d' # 0x00c7 -> ARABIC LETTER ALEF ISOLATED FORM '\ufe91' # 0x00c8 -> ARABIC LETTER BEH INITIAL FORM - '\ufe93' # 0x00c9 -> ARABIC LETTER THE MARBUTA ISOLATED FORM - '\ufe97' # 0x00ca -> ARABIC LETTER THE INITIAL FORM + '\ufe93' # 0x00c9 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM + '\ufe97' # 0x00ca -> ARABIC LETTER TEH INITIAL FORM '\ufe9b' # 0x00cb -> ARABIC LETTER THEH INITIAL FORM '\ufe9f' # 0x00cc -> ARABIC LETTER JEEM INITIAL FORM '\ufea3' # 0x00cd -> ARABIC LETTER HAH INITIAL FORM @@ -627,9 +627,9 @@ def getregentry(): 0xfe8e: 0x00a8, # ARABIC LETTER ALEF FINAL FORM 0xfe8f: 0x00a9, # ARABIC LETTER BEH ISOLATED FORM 0xfe91: 0x00c8, # ARABIC LETTER BEH INITIAL FORM - 0xfe93: 0x00c9, # ARABIC LETTER THE MARBUTA ISOLATED FORM - 0xfe95: 0x00aa, # ARABIC LETTER THE ISOLATED FORM - 0xfe97: 0x00ca, # ARABIC LETTER THE INITIAL FORM + 0xfe93: 0x00c9, # ARABIC LETTER TEH MARBUTA ISOLATED FORM + 0xfe95: 0x00aa, # ARABIC LETTER TEH ISOLATED FORM + 0xfe97: 0x00ca, # ARABIC LETTER TEH INITIAL FORM 0xfe99: 0x00ab, # ARABIC LETTER THEH ISOLATED FORM 0xfe9b: 0x00cb, # ARABIC LETTER THEH INITIAL FORM 0xfe9d: 0x00ad, # ARABIC LETTER JEEM ISOLATED FORM diff --git a/Lib/encodings/cp869.py b/Lib/encodings/cp869.py index ee54cb4bc173bc..8d8a29b175c188 100644 --- a/Lib/encodings/cp869.py +++ b/Lib/encodings/cp869.py @@ -99,7 +99,7 @@ def getregentry(): 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x039a, # GREEK CAPITAL LETTER KAPPA - 0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMBDA + 0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMDA 0x00b7: 0x039c, # GREEK CAPITAL LETTER MU 0x00b8: 0x039d, # GREEK CAPITAL LETTER NU 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT @@ -146,7 +146,7 @@ def getregentry(): 0x00e2: 0x03b8, # GREEK SMALL LETTER THETA 0x00e3: 0x03b9, # GREEK SMALL LETTER IOTA 0x00e4: 0x03ba, # GREEK SMALL LETTER KAPPA - 0x00e5: 0x03bb, # GREEK SMALL LETTER LAMBDA + 0x00e5: 0x03bb, # GREEK SMALL LETTER LAMDA 0x00e6: 0x03bc, # GREEK SMALL LETTER MU 0x00e7: 0x03bd, # GREEK SMALL LETTER NU 0x00e8: 0x03be, # GREEK SMALL LETTER XI @@ -360,7 +360,7 @@ def getregentry(): '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\u039a' # 0x00b5 -> GREEK CAPITAL LETTER KAPPA - '\u039b' # 0x00b6 -> GREEK CAPITAL LETTER LAMBDA + '\u039b' # 0x00b6 -> GREEK CAPITAL LETTER LAMDA '\u039c' # 0x00b7 -> GREEK CAPITAL LETTER MU '\u039d' # 0x00b8 -> GREEK CAPITAL LETTER NU '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT @@ -407,7 +407,7 @@ def getregentry(): '\u03b8' # 0x00e2 -> GREEK SMALL LETTER THETA '\u03b9' # 0x00e3 -> GREEK SMALL LETTER IOTA '\u03ba' # 0x00e4 -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0x00e5 -> GREEK SMALL LETTER LAMBDA + '\u03bb' # 0x00e5 -> GREEK SMALL LETTER LAMDA '\u03bc' # 0x00e6 -> GREEK SMALL LETTER MU '\u03bd' # 0x00e7 -> GREEK SMALL LETTER NU '\u03be' # 0x00e8 -> GREEK SMALL LETTER XI @@ -603,7 +603,7 @@ def getregentry(): 0x0398: 0x00ac, # GREEK CAPITAL LETTER THETA 0x0399: 0x00ad, # GREEK CAPITAL LETTER IOTA 0x039a: 0x00b5, # GREEK CAPITAL LETTER KAPPA - 0x039b: 0x00b6, # GREEK CAPITAL LETTER LAMBDA + 0x039b: 0x00b6, # GREEK CAPITAL LETTER LAMDA 0x039c: 0x00b7, # GREEK CAPITAL LETTER MU 0x039d: 0x00b8, # GREEK CAPITAL LETTER NU 0x039e: 0x00bd, # GREEK CAPITAL LETTER XI @@ -634,7 +634,7 @@ def 
getregentry(): 0x03b8: 0x00e2, # GREEK SMALL LETTER THETA 0x03b9: 0x00e3, # GREEK SMALL LETTER IOTA 0x03ba: 0x00e4, # GREEK SMALL LETTER KAPPA - 0x03bb: 0x00e5, # GREEK SMALL LETTER LAMBDA + 0x03bb: 0x00e5, # GREEK SMALL LETTER LAMDA 0x03bc: 0x00e6, # GREEK SMALL LETTER MU 0x03bd: 0x00e7, # GREEK SMALL LETTER NU 0x03be: 0x00e8, # GREEK SMALL LETTER XI diff --git a/Lib/encodings/cp875.py b/Lib/encodings/cp875.py index ba1cac614eb5cd..c25a5a43bc49e1 100644 --- a/Lib/encodings/cp875.py +++ b/Lib/encodings/cp875.py @@ -127,7 +127,7 @@ def getregentry(): '!' # 0x4F -> EXCLAMATION MARK '&' # 0x50 -> AMPERSAND '\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA - '\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMBDA + '\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA '\u039c' # 0x53 -> GREEK CAPITAL LETTER MU '\u039d' # 0x54 -> GREEK CAPITAL LETTER NU '\u039e' # 0x55 -> GREEK CAPITAL LETTER XI @@ -203,7 +203,7 @@ def getregentry(): '\u03b8' # 0x9B -> GREEK SMALL LETTER THETA '\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA '\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0x9E -> GREEK SMALL LETTER LAMBDA + '\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA '\u03bc' # 0x9F -> GREEK SMALL LETTER MU '\xb4' # 0xA0 -> ACUTE ACCENT '~' # 0xA1 -> TILDE diff --git a/Lib/encodings/iso8859_6.py b/Lib/encodings/iso8859_6.py index bc41f6134c7828..b02ade6eaf4e13 100644 --- a/Lib/encodings/iso8859_6.py +++ b/Lib/encodings/iso8859_6.py @@ -246,8 +246,8 @@ def getregentry(): '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE '\u0627' # 0xC7 -> ARABIC LETTER ALEF '\u0628' # 0xC8 -> ARABIC LETTER BEH - '\u0629' # 0xC9 -> ARABIC LETTER THE MARBUTA - '\u062a' # 0xCA -> ARABIC LETTER THE + '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA + '\u062a' # 0xCA -> ARABIC LETTER TEH '\u062b' # 0xCB -> ARABIC LETTER THEH '\u062c' # 0xCC -> ARABIC LETTER JEEM '\u062d' # 0xCD -> ARABIC LETTER HAH diff --git a/Lib/encodings/iso8859_7.py b/Lib/encodings/iso8859_7.py index af08a306645983..d7b39cbc3a70ed 100644 --- a/Lib/encodings/iso8859_7.py +++ b/Lib/encodings/iso8859_7.py @@ -248,7 +248,7 @@ def getregentry(): '\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA '\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA '\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA - '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMBDA + '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA '\u039c' # 0xCC -> GREEK CAPITAL LETTER MU '\u039d' # 0xCD -> GREEK CAPITAL LETTER NU '\u039e' # 0xCE -> GREEK CAPITAL LETTER XI @@ -280,7 +280,7 @@ def getregentry(): '\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA '\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA '\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMBDA + '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA '\u03bc' # 0xEC -> GREEK SMALL LETTER MU '\u03bd' # 0xED -> GREEK SMALL LETTER NU '\u03be' # 0xEE -> GREEK SMALL LETTER XI diff --git a/Lib/encodings/mac_arabic.py b/Lib/encodings/mac_arabic.py index 0d1a99d5e7913c..72847e859c464f 100644 --- a/Lib/encodings/mac_arabic.py +++ b/Lib/encodings/mac_arabic.py @@ -118,8 +118,8 @@ def getregentry(): 0x00c6: 0x0626, # ARABIC LETTER YEH WITH HAMZA ABOVE 0x00c7: 0x0627, # ARABIC LETTER ALEF 0x00c8: 0x0628, # ARABIC LETTER BEH - 0x00c9: 0x0629, # ARABIC LETTER THE MARBUTA - 0x00ca: 0x062a, # ARABIC LETTER THE + 0x00c9: 0x0629, # ARABIC LETTER TEH MARBUTA + 0x00ca: 0x062a, # ARABIC LETTER TEH 0x00cb: 0x062b, # ARABIC LETTER THEH 0x00cc: 0x062c, # ARABIC LETTER JEEM 0x00cd: 0x062d, # ARABIC LETTER HAH @@ -379,8 +379,8 @@ def getregentry(): '\u0626' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA 
ABOVE '\u0627' # 0x00c7 -> ARABIC LETTER ALEF '\u0628' # 0x00c8 -> ARABIC LETTER BEH - '\u0629' # 0x00c9 -> ARABIC LETTER THE MARBUTA - '\u062a' # 0x00ca -> ARABIC LETTER THE + '\u0629' # 0x00c9 -> ARABIC LETTER TEH MARBUTA + '\u062a' # 0x00ca -> ARABIC LETTER TEH '\u062b' # 0x00cb -> ARABIC LETTER THEH '\u062c' # 0x00cc -> ARABIC LETTER JEEM '\u062d' # 0x00cd -> ARABIC LETTER HAH @@ -634,8 +634,8 @@ def getregentry(): 0x0626: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE 0x0627: 0x00c7, # ARABIC LETTER ALEF 0x0628: 0x00c8, # ARABIC LETTER BEH - 0x0629: 0x00c9, # ARABIC LETTER THE MARBUTA - 0x062a: 0x00ca, # ARABIC LETTER THE + 0x0629: 0x00c9, # ARABIC LETTER TEH MARBUTA + 0x062a: 0x00ca, # ARABIC LETTER TEH 0x062b: 0x00cb, # ARABIC LETTER THEH 0x062c: 0x00cc, # ARABIC LETTER JEEM 0x062d: 0x00cd, # ARABIC LETTER HAH diff --git a/Lib/encodings/mac_farsi.py b/Lib/encodings/mac_farsi.py index dd898e65d6e2df..e357d43510b5f6 100644 --- a/Lib/encodings/mac_farsi.py +++ b/Lib/encodings/mac_farsi.py @@ -246,8 +246,8 @@ def getregentry(): '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE '\u0627' # 0xC7 -> ARABIC LETTER ALEF '\u0628' # 0xC8 -> ARABIC LETTER BEH - '\u0629' # 0xC9 -> ARABIC LETTER THE MARBUTA - '\u062a' # 0xCA -> ARABIC LETTER THE + '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA + '\u062a' # 0xCA -> ARABIC LETTER TEH '\u062b' # 0xCB -> ARABIC LETTER THEH '\u062c' # 0xCC -> ARABIC LETTER JEEM '\u062d' # 0xCD -> ARABIC LETTER HAH diff --git a/Lib/encodings/mac_greek.py b/Lib/encodings/mac_greek.py index 55dc0de4af7ca4..d3d0c4f0c38755 100644 --- a/Lib/encodings/mac_greek.py +++ b/Lib/encodings/mac_greek.py @@ -209,7 +209,7 @@ def getregentry(): '\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA '\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA '\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA - '\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMBDA + '\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA '\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI '\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI '\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S @@ -281,7 +281,7 @@ def getregentry(): '\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA '\u03be' # 0xEA -> GREEK SMALL LETTER XI '\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA - '\u03bb' # 0xEC -> GREEK SMALL LETTER LAMBDA + '\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA '\u03bc' # 0xED -> GREEK SMALL LETTER MU '\u03bd' # 0xEE -> GREEK SMALL LETTER NU '\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON diff --git a/Lib/http/cookies.py b/Lib/http/cookies.py index 349cd2402ffc23..694b1b09a0567c 100644 --- a/Lib/http/cookies.py +++ b/Lib/http/cookies.py @@ -123,7 +123,7 @@ >>> C.output() 'Set-Cookie: number=7\r\nSet-Cookie: string=seven' -Finish. +Finis. """ # diff --git a/Lib/idlelib/CREDITS.txt b/Lib/idlelib/CREDITS.txt index ef776b761c06b7..bea3ba7c20de22 100644 --- a/Lib/idlelib/CREDITS.txt +++ b/Lib/idlelib/CREDITS.txt @@ -21,7 +21,7 @@ subprocess, and made a number of usability enhancements. Other contributors include Raymond Hettinger, Tony Lownds (Mac integration), Neal Norwitz (code check and clean-up), Ronald Oussoren (Mac integration), -Noam Raphael (Code Context, Call Tips, many other patches), and Chui They (RPC +Noam Raphael (Code Context, Call Tips, many other patches), and Chui Tey (RPC integration, debugger integration and persistent breakpoints). 
Scott David Daniels, Tal Einat, Hernan Foffani, Christos Georgiou, diff --git a/Lib/idlelib/News3.txt b/Lib/idlelib/News3.txt index f9a2ffdc75cbdc..30784578cc637f 100644 --- a/Lib/idlelib/News3.txt +++ b/Lib/idlelib/News3.txt @@ -1173,7 +1173,7 @@ Released on 2016-12-23 - Issue #25198: Enhance the initial html viewer now used for Idle Help. * Properly indent fixed-pitch text (patch by Mark Roseman). * Give code snippet a very Sphinx-like light blueish-gray background. - * Reuse initial width and height set by users for shell and editor. + * Re-use initial width and height set by users for shell and editor. * When the Table of Contents (TOC) menu is used, put the section header at the top of the screen. diff --git a/Lib/idlelib/editor.py b/Lib/idlelib/editor.py index 15da5d52cd4366..17b498f63ba43b 100644 --- a/Lib/idlelib/editor.py +++ b/Lib/idlelib/editor.py @@ -607,11 +607,11 @@ def rmenu_check_cut(self): def rmenu_check_copy(self): try: - index = self.text.index('sel.first') + indx = self.text.index('sel.first') except TclError: return 'disabled' else: - return 'normal' if index else 'disabled' + return 'normal' if indx else 'disabled' def rmenu_check_paste(self): try: diff --git a/Lib/idlelib/idle_test/test_editmenu.py b/Lib/idlelib/idle_test/test_editmenu.py index bf0e6eccb8e940..17478473a3d1b2 100644 --- a/Lib/idlelib/idle_test/test_editmenu.py +++ b/Lib/idlelib/idle_test/test_editmenu.py @@ -37,37 +37,37 @@ def tearDownClass(cls): def test_paste_text(self): "Test pasting into text with and without a selection." text = self.text - for tag, and in ('', 'onetwo\n'), ('sel', 'two\n'): - with self.subTest(tag=tag, and=and): + for tag, ans in ('', 'onetwo\n'), ('sel', 'two\n'): + with self.subTest(tag=tag, ans=ans): text.delete('1.0', 'end') text.insert('1.0', 'one', tag) text.event_generate('<<Paste>>') - self.assertEqual(text.get('1.0', 'end'), and) + self.assertEqual(text.get('1.0', 'end'), ans) def test_paste_entry(self): "Test pasting into an entry with and without a selection." # Generated <<Paste>> fails for tk entry without empty select # range for 'no selection'. Live widget works fine. for entry in self.entry, self.tentry: - for end, and in (0, 'onetwo'), ('end', 'two'): - with self.subTest(entry=entry, end=end, and=and): + for end, ans in (0, 'onetwo'), ('end', 'two'): + with self.subTest(entry=entry, end=end, ans=ans): entry.delete(0, 'end') entry.insert(0, 'one') entry.select_range(0, end) entry.event_generate('<<Paste>>') - self.assertEqual(entry.get(), and) + self.assertEqual(entry.get(), ans) def test_paste_spin(self): "Test pasting into a spinbox with and without a selection." # See note above for entry.
spin = self.spin - for end, and in (0, 'onetwo'), ('end', 'two'): - with self.subTest(end=end, and=and): + for end, ans in (0, 'onetwo'), ('end', 'two'): + with self.subTest(end=end, ans=ans): spin.delete(0, 'end') spin.insert(0, 'one') spin.selection('range', 0, end) # see note spin.event_generate('<<Paste>>') - self.assertEqual(spin.get(), and) + self.assertEqual(spin.get(), ans) if __name__ == '__main__': diff --git a/Lib/idlelib/run.py b/Lib/idlelib/run.py index 71081c1611a853..a30db99a619a93 100644 --- a/Lib/idlelib/run.py +++ b/Lib/idlelib/run.py @@ -265,12 +265,12 @@ def print_exc(typ, exc, tb): print("\nDuring handling of the above exception, " "another exception occurred:\n", file=efile) if tb: - the = traceback.extract_tb(tb) + tbe = traceback.extract_tb(tb) print('Traceback (most recent call last):', file=efile) exclude = ("run.py", "rpc.py", "threading.py", "queue.py", "debugger_r.py", "bdb.py") - cleanup_traceback(the, exclude) - traceback.print_list(the, file=efile) + cleanup_traceback(tbe, exclude) + traceback.print_list(tbe, file=efile) lines = get_message_lines(typ, exc, tb) for line in lines: print(line, end='', file=efile) diff --git a/Lib/idlelib/searchbase.py b/Lib/idlelib/searchbase.py index cfcecbe626ae11..c68a6ca339af04 100644 --- a/Lib/idlelib/searchbase.py +++ b/Lib/idlelib/searchbase.py @@ -109,7 +109,7 @@ def make_entry(self, label_text, var): label = Label(self.frame, text=label_text) label.grid(row=self.row, column=0, sticky="nw") entry = Entry(self.frame, textvariable=var, exportselection=0) - entry.grid(row=self.row, column=1, sticky="new") + entry.grid(row=self.row, column=1, sticky="nwe") self.row = self.row + 1 return entry, label @@ -129,7 +129,7 @@ def make_frame(self,labeltext=None): else: label = '' frame = Frame(self.frame) - frame.grid(row=self.row, column=1, columnspan=1, sticky="new") + frame.grid(row=self.row, column=1, columnspan=1, sticky="nwe") self.row = self.row + 1 return frame, label diff --git a/Lib/imaplib.py b/Lib/imaplib.py index e663e55ad6ebc1..2c3925958d011b 100644 --- a/Lib/imaplib.py +++ b/Lib/imaplib.py @@ -1524,7 +1524,7 @@ def _pop(self, timeout, default=('', None)): # Historical Note: # The timeout was originally implemented using select() after # checking for the presence of already-buffered data. - # That allowed timeouts on pipe connections like IMAP4_stream. + # That allowed timeouts on pipe connetions like IMAP4_stream.
# However, it seemed possible that SSL data arriving without any # IMAP data afterward could cause select() to indicate available # application data when there was none, leading to a read() call diff --git a/Lib/inspect.py b/Lib/inspect.py index 2fa590db7136b4..183e67fabf966e 100644 --- a/Lib/inspect.py +++ b/Lib/inspect.py @@ -1395,14 +1395,14 @@ def _missing_arguments(f_name, argnames, pos, values): "" if missing == 1 else "s", s)) def _too_many(f_name, args, kwonly, varargs, defcount, given, values): - at least = len(args) - defcount + atleast = len(args) - defcount kwonly_given = len([arg for arg in kwonly if arg in values]) if varargs: - plural = at least != 1 - sig = "at least %d" % (at least,) + plural = atleast != 1 + sig = "at least %d" % (atleast,) elif defcount: plural = True - sig = "from %d to %d" % (at least, len(args)) + sig = "from %d to %d" % (atleast, len(args)) else: plural = len(args) != 1 sig = str(len(args)) diff --git a/Lib/locale.py b/Lib/locale.py index bbeab95f679b65..0bde7ed51c66c1 100644 --- a/Lib/locale.py +++ b/Lib/locale.py @@ -124,7 +124,7 @@ def _grouping_intervals(grouping): # if grouping is -1, we are done if interval == CHAR_MAX: return - # 0: reuse last group ad infinitum + # 0: re-use last group ad infinitum if interval == 0: if last_interval is None: raise ValueError("invalid grouping") diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py index 93f5d90a5137e8..c5860d53b1bdff 100644 --- a/Lib/logging/__init__.py +++ b/Lib/logging/__init__.py @@ -886,7 +886,7 @@ def _removeHandlerRef(wr): """ # This function can be called during module teardown, when globals are # set to None. It can also be called from another thread. So we need to - # preemptively grab the necessary globals and check if they're None, + # pre-emptively grab the necessary globals and check if they're None, # to prevent race conditions and failures during interpreter shutdown. handlers, lock = _handlerList, _lock if lock and handlers: diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py index 5b794f7fba557b..2748b5941eade2 100644 --- a/Lib/logging/handlers.py +++ b/Lib/logging/handlers.py @@ -750,7 +750,7 @@ class SysLogHandler(logging.Handler): """ A handler class which sends formatted logging records to a syslog server. Based on Sam Rushing's syslog module: - http://www.nightmare.com/squirrel/python-ext/misc/syslog.py + http://www.nightmare.com/squirl/python-ext/misc/syslog.py Contributed by Nicolas Untz (after which minor refactoring changes have been made). """ diff --git a/Lib/multiprocessing/resource_tracker.py b/Lib/multiprocessing/resource_tracker.py index 14ff2cc927b56b..05633ac21a259c 100644 --- a/Lib/multiprocessing/resource_tracker.py +++ b/Lib/multiprocessing/resource_tracker.py @@ -76,7 +76,7 @@ def _reentrant_call_error(self): "Reentrant call into the multiprocessing resource tracker") def __del__(self): - # making sure child processes are cleaned before ResourceTracker + # making sure child processess are cleaned before ResourceTracker # gets destructed. # see https://github.com/python/cpython/issues/88887 self._stop(use_blocking_lock=False) diff --git a/Lib/pickle.py b/Lib/pickle.py index ad94c5a17b8855..beaefae0479d3c 100644 --- a/Lib/pickle.py +++ b/Lib/pickle.py @@ -480,7 +480,7 @@ def clear_memo(self): The memo is the data structure that remembers which objects the pickler has already seen, so that shared or recursive objects are pickled by reference and not by value. This method is - useful when reusing picklers. + useful when re-using picklers. 
""" self.memo.clear() diff --git a/Lib/sysconfig/__init__.py b/Lib/sysconfig/__init__.py index e36cafa26d2721..49e0986517ce97 100644 --- a/Lib/sysconfig/__init__.py +++ b/Lib/sysconfig/__init__.py @@ -364,7 +364,7 @@ def _get_sysconfigdata(): def _installation_is_relocated(): - """Is the Python installation running from a different prefix than what was targeted when building?""" + """Is the Python installation running from a different prefix than what was targetted when building?""" if os.name != 'posix': raise NotImplementedError('sysconfig._installation_is_relocated() is currently only supported on POSIX') diff --git a/Lib/test/bisect_cmd.py b/Lib/test/bisect_cmd.py index aa52ead36d4536..aee2e8ac120852 100755 --- a/Lib/test/bisect_cmd.py +++ b/Lib/test/bisect_cmd.py @@ -13,7 +13,7 @@ Load an existing list of tests from a file using -i option: - ./python -m test --list-cases -m file tests test_os > tests + ./python -m test --list-cases -m FileTests test_os > tests ./python -m test.bisect_cmd -i tests test_os """ diff --git a/Lib/test/configdata/cfgparser.2 b/Lib/test/configdata/cfgparser.2 index 9426d25e943282..cfcfef23bfd493 100644 --- a/Lib/test/configdata/cfgparser.2 +++ b/Lib/test/configdata/cfgparser.2 @@ -338,7 +338,7 @@ [homes] comment = Home Directories - browsable = no + browseable = no writable = yes # You can enable VFS recycle bin on a per share basis: @@ -369,7 +369,7 @@ # the default is to use the user's home directory ;[Profiles] ; path = /var/lib/samba/profiles -; browsable = no +; browseable = no ; guest ok = yes @@ -384,7 +384,7 @@ [printers] comment = All Printers path = /var/spool/samba - browsable = no + browseable = no # to allow user 'guest account' to print. guest ok = yes writable = no @@ -414,7 +414,7 @@ [print$] path = /var/lib/samba/printers - browsable = yes + browseable = yes read only = yes write list = @adm root diff --git a/Lib/test/crashers/README b/Lib/test/crashers/README index ccbd98bf293e34..7111946b93b280 100644 --- a/Lib/test/crashers/README +++ b/Lib/test/crashers/README @@ -5,7 +5,7 @@ too obscure to invest the effort. Each test should fail when run from the command line: - ./python Lib/test/crashes/weakref_in_del.py + ./python Lib/test/crashers/weakref_in_del.py Put as much info into a docstring or comments to help determine the cause of the failure, as well as an issue number or link if it exists. diff --git a/Lib/test/crashers/infinite_loop_re.py b/Lib/test/crashers/infinite_loop_re.py index c8c69ac718007a..c84f28d601f865 100644 --- a/Lib/test/crashers/infinite_loop_re.py +++ b/Lib/test/crashers/infinite_loop_re.py @@ -1,6 +1,6 @@ # This was taken from https://bugs.python.org/issue1541697 -# It's not technically a crash. It may not even truly be infinite, +# It's not technically a crasher. It may not even truly be infinite, # however, I haven't waited a long time to see the result. It takes # 100% of CPU while running this and should be fixed. 
diff --git a/Lib/test/decimaltestdata/base.decTest b/Lib/test/decimaltestdata/base.decTest index 90ab00cec90256..bc4cef919f3480 100644 --- a/Lib/test/decimaltestdata/base.decTest +++ b/Lib/test/decimaltestdata/base.decTest @@ -610,7 +610,7 @@ basx563 toSci "NaNs" -> NaN Conversion_syntax basx564 toSci "Infi" -> NaN Conversion_syntax basx565 toSci "Infin" -> NaN Conversion_syntax basx566 toSci "Infini" -> NaN Conversion_syntax -basx567 toSci "Infinite" -> NaN Conversion_syntax +basx567 toSci "Infinit" -> NaN Conversion_syntax basx568 toSci "-Infinit" -> NaN Conversion_syntax basx569 toSci "0Inf" -> NaN Conversion_syntax basx570 toSci "9Inf" -> NaN Conversion_syntax diff --git a/Lib/test/decimaltestdata/ddBase.decTest b/Lib/test/decimaltestdata/ddBase.decTest index cf62b99e803786..fbd6ccd94dea80 100644 --- a/Lib/test/decimaltestdata/ddBase.decTest +++ b/Lib/test/decimaltestdata/ddBase.decTest @@ -594,7 +594,7 @@ ddbas563 toSci "NaNs" -> NaN Conversion_syntax ddbas564 toSci "Infi" -> NaN Conversion_syntax ddbas565 toSci "Infin" -> NaN Conversion_syntax ddbas566 toSci "Infini" -> NaN Conversion_syntax -ddbas567 toSci "Infinite" -> NaN Conversion_syntax +ddbas567 toSci "Infinit" -> NaN Conversion_syntax ddbas568 toSci "-Infinit" -> NaN Conversion_syntax ddbas569 toSci "0Inf" -> NaN Conversion_syntax ddbas570 toSci "9Inf" -> NaN Conversion_syntax diff --git a/Lib/test/decimaltestdata/dqBase.decTest b/Lib/test/decimaltestdata/dqBase.decTest index c8eed0123fd2f5..6bb463388e15fa 100644 --- a/Lib/test/decimaltestdata/dqBase.decTest +++ b/Lib/test/decimaltestdata/dqBase.decTest @@ -579,7 +579,7 @@ dqbas563 toSci "NaNs" -> NaN Conversion_syntax dqbas564 toSci "Infi" -> NaN Conversion_syntax dqbas565 toSci "Infin" -> NaN Conversion_syntax dqbas566 toSci "Infini" -> NaN Conversion_syntax -dqbas567 toSci "Infinite" -> NaN Conversion_syntax +dqbas567 toSci "Infinit" -> NaN Conversion_syntax dqbas568 toSci "-Infinit" -> NaN Conversion_syntax dqbas569 toSci "0Inf" -> NaN Conversion_syntax dqbas570 toSci "9Inf" -> NaN Conversion_syntax diff --git a/Lib/test/decimaltestdata/dsBase.decTest b/Lib/test/decimaltestdata/dsBase.decTest index e6be438280f131..8ac45fc552152e 100644 --- a/Lib/test/decimaltestdata/dsBase.decTest +++ b/Lib/test/decimaltestdata/dsBase.decTest @@ -558,7 +558,7 @@ dsbas563 toSci "NaNs" -> NaN Conversion_syntax dsbas564 toSci "Infi" -> NaN Conversion_syntax dsbas565 toSci "Infin" -> NaN Conversion_syntax dsbas566 toSci "Infini" -> NaN Conversion_syntax -dsbas567 toSci "Infinite" -> NaN Conversion_syntax +dsbas567 toSci "Infinit" -> NaN Conversion_syntax dsbas568 toSci "-Infinit" -> NaN Conversion_syntax dsbas569 toSci "0Inf" -> NaN Conversion_syntax dsbas570 toSci "9Inf" -> NaN Conversion_syntax diff --git a/Lib/test/encoded_modules/__init__.py b/Lib/test/encoded_modules/__init__.py index cececb6b21acb0..ec43252aad2a46 100644 --- a/Lib/test/encoded_modules/__init__.py +++ b/Lib/test/encoded_modules/__init__.py @@ -18,6 +18,6 @@ test_strings = ( ('iso_8859_1', 'iso-8859-1', "Les hommes ont oublié cette vérité, " "dit le renard. Mais tu ne dois pas l'oublier. 
Tu deviens " - "responsible pour toujours de ce que tu as apprivoisé."), + "responsable pour toujours de ce que tu as apprivoisé."), ('koi8_r', 'koi8-r', "Познание бесконечности требует бесконечного времени.") ) diff --git a/Lib/test/encoded_modules/module_iso_8859_1.py b/Lib/test/encoded_modules/module_iso_8859_1.py index dc230de1eba1c7..8f4a15c905dc12 100644 --- a/Lib/test/encoded_modules/module_iso_8859_1.py +++ b/Lib/test/encoded_modules/module_iso_8859_1.py @@ -2,4 +2,4 @@ # -*- encoding: iso-8859-1 -*- test = ("Les hommes ont oubli cette vrit, " "dit le renard. Mais tu ne dois pas l'oublier. Tu deviens " - "responsible pour toujours de ce que tu as apprivois.") + "responsable pour toujours de ce que tu as apprivois.") diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py index 1fa53b9443123c..07681d75448e24 100644 --- a/Lib/test/libregrtest/cmdline.py +++ b/Lib/test/libregrtest/cmdline.py @@ -138,8 +138,8 @@ Pattern examples: - test method: test_stat_attributes -- test class: file tests -- test identifier: test_os.file tests.test_stat_attributes +- test class: FileTests +- test identifier: test_os.FileTests.test_stat_attributes """ diff --git a/Lib/test/libregrtest/filter.py b/Lib/test/libregrtest/filter.py index 412a0d70e158ad..41372e427ffd03 100644 --- a/Lib/test/libregrtest/filter.py +++ b/Lib/test/libregrtest/filter.py @@ -20,7 +20,7 @@ def match_test(test): def _is_full_match_test(pattern): # If a pattern contains at least one dot, it's considered # as a full test identifier. - # Example: 'test.test_os.file tests.test_access'. + # Example: 'test.test_os.FileTests.test_access'. # # ignore patterns which contain fnmatch patterns: '*', '?', '[...]' # or '[!...]'. For example, ignore 'test_access*'. @@ -66,11 +66,11 @@ def _compile_match_function(patterns): def match_test_regex(test_id, regex_match=regex_match): if regex_match(test_id): # The regex matches the whole identifier, for example - # 'test.test_os.file tests.test_access'. + # 'test.test_os.FileTests.test_access'. return True else: # Try to match parts of the test identifier. - # For example, split 'test.test_os.file tests.test_access' + # For example, split 'test.test_os.FileTests.test_access' # into: 'test', 'test_os', 'FileTests' and 'test_access'. 
return any(map(regex_match, test_id.split("."))) diff --git a/Lib/test/libregrtest/findtests.py b/Lib/test/libregrtest/findtests.py index 39cab640281f5d..f01c1240774707 100644 --- a/Lib/test/libregrtest/findtests.py +++ b/Lib/test/libregrtest/findtests.py @@ -65,16 +65,16 @@ def split_test_packages(tests, *, testdir: StrPath | None = None, exclude: Container[str] = (), split_test_dirs=SPLITTESTDIRS) -> list[TestName]: testdir = findtestdir(testdir) - split = [] + splitted = [] for name in tests: if name in split_test_dirs: subdir = os.path.join(testdir, name) - split.extend(findtests(testdir=subdir, exclude=exclude, + splitted.extend(findtests(testdir=subdir, exclude=exclude, split_test_dirs=split_test_dirs, base_mod=name)) else: - split.append(name) - return split + splitted.append(name) + return splitted def _list_cases(suite: unittest.TestSuite) -> None: diff --git a/Lib/test/mime.types b/Lib/test/mime.types index 437a38411b214d..eb39a17b6bf4b2 100644 --- a/Lib/test/mime.types +++ b/Lib/test/mime.types @@ -584,7 +584,7 @@ application/vnd.musician mus application/vnd.muvee.style msty application/vnd.ncd.control application/vnd.ncd.reference -application/vnd.nirvana entity request bkm kcm +application/vnd.nervana entity request bkm kcm application/vnd.netfpx application/vnd.neurolanguage.nlu nlu application/vnd.noblenet-directory nnd @@ -824,7 +824,7 @@ application/vnd.sealed.net # spp: application/scvp-vp-response application/vnd.sealed.ppt sppt s1p application/vnd.sealed.tiff stif -application/vnd.sealed.xls sxls xsl s1e +application/vnd.sealed.xls sxls sxl s1e # stm: audio/x-stm application/vnd.sealedmedia.softseal.html stml s1h application/vnd.sealedmedia.softseal.pdf spdf spd s1a @@ -834,7 +834,7 @@ application/vnd.semd semd application/vnd.semf semf application/vnd.shana.informed.formdata ifm application/vnd.shana.informed.formtemplate itp -application/vnd.shana.informed.interchange if +application/vnd.shana.informed.interchange iif application/vnd.shana.informed.package ipk application/vnd.SimTech-MindMapper twd twds application/vnd.smaf mmf diff --git a/Lib/test/multibytecodec_support.py b/Lib/test/multibytecodec_support.py index 9600a4bc03acb8..dbf0cc428e3ff6 100644 --- a/Lib/test/multibytecodec_support.py +++ b/Lib/test/multibytecodec_support.py @@ -324,31 +324,31 @@ def unichrs(s): if len(csetch) == 1 and 0x80 <= csetch[0]: continue - unix = unichrs(data[1]) - if ord(unix) == 0xfffd or unix in urt_wa: + unich = unichrs(data[1]) + if ord(unich) == 0xfffd or unich in urt_wa: continue - urt_wa[unix] = csetch + urt_wa[unich] = csetch - self._testpoint(csetch, unix) + self._testpoint(csetch, unich) def _test_mapping_file_ucm(self): with self.open_mapping_file() as f: ucmdata = f.read() uc = re.findall('', ucmdata) for uni, coded in uc: - unix = chr(int(uni, 16)) + unich = chr(int(uni, 16)) codech = bytes.fromhex(coded) - self._testpoint(codech, unix) + self._testpoint(codech, unich) def test_mapping_supplemental(self): for mapping in self.supmaps: self._testpoint(*mapping) - def _testpoint(self, csetch, unix): - if (csetch, unix) not in self.pass_enctest: - self.assertEqual(unix.encode(self.encoding), csetch) - if (csetch, unix) not in self.pass_dectest: - self.assertEqual(str(csetch, self.encoding), unix) + def _testpoint(self, csetch, unich): + if (csetch, unich) not in self.pass_enctest: + self.assertEqual(unich.encode(self.encoding), csetch) + if (csetch, unich) not in self.pass_dectest: + self.assertEqual(str(csetch, self.encoding), unich) def test_errorhandle(self): for 
source, scheme, expected in self.codectests: diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 5fbcd4eba97062..9a3a26a8400844 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -2838,7 +2838,7 @@ def test_unicode_high_plane(self): self.assert_is_copy(t, t2) def test_unicode_memoization(self): - # Repeated str is reused (even when escapes added). + # Repeated str is re-used (even when escapes added). for proto in protocols: for s in '', 'xyz', 'xyz\n', 'x\\yz', 'x\xa1yz\r': p = self.dumps((s, s), proto) diff --git a/Lib/test/support/asyncore.py b/Lib/test/support/asyncore.py index 9d1e317cf1968d..870e42837640de 100644 --- a/Lib/test/support/asyncore.py +++ b/Lib/test/support/asyncore.py @@ -37,7 +37,7 @@ most popular way to do it, but there is another very different technique, that lets you have nearly all the advantages of multi-threading, without actually using multiple threads. it's really only practical if your program -is largely I/O bound. If your program is CPU bound, then preemptive +is largely I/O bound. If your program is CPU bound, then pre-emptive scheduled threads are probably what you really need. Network servers are rarely CPU-bound, however. @@ -295,7 +295,7 @@ def set_socket(self, sock, map=None): self.add_channel(map) def set_reuse_addr(self): - # try to reuse a server port if possible + # try to re-use a server port if possible try: self.socket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, diff --git a/Lib/test/support/os_helper.py b/Lib/test/support/os_helper.py index 3b3c363d9040b3..2c45fe2369ec36 100644 --- a/Lib/test/support/os_helper.py +++ b/Lib/test/support/os_helper.py @@ -85,7 +85,7 @@ '\u05D0', # U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic '\u060C', - # U+062A (Arabic Letter The): cp720 + # U+062A (Arabic Letter Teh): cp720 '\u062A', # U+0E01 (Thai Character Ko Kai): cp874 '\u0E01', diff --git a/Lib/test/support/smtpd.py b/Lib/test/support/smtpd.py index 39670f7be7e9ad..6537679db9ad24 100755 --- a/Lib/test/support/smtpd.py +++ b/Lib/test/support/smtpd.py @@ -637,7 +637,7 @@ def __init__(self, localaddr, remoteaddr, gai_results = socket.getaddrinfo(*localaddr, family=family, type=socket.SOCK_STREAM) self.create_socket(gai_results[0][0], gai_results[0][1]) - # try to reuse a server port if possible + # try to re-use a server port if possible self.set_reuse_addr() self.bind(localaddr) self.listen(5) diff --git a/Lib/test/test_asyncio/test_sslproto.py b/Lib/test/test_asyncio/test_sslproto.py index 859175858e5f96..3e304c166425b0 100644 --- a/Lib/test/test_asyncio/test_sslproto.py +++ b/Lib/test/test_asyncio/test_sslproto.py @@ -116,7 +116,7 @@ def test_connection_lost_when_busy(self): sock.fileno = mock.Mock(return_value=12345) sock.send = mock.Mock(side_effect=BrokenPipeError) - # construct StreamWriter chain that contains loop dependent logic this emulates + # construct StreamWriter chain that contains loop dependant logic this emulates # what _make_ssl_transport() does in BaseSelectorEventLoop reader = asyncio.StreamReader(limit=2 ** 16, loop=self.loop) protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop) diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py index 363e73fac0cded..931a43816a257a 100644 --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -1491,7 +1491,7 @@ async def coro(i): with contextlib.closing(asyncio.new_event_loop()) as loop: # Coroutines shouldn't be yielded back as finished coroutines - # can't be 
reused. + # can't be re-used. awaitables_in = frozenset( (coro(0), coro(1), coro(2), coro(3)) ) @@ -1922,13 +1922,13 @@ async def sleeper(): base_exc = SystemExit() - async def notmuch(): + async def notmutch(): try: await sleeper() except asyncio.CancelledError: raise base_exc - task = self.new_task(loop, notmuch()) + task = self.new_task(loop, notmutch()) test_utils.run_briefly(loop) task.cancel() diff --git a/Lib/test/test_buffer.py b/Lib/test/test_buffer.py index b6afddb0f35428..19582e757161fc 100644 --- a/Lib/test/test_buffer.py +++ b/Lib/test/test_buffer.py @@ -4456,7 +4456,7 @@ def test_pybuffer_size_from_format(self): @support.cpython_only def test_flags_overflow(self): - # gh-126594: Check for integer overflow on large flags + # gh-126594: Check for integer overlow on large flags try: from _testcapi import INT_MIN, INT_MAX except ImportError: diff --git a/Lib/test/test_build_details.py b/Lib/test/test_build_details.py index d7a718139953b0..ba4b8c5aa9b58e 100644 --- a/Lib/test/test_build_details.py +++ b/Lib/test/test_build_details.py @@ -11,7 +11,7 @@ class FormatTestsBase: @property def contents(self): - """Install details file contents. Should be overridden by subclasses.""" + """Install details file contents. Should be overriden by subclasses.""" raise NotImplementedError @property @@ -114,7 +114,7 @@ def contents(self): def test_location(self): self.assertTrue(os.path.isfile(self.location)) - # Override generic format tests with tests for our specific implementation. + # Override generic format tests with tests for our specific implemenation. @needs_installed_python @unittest.skipIf( diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py index 7bd66c6f779040..2591e7ca6ab0ec 100644 --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -632,7 +632,7 @@ def test_startswith(self): self.assertTrue(b.startswith(b"hello")) self.assertTrue(b.startswith(b"hel")) self.assertTrue(b.startswith(b"h")) - self.assertFalse(b.startswith(b"hello")) + self.assertFalse(b.startswith(b"hellow")) self.assertFalse(b.startswith(b"ha")) with self.assertRaises(TypeError) as cm: b.startswith([b'h']) diff --git a/Lib/test/test_capi/test_tuple.py b/Lib/test/test_capi/test_tuple.py index 0eb70ff68f32f5..7c07bc64e247c5 100644 --- a/Lib/test/test_capi/test_tuple.py +++ b/Lib/test/test_capi/test_tuple.py @@ -259,7 +259,7 @@ def test__tuple_resize(self): def test_bug_59313(self): # Before 3.14, the C-API function PySequence_Tuple # would create incomplete tuples which were visible to - # the cycle GC, and this test would crash the interpreter. + # the cycle GC, and this test would crash the interpeter. 
TAG = object() tuples = [] diff --git a/Lib/test/test_capi/test_type.py b/Lib/test/test_capi/test_type.py index dd660216770dde..15fb4a93e2ad74 100644 --- a/Lib/test/test_capi/test_type.py +++ b/Lib/test/test_capi/test_type.py @@ -259,7 +259,7 @@ class FreezeThis(metaclass=Meta): self.assertEqual(FreezeThis.value, 2) def test_manual_heap_type(self): - # gh-128923: test that a manually allocated and initialized heap type + # gh-128923: test that a manually allocated and initailized heap type # works correctly ManualHeapType = _testcapi.ManualHeapType for i in range(100): diff --git a/Lib/test/test_cmd_line.py b/Lib/test/test_cmd_line.py index cc3802a90a850f..f30a1874ab96d4 100644 --- a/Lib/test/test_cmd_line.py +++ b/Lib/test/test_cmd_line.py @@ -980,7 +980,7 @@ def test_python_legacy_windows_fs_encoding(self): def test_python_legacy_windows_stdio(self): # Test that _WindowsConsoleIO is used when PYTHONLEGACYWINDOWSSTDIO # is not set. - # We cannot use PIPE because it prevents creating new console. + # We cannot use PIPE becase it prevents creating new console. # So we use exit code. code = "import sys; sys.exit(type(sys.stdout.buffer.raw).__name__ != '_WindowsConsoleIO')" env = os.environ.copy() diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py index b20c424878450d..d8666f7290e72e 100644 --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -3249,7 +3249,7 @@ def test_codec_lookup_failure(self): def test_unflagged_non_text_codec_handling(self): # The stdlib non-text codecs are now marked so they're - # preemptively skipped by the text model related methods + # pre-emptively skipped by the text model related methods # However, third party codecs won't be flagged, so we still make # sure the case where an inappropriate output type is produced is # handled appropriately diff --git a/Lib/test/test_ctypes/test_win32.py b/Lib/test/test_ctypes/test_win32.py index dc13fae456b45d..7d5133221906bb 100644 --- a/Lib/test/test_ctypes/test_win32.py +++ b/Lib/test/test_ctypes/test_win32.py @@ -13,9 +13,9 @@ @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') class FunctionCallTestCase(unittest.TestCase): - @unittest.skipUnless('MSC' in sys.version, "SHE only supported by MSC") + @unittest.skipUnless('MSC' in sys.version, "SEH only supported by MSC") @unittest.skipIf(sys.executable.lower().endswith('_d.exe'), - "SHE not enabled in debug builds") + "SEH not enabled in debug builds") def test_SEH(self): # Disable faulthandler to prevent logging the warning: # "Windows fatal exception: access violation" diff --git a/Lib/test/test_ctypes/test_win32_com_foreign_func.py b/Lib/test/test_ctypes/test_win32_com_foreign_func.py index 432894b090638d..7e54f8f6c31d33 100644 --- a/Lib/test/test_ctypes/test_win32_com_foreign_func.py +++ b/Lib/test/test_ctypes/test_win32_com_foreign_func.py @@ -63,13 +63,13 @@ def is_equal_guid(guid1, guid2): IID_IPersist = create_guid("{0000010C-0000-0000-C000-000000000046}") CLSID_ShellLink = create_guid("{00021401-0000-0000-C000-000000000046}") -# https://learn.microsoft.com/en-us/windows/win32/api/unknown/nf-unknown-iunknown-queryinterface(refiid_void) +# https://learn.microsoft.com/en-us/windows/win32/api/unknwn/nf-unknwn-iunknown-queryinterface(refiid_void) proto_query_interface = create_proto_com_method( "QueryInterface", 0, HRESULT, POINTER(GUID), POINTER(c_void_p) ) -# https://learn.microsoft.com/en-us/windows/win32/api/unknown/nf-unknown-iunknown-addref +# https://learn.microsoft.com/en-us/windows/win32/api/unknwn/nf-unknwn-iunknown-addref 
proto_add_ref = create_proto_com_method("AddRef", 1, ctypes.c_long) -# https://learn.microsoft.com/en-us/windows/win32/api/unknown/nf-unknown-iunknown-release +# https://learn.microsoft.com/en-us/windows/win32/api/unknwn/nf-unknwn-iunknown-release proto_release = create_proto_com_method("Release", 2, ctypes.c_long) # https://learn.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersist-getclassid proto_get_class_id = create_proto_com_method( diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py index def33ed81a374c..08a8f4c3b36bd6 100644 --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -355,7 +355,7 @@ def eval_equation(self, s): funct = L[1].lower() valstemp = L[2:] L = Sides[1].strip().split() - and = L[0] + ans = L[0] exceptions = L[1:] except (TypeError, AttributeError, IndexError): raise self.decimal.InvalidOperation @@ -410,7 +410,7 @@ def FixQuotes(val): v = self.read_unlimited(v, self.context) vals.append(v) - and = FixQuotes(and) + ans = FixQuotes(ans) if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'): for error in theirexceptions: @@ -461,7 +461,7 @@ def FixQuotes(val): myexceptions.sort(key=repr) theirexceptions.sort(key=repr) - self.assertEqual(result, and, + self.assertEqual(result, ans, 'Incorrect answer for ' + s + ' -- got ' + result) self.assertEqual(myexceptions, theirexceptions, @@ -2403,137 +2403,137 @@ def test_none_args(self): ##### Binary functions c.clear_flags() - and = str(x.compare(Decimal('Nan891287828'), context=None)) - self.assertEqual(and, 'NaN1287828') + ans = str(x.compare(Decimal('Nan891287828'), context=None)) + self.assertEqual(ans, 'NaN1287828') self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.compare_signal(8224, context=None)) - self.assertEqual(and, '-1') + ans = str(x.compare_signal(8224, context=None)) + self.assertEqual(ans, '-1') self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.logical_and(101, context=None)) - self.assertEqual(and, '101') + ans = str(x.logical_and(101, context=None)) + self.assertEqual(ans, '101') self.assertRaises(InvalidOperation, x.logical_and, 123, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.logical_or(101, context=None)) - self.assertEqual(and, '111') + ans = str(x.logical_or(101, context=None)) + self.assertEqual(ans, '111') self.assertRaises(InvalidOperation, x.logical_or, 123, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.logical_xor(101, context=None)) - self.assertEqual(and, '10') + ans = str(x.logical_xor(101, context=None)) + self.assertEqual(ans, '10') self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.max(101, context=None)) - self.assertEqual(and, '111') + ans = str(x.max(101, context=None)) + self.assertEqual(ans, '111') self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.max_mag(101, context=None)) - self.assertEqual(and, '111') + ans = str(x.max_mag(101, context=None)) + self.assertEqual(ans, '111') self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.min(101, context=None)) - 
self.assertEqual(and, '101') + ans = str(x.min(101, context=None)) + self.assertEqual(ans, '101') self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.min_mag(101, context=None)) - self.assertEqual(and, '101') + ans = str(x.min_mag(101, context=None)) + self.assertEqual(ans, '101') self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.remainder_near(101, context=None)) - self.assertEqual(and, '10') + ans = str(x.remainder_near(101, context=None)) + self.assertEqual(ans, '10') self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.rotate(2, context=None)) - self.assertEqual(and, '11100') + ans = str(x.rotate(2, context=None)) + self.assertEqual(ans, '11100') self.assertRaises(InvalidOperation, x.rotate, 101, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.scaleb(7, context=None)) - self.assertEqual(and, '1.11E+9') + ans = str(x.scaleb(7, context=None)) + self.assertEqual(ans, '1.11E+9') self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None) self.assertTrue(c.flags[InvalidOperation]) c.clear_flags() - and = str(x.shift(2, context=None)) - self.assertEqual(and, '11100') + ans = str(x.shift(2, context=None)) + self.assertEqual(ans, '11100') self.assertRaises(InvalidOperation, x.shift, 10000, context=None) self.assertTrue(c.flags[InvalidOperation]) ##### Ternary functions c.clear_flags() - and = str(x.fma(2, 3, context=None)) - self.assertEqual(and, '225') + ans = str(x.fma(2, 3, context=None)) + self.assertEqual(ans, '225') self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None) self.assertTrue(c.flags[Overflow]) ##### Special cases c.rounding = ROUND_HALF_EVEN - and = str(Decimal('1.5').to_integral(rounding=None, context=None)) - self.assertEqual(and, '2') + ans = str(Decimal('1.5').to_integral(rounding=None, context=None)) + self.assertEqual(ans, '2') c.rounding = ROUND_DOWN - and = str(Decimal('1.5').to_integral(rounding=None, context=None)) - self.assertEqual(and, '1') - and = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None)) - self.assertEqual(and, '2') + ans = str(Decimal('1.5').to_integral(rounding=None, context=None)) + self.assertEqual(ans, '1') + ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None)) + self.assertEqual(ans, '2') c.clear_flags() self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None) self.assertTrue(c.flags[InvalidOperation]) c.rounding = ROUND_HALF_EVEN - and = str(Decimal('1.5').to_integral_value(rounding=None, context=None)) - self.assertEqual(and, '2') + ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None)) + self.assertEqual(ans, '2') c.rounding = ROUND_DOWN - and = str(Decimal('1.5').to_integral_value(rounding=None, context=None)) - self.assertEqual(and, '1') - and = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None)) - self.assertEqual(and, '2') + ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None)) + self.assertEqual(ans, '1') + ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None)) + self.assertEqual(ans, '2') c.clear_flags() self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None) self.assertTrue(c.flags[InvalidOperation]) c.rounding = ROUND_HALF_EVEN - and 
= str(Decimal('1.5').to_integral_exact(rounding=None, context=None)) - self.assertEqual(and, '2') + ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None)) + self.assertEqual(ans, '2') c.rounding = ROUND_DOWN - and = str(Decimal('1.5').to_integral_exact(rounding=None, context=None)) - self.assertEqual(and, '1') - and = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None)) - self.assertEqual(and, '2') + ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None)) + self.assertEqual(ans, '1') + ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None)) + self.assertEqual(ans, '2') c.clear_flags() self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None) self.assertTrue(c.flags[InvalidOperation]) c.rounding = ROUND_UP - and = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None)) - self.assertEqual(and, '1.501') + ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None)) + self.assertEqual(ans, '1.501') c.rounding = ROUND_DOWN - and = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None)) - self.assertEqual(and, '1.500') - and = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None)) - self.assertEqual(and, '1.501') + ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None)) + self.assertEqual(ans, '1.500') + ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None)) + self.assertEqual(ans, '1.501') c.clear_flags() self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None) self.assertTrue(c.flags[InvalidOperation]) @@ -3911,7 +3911,7 @@ def raise_error(context, flag): for fn, args in operations: # find answer and flags raised using a clean context context.clear_flags() - and = fn(*args) + ans = fn(*args) flags = [k for k, v in context.flags.items() if v] for extra_flags in flagsets: @@ -3932,9 +3932,9 @@ def raise_error(context, flag): new_flags = [k for k,v in context.flags.items() if v] new_flags.sort(key=id) - self.assertEqual(and, new_ans, + self.assertEqual(ans, new_ans, "operation produces different answers depending on flags set: " + - "expected %s, got %s." % (and, new_ans)) + "expected %s, got %s." 
% (ans, new_ans)) self.assertEqual(new_flags, expected_flags, "operation raises different flags depending on flags set: " + "expected %s, got %s" % (expected_flags, new_flags)) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py index 8ef17086641468..8da6647c3f71fc 100644 --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -3323,7 +3323,7 @@ class F(D, E): pass self.assertIs(x.__class__, cls2) x.__class__ = cls self.assertIs(x.__class__, cls) - def can't(x, C): + def cant(x, C): try: x.__class__ = C except TypeError: @@ -3336,18 +3336,18 @@ def can't(x, C): pass else: self.fail("shouldn't allow del %r.__class__" % x) - can't(C(), list) - can't(list(), C) - can't(C(), 1) - can't(C(), object) - can't(object(), list) - can't(list(), object) + cant(C(), list) + cant(list(), C) + cant(C(), 1) + cant(C(), object) + cant(object(), list) + cant(list(), object) class Int(int): __slots__ = [] - can't(True, int) - can't(2, bool) + cant(True, int) + cant(2, bool) o = object() - can't(o, int) - can't(o, type(None)) + cant(o, int) + cant(o, type(None)) del o class G(object): __slots__ = ["a", "b"] @@ -3387,7 +3387,7 @@ class R(J): for cls2 in G, J, K, L, M, N, P, R, list, Int: if cls is cls2: continue - can't(cls(), cls2) + cant(cls(), cls2) # Issue5283: when __class__ changes in __del__, the wrong # type gets DECREF'd. @@ -3405,16 +3405,16 @@ class C(object): pass a = C() a.__dict__ = {'b': 1} self.assertEqual(a.b, 1) - def can't(x, dict): + def cant(x, dict): try: x.__dict__ = dict except (AttributeError, TypeError): pass else: self.fail("shouldn't allow %r.__dict__ = %r" % (x, dict)) - can't(a, None) - can't(a, []) - can't(a, 1) + cant(a, None) + cant(a, []) + cant(a, 1) del a.__dict__ # Deleting __dict__ is allowed class Base(object): @@ -3423,7 +3423,7 @@ def verify_dict_readonly(x): """ x has to be an instance of a class inheriting from Base. """ - can't(x, {}) + cant(x, {}) try: del x.__dict__ except (AttributeError, TypeError): @@ -5282,7 +5282,7 @@ class Base2(object): bases_before = ",".join([c.__name__ for c in X.__bases__]) print(f"before={bases_before}") - # mykey is initially read from Base, however, the lookup will be performed + # mykey is initially read from Base, however, the lookup will be perfomed # again if specialization fails. The second lookup will use the new # mro set by __eq__. print(X.mykey) diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py index 4888bf10a983d5..60c62430370e96 100644 --- a/Lib/test/test_dict.py +++ b/Lib/test/test_dict.py @@ -1581,7 +1581,7 @@ def check_unhashable_key(): with check_unhashable_key(): d.get(key) - # Only TypeError exception is overridden, + # Only TypeError exception is overriden, # other exceptions are left unchanged. 
class HashError: def __hash__(self): diff --git a/Lib/test/test_difflib.py b/Lib/test/test_difflib.py index 1766c9828aa4a2..6ac584a08d1e86 100644 --- a/Lib/test/test_difflib.py +++ b/Lib/test/test_difflib.py @@ -556,7 +556,7 @@ def test_default_args(self): b[match.b: match.b + match.size]) self.assertFalse(self.longer_match_exists(a, b, match.size)) - match = sm.find_longest_match(also=2, blo=4) + match = sm.find_longest_match(alo=2, blo=4) self.assertEqual(match.a, 3) self.assertEqual(match.b, 7) self.assertEqual(match.size, 4) diff --git a/Lib/test/test_dis.py b/Lib/test/test_dis.py index fc3d53271d1c0b..355990ed58ee09 100644 --- a/Lib/test/test_dis.py +++ b/Lib/test/test_dis.py @@ -1696,7 +1696,7 @@ def jumpy(): # code_object_inner before rerunning the tests def _stringify_instruction(instr): - # Since positions offsets change a lot for these test cases, ignore them. + # Since postions offsets change a lot for these test cases, ignore them. base = ( f" make_inst(opname={instr.opname!r}, arg={instr.arg!r}, argval={instr.argval!r}, " + f"argrepr={instr.argrepr!r}, offset={instr.offset}, start_offset={instr.start_offset}, " + diff --git a/Lib/test/test_email/test__header_value_parser.py b/Lib/test/test_email/test__header_value_parser.py index 0cf6fb5b3d6bc3..179e236ecdfd7f 100644 --- a/Lib/test/test_email/test__header_value_parser.py +++ b/Lib/test/test_email/test__header_value_parser.py @@ -2285,11 +2285,11 @@ def test_get_group_single_mailbox(self): def test_get_group_mixed_list(self): group = self._test_get_x(parser.get_group, ('Monty Python: "Fred A. Bear" ,' - '(foo) Roger , x@test.example.com;'), + '(foo) Roger , x@test.example.com;'), ('Monty Python: "Fred A. Bear" ,' - '(foo) Roger , x@test.example.com;'), + '(foo) Roger , x@test.example.com;'), ('Monty Python: "Fred A. Bear" ,' - ' Roger , x@test.example.com;'), + ' Roger , x@test.example.com;'), [], '') self.assertEqual(group.token_type, 'group') @@ -2306,11 +2306,11 @@ def test_get_group_mixed_list(self): def test_get_group_one_invalid(self): group = self._test_get_x(parser.get_group, ('Monty Python: "Fred A. Bear" ,' - '(foo) Roger ping@example.com, x@test.example.com;'), + '(foo) Roger ping@exampele.com, x@test.example.com;'), ('Monty Python: "Fred A. Bear" ,' - '(foo) Roger ping@example.com, x@test.example.com;'), + '(foo) Roger ping@exampele.com, x@test.example.com;'), ('Monty Python: "Fred A. Bear" ,' - ' Roger ping@example.com, x@test.example.com;'), + ' Roger ping@exampele.com, x@test.example.com;'), [errors.InvalidHeaderDefect, # non-angle addr makes local part invalid errors.InvalidHeaderDefect], # and its not obs-local either: no dots. '') @@ -2718,9 +2718,9 @@ def test_get_msg_id_empty(self): def test_get_msg_id_valid(self): msg_id = self._test_get_x( parser.get_msg_id, - "", - "", - "", + "", + "", + "", [], '', ) @@ -2729,9 +2729,9 @@ def test_get_msg_id_valid(self): def test_get_msg_id_obsolete_local(self): msg_id = self._test_get_x( parser.get_msg_id, - '<"simple.local"@example.com>', - '<"simple.local"@example.com>', - '', + '<"simeple.local"@example.com>', + '<"simeple.local"@example.com>', + '', [errors.ObsoleteHeaderDefect], '', ) diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py index 687e45a764bd3d..57d0656487d4db 100644 --- a/Lib/test/test_exceptions.py +++ b/Lib/test/test_exceptions.py @@ -1295,7 +1295,7 @@ def test_context_of_exception_in_else_and_finally(self): self.assertIs(exc.__context__, ve) def test_unicode_change_attributes(self): - # See issue 7309. This was a crash. 
+ # See issue 7309. This was a crasher. u = UnicodeEncodeError('baz', 'xxxxx', 1, 5, 'foo') self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: foo") diff --git a/Lib/test/test_fileio.py b/Lib/test/test_fileio.py index 2e5b894fd5b063..e3d54f6315aade 100644 --- a/Lib/test/test_fileio.py +++ b/Lib/test/test_fileio.py @@ -388,7 +388,7 @@ def check_readall(name, code, prelude="", cleanup="", syscalls = strace_helper.filter_memory(syscalls) # The first call should be an open that returns a - # file descriptor (fd). Afer that calls may vary. Once the file + # file descriptor (fd). After that calls may vary. Once the file # is opened, check calls refer to it by fd as the filename # could be removed from the filesystem, renamed, etc. See: # Time-of-check time-of-use (TOCTOU) software bug class. diff --git a/Lib/test/test_float.py b/Lib/test/test_float.py index e39cba06d03c82..00518abcb11b46 100644 --- a/Lib/test/test_float.py +++ b/Lib/test/test_float.py @@ -1071,7 +1071,7 @@ def test_inf_from_str(self): self.assertRaises(ValueError, float, "in") self.assertRaises(ValueError, float, "+in") self.assertRaises(ValueError, float, "-in") - self.assertRaises(ValueError, float, "infinit") + self.assertRaises(ValueError, float, "infinite") self.assertRaises(ValueError, float, "+Infin") self.assertRaises(ValueError, float, "-INFI") self.assertRaises(ValueError, float, "infinitys") diff --git a/Lib/test/test_fnmatch.py b/Lib/test/test_fnmatch.py index 546682653a071e..5daaf3b3fddb9e 100644 --- a/Lib/test/test_fnmatch.py +++ b/Lib/test/test_fnmatch.py @@ -96,22 +96,22 @@ def test_sep(self): def test_char_set(self): check = self.check_match - tescases = string.ascii_lowercase + string.digits + string.punctuation - for c in tescases: + testcases = string.ascii_lowercase + string.digits + string.punctuation + for c in testcases: check(c, '[az]', c in 'az') check(c, '[!az]', c not in 'az') # Case insensitive. - for c in tescases: + for c in testcases: check(c, '[AZ]', (c in 'az') and IGNORECASE) check(c, '[!AZ]', (c not in 'az') or not IGNORECASE) for c in string.ascii_uppercase: check(c, '[az]', (c in 'AZ') and IGNORECASE) check(c, '[!az]', (c not in 'AZ') or not IGNORECASE) # Repeated same character. - for c in tescases: + for c in testcases: check(c, '[aa]', c == 'a') # Special cases. - for c in tescases: + for c in testcases: check(c, '[^az]', c in '^az') check(c, '[[az]', c in '[az') check(c, r'[!]]', c != ']') @@ -122,24 +122,24 @@ def test_char_set(self): def test_range(self): check = self.check_match - tescases = string.ascii_lowercase + string.digits + string.punctuation - for c in tescases: + testcases = string.ascii_lowercase + string.digits + string.punctuation + for c in testcases: check(c, '[b-d]', c in 'bcd') check(c, '[!b-d]', c not in 'bcd') check(c, '[b-dx-z]', c in 'bcdxyz') check(c, '[!b-dx-z]', c not in 'bcdxyz') # Case insensitive. - for c in tescases: + for c in testcases: check(c, '[B-D]', (c in 'bcd') and IGNORECASE) check(c, '[!B-D]', (c not in 'bcd') or not IGNORECASE) for c in string.ascii_uppercase: check(c, '[b-d]', (c in 'BCD') and IGNORECASE) check(c, '[!b-d]', (c not in 'BCD') or not IGNORECASE) # Upper bound == lower bound. - for c in tescases: + for c in testcases: check(c, '[b-b]', c == 'b') # Special cases.
- for c in tescases: + for c in testcases: check(c, '[!-#]', c not in '-#') check(c, '[!--.]', c not in '-.') check(c, '[^-`]', c in '^_`') @@ -153,7 +153,7 @@ def test_range(self): check(c, '[-]', c in '-') check(c, '[!-]', c not in '-') # Upper bound is less that lower bound: error in RE. - for c in tescases: + for c in testcases: check(c, '[d-b]', False) check(c, '[!d-b]', True) check(c, '[d-bx-z]', c in 'xyz') diff --git a/Lib/test/test_generators.py b/Lib/test/test_generators.py index 7ce84ca0a3bb42..3e41c7b9663491 100644 --- a/Lib/test/test_generators.py +++ b/Lib/test/test_generators.py @@ -2377,7 +2377,7 @@ def printsolution(self, x): """ weakref_tests = """\ -Generators are weakly referencable: +Generators are weakly referenceable: >>> import weakref >>> def gen(): @@ -2388,7 +2388,7 @@ def printsolution(self, x): True >>> p = weakref.proxy(gen) -Generator-iterators are weakly referencable as well: +Generator-iterators are weakly referenceable as well: >>> gi = gen() >>> wr = weakref.ref(gi) diff --git a/Lib/test/test_genexps.py b/Lib/test/test_genexps.py index d819d1fd4b9f26..fe5f18fa3f88a0 100644 --- a/Lib/test/test_genexps.py +++ b/Lib/test/test_genexps.py @@ -138,7 +138,7 @@ >>> list(g) [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)] -Verify re-use of tuples (a side benefit of using genexps over listcomps) +Verify reuse of tuples (a side benefit of using genexps over listcomps) >>> tupleids = list(map(id, ((i,i) for i in range(10)))) >>> int(max(tupleids) - min(tupleids)) @@ -256,7 +256,7 @@ >>> me.gi_running 0 -Verify that genexps are weakly referencable +Verify that genexps are weakly referenceable >>> import weakref >>> g = (i*i for i in range(4)) diff --git a/Lib/test/test_gzip.py b/Lib/test/test_gzip.py index df4748057ab254..a12ff5662a73db 100644 --- a/Lib/test/test_gzip.py +++ b/Lib/test/test_gzip.py @@ -144,7 +144,7 @@ def test_read1(self): self.assertEqual(b''.join(blocks), data1 * 50) def test_readinto(self): - # 10MB of uncompressible data to ensure multiple reads + # 10MB of incompressible data to ensure multiple reads large_data = os.urandom(10 * 2**20) with gzip.GzipFile(self.filename, 'wb') as f: f.write(large_data) @@ -156,7 +156,7 @@ def test_readinto(self): self.assertEqual(buf, large_data) def test_readinto1(self): - # 10MB of uncompressible data to ensure multiple reads + # 10MB of incompressible data to ensure multiple reads large_data = os.urandom(10 * 2**20) with gzip.GzipFile(self.filename, 'wb') as f: f.write(large_data) diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py index ff5184b396081d..2548a7c5f292f0 100644 --- a/Lib/test/test_httpservers.py +++ b/Lib/test/test_httpservers.py @@ -853,17 +853,17 @@ def handle_expect_100(self): class AuditableBytesIO: def __init__(self): - self.datas = [] + self.data = [] def write(self, data): - self.datas.append(data) + self.data.append(data) def getData(self): - return b''.join(self.datas) + return b''.join(self.data) @property def numWrites(self): - return len(self.datas) + return len(self.data) class BaseHTTPRequestHandlerTestCase(unittest.TestCase): diff --git a/Lib/test/test_import/__init__.py b/Lib/test/test_import/__init__.py index 8a7fadcbee2d79..6e34094c5aa422 100644 --- a/Lib/test/test_import/__init__.py +++ b/Lib/test/test_import/__init__.py @@ -2879,7 +2879,7 @@ def check_direct(self, loaded): self.assertIs(loaded.snapshot.lookedup, loaded.module) def check_indirect(self, loaded, orig): - # The module re-uses another's PyModuleDef,
with a different name. + # The module reuses another's PyModuleDef, with a different name. assert orig is not loaded.module assert orig.__name__ != loaded.name self.assertNotEqual(loaded.module.__name__, loaded.name) diff --git a/Lib/test/test_interpreters/test_api.py b/Lib/test/test_interpreters/test_api.py index 1566d7acc9b6ba..a34b20beaca7a3 100644 --- a/Lib/test/test_interpreters/test_api.py +++ b/Lib/test/test_interpreters/test_api.py @@ -1217,7 +1217,7 @@ def test_stateless_func_returns_arg(self): # builtin exceptions Exception('uh-oh!'), ModuleNotFoundError('mymodule'), - # builtin fnctions + # builtin functions len, sys.exit, # user classes diff --git a/Lib/test/test_iterlen.py b/Lib/test/test_iterlen.py index d527177215f107..41c9752e557fb3 100644 --- a/Lib/test/test_iterlen.py +++ b/Lib/test/test_iterlen.py @@ -63,7 +63,7 @@ def test_invariant(self): class TestTemporarilyImmutable(TestInvariantWithoutMutations): def test_immutable_during_iteration(self): - # objects such as dequeues, sets, and dictionaries enforce + # objects such as deques, sets, and dictionaries enforce # length immutability during iteration it = self.it diff --git a/Lib/test/test_itertools.py b/Lib/test/test_itertools.py index e7da03cb9b3240..61bea9dba07fec 100644 --- a/Lib/test/test_itertools.py +++ b/Lib/test/test_itertools.py @@ -286,7 +286,7 @@ def test_combinations_overflow(self): with self.assertRaises((OverflowError, MemoryError)): combinations("AA", 2**29) - # Test implementation detail: tuple re-use + # Test implementation detail: tuple reuse @support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) @@ -361,7 +361,7 @@ def test_combinations_with_replacement_overflow(self): with self.assertRaises((OverflowError, MemoryError)): combinations_with_replacement("AA", 2**30) - # Test implementation detail: tuple re-use + # Test implementation detail: tuple reuse @support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): cwr = combinations_with_replacement @@ -745,17 +745,17 @@ def test_filter(self): self.assertRaises(TypeError, next, filter(range(6), range(6))) # check copy, deepcopy, pickle - and = [0,2,4] + ans = [0,2,4] c = filter(isEven, range(6)) - self.assertEqual(list(copy.copy(c)), and) + self.assertEqual(list(copy.copy(c)), ans) c = filter(isEven, range(6)) - self.assertEqual(list(copy.deepcopy(c)), and) + self.assertEqual(list(copy.deepcopy(c)), ans) for proto in range(pickle.HIGHEST_PROTOCOL + 1): c = filter(isEven, range(6)) - self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), and) + self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans) next(c) - self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), and[1:]) + self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans[1:]) for proto in range(pickle.HIGHEST_PROTOCOL + 1): c = filter(isEven, range(6)) self.pickletest(proto, c) @@ -773,8 +773,8 @@ def test_filterfalse(self): def test_zip(self): # XXX This is rather silly now that builtin zip() calls zip()...
- and = [(x,y) for x, y in zip('abc',count())] - self.assertEqual(and, [('a', 0), ('b', 1), ('c', 2)]) + ans = [(x,y) for x, y in zip('abc',count())] + self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6))) self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3))) self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3))) @@ -1303,13 +1303,13 @@ def test_tee(self): support.gc_collect() # For PyPy or other GCs. self.assertRaises(ReferenceError, getattr, p, '__class__') - and = list('abc') + ans = list('abc') long_ans = list(range(10000)) # check copy a, b = tee('abc') - self.assertEqual(list(copy.copy(a)), and) - self.assertEqual(list(copy.copy(b)), and) + self.assertEqual(list(copy.copy(a)), ans) + self.assertEqual(list(copy.copy(b)), ans) a, b = tee(list(range(10000))) self.assertEqual(list(copy.copy(a)), long_ans) self.assertEqual(list(copy.copy(b)), long_ans) @@ -1318,10 +1318,10 @@ def test_tee(self): a, b = tee('abc') take(2, a) take(1, b) - self.assertEqual(list(copy.copy(a)), and[2:]) - self.assertEqual(list(copy.copy(b)), and[1:]) - self.assertEqual(list(a), and[2:]) - self.assertEqual(list(b), and[1:]) + self.assertEqual(list(copy.copy(a)), ans[2:]) + self.assertEqual(list(copy.copy(b)), ans[1:]) + self.assertEqual(list(a), ans[2:]) + self.assertEqual(list(b), ans[1:]) a, b = tee(range(10000)) take(100, a) take(60, b) @@ -1905,7 +1905,7 @@ def __next__(self): t3 = tnew(t1) self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc')) - # test that tee objects are weak referenceable + # test that tee objects are weak referencable a, b = tee(range(10)) p = weakref.proxy(a) self.assertEqual(getattr(p, '__class__'), type(b)) @@ -1913,15 +1913,15 @@ def __next__(self): gc.collect() # For PyPy or other GCs. 
self.assertRaises(ReferenceError, getattr, p, '__class__') - and = list('abc') + ans = list('abc') long_ans = list(range(10000)) # Tests not applicable to the tee() recipe if False: # check copy a, b = tee('abc') - self.assertEqual(list(copy.copy(a)), and) - self.assertEqual(list(copy.copy(b)), and) + self.assertEqual(list(copy.copy(a)), ans) + self.assertEqual(list(copy.copy(b)), ans) a, b = tee(list(range(10000))) self.assertEqual(list(copy.copy(a)), long_ans) self.assertEqual(list(copy.copy(b)), long_ans) @@ -1930,10 +1930,10 @@ def __next__(self): a, b = tee('abc') take(2, a) take(1, b) - self.assertEqual(list(copy.copy(a)), and[2:]) - self.assertEqual(list(copy.copy(b)), and[1:]) - self.assertEqual(list(a), and[2:]) - self.assertEqual(list(b), and[1:]) + self.assertEqual(list(copy.copy(a)), ans[2:]) + self.assertEqual(list(copy.copy(b)), ans[1:]) + self.assertEqual(list(a), ans[2:]) + self.assertEqual(list(b), ans[1:]) a, b = tee(range(10000)) take(100, a) take(60, b) diff --git a/Lib/test/test_json/test_dump.py b/Lib/test/test_json/test_dump.py index 64f3624f3fbb8e..39470754003bb6 100644 --- a/Lib/test/test_json/test_dump.py +++ b/Lib/test/test_json/test_dump.py @@ -41,9 +41,9 @@ def test_encode_truefalse(self): # Issue 16228: Crash on encoding resized list def test_encode_mutated(self): a = [object()] * 10 - def crash(obj): + def crasher(obj): del a[-1] - self.assertEqual(self.dumps(a, default=crash), + self.assertEqual(self.dumps(a, default=crasher), '[null, null, null, null, null]') # Issue 24094 diff --git a/Lib/test/test_locale.py b/Lib/test/test_locale.py index 0f5858188124ef..55b502e52ca454 100644 --- a/Lib/test/test_locale.py +++ b/Lib/test/test_locale.py @@ -537,7 +537,7 @@ def test_getpreferredencoding(self): codecs.lookup(enc) def test_strcoll_3303(self): - # test crash from bug #3303 + # test crasher from bug #3303 self.assertRaises(TypeError, locale.strcoll, "a", None) self.assertRaises(TypeError, locale.strcoll, b"a", None) @@ -549,7 +549,7 @@ def test_setlocale_category(self): locale.setlocale(locale.LC_MONETARY) locale.setlocale(locale.LC_NUMERIC) - # crash from bug #7419 + # crasher from bug #7419 self.assertRaises(locale.Error, locale.setlocale, 12345) def test_getsetlocale_issue1813(self): diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py index e82dc611baac61..275f7ce47d09b5 100644 --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -2387,7 +2387,7 @@ def __getattr__(self, attribute): return getattr(queue, attribute) class CustomQueueFakeProtocol(CustomQueueProtocol): - # An object implementing the minimal Queue API for + # An object implementing the minimial Queue API for # the logging module but with incorrect signatures. # # The object will be considered a valid queue class since we diff --git a/Lib/test/test_long.py b/Lib/test/test_long.py index 2e1f174c8fe456..f336d49fa4f008 100644 --- a/Lib/test/test_long.py +++ b/Lib/test/test_long.py @@ -938,9 +938,9 @@ def test_correctly_rounded_true_division(self): self.check_truediv(n, 2**1076) # largeish random divisions: a/b where |a| <= |b| <= - # 2*|a|; |and| is between 0.5 and 1.0, so error should + # 2*|a|; |ans| is between 0.5 and 1.0, so error should # always be bounded by 2**-54 with equality possible only - # if the least significant bit of q=and*2**53 is zero. + # if the least significant bit of q=ans*2**53 is zero. 
for M in [10**10, 10**100, 10**1000]: for i in range(1000): a = random.randrange(1, M) diff --git a/Lib/test/test_memoryview.py b/Lib/test/test_memoryview.py index 653b8e0eeba6bd..64f440f180bbf0 100644 --- a/Lib/test/test_memoryview.py +++ b/Lib/test/test_memoryview.py @@ -738,7 +738,7 @@ def test_picklebuffer_reference_loop(self): @support.requires_resource("cpu") class RacingTest(unittest.TestCase): def test_racing_getbuf_and_releasebuf(self): - """Repeatedly access the memoryview for racing.""" + """Repeatly access the memoryview for racing.""" try: from multiprocessing.managers import SharedMemoryManager except ImportError: diff --git a/Lib/test/test_ntpath.py b/Lib/test/test_ntpath.py index 80405563d54d1d..22f6403d482bc4 100644 --- a/Lib/test/test_ntpath.py +++ b/Lib/test/test_ntpath.py @@ -131,10 +131,10 @@ def test_splitdrive(self): def test_splitdrive_invalid_paths(self): splitdrive = ntpath.splitdrive - self.assertEqual(splitdrive('\\\\set\x00ver\\sha\x00re\\di\x00r'), - ('\\\\set\x00ver\\sha\x00re', '\\di\x00r')) - self.assertEqual(splitdrive(b'\\\\set\x00ver\\sha\x00re\\di\x00r'), - (b'\\\\set\x00ver\\sha\x00re', b'\\di\x00r')) + self.assertEqual(splitdrive('\\\\ser\x00ver\\sha\x00re\\di\x00r'), + ('\\\\ser\x00ver\\sha\x00re', '\\di\x00r')) + self.assertEqual(splitdrive(b'\\\\ser\x00ver\\sha\x00re\\di\x00r'), + (b'\\\\ser\x00ver\\sha\x00re', b'\\di\x00r')) self.assertEqual(splitdrive("\\\\\udfff\\\udffe\\\udffd"), ('\\\\\udfff\\\udffe', '\\\udffd')) if sys.platform == 'win32': @@ -237,10 +237,10 @@ def test_splitroot(self): def test_splitroot_invalid_paths(self): splitroot = ntpath.splitroot - self.assertEqual(splitroot('\\\\set\x00ver\\sha\x00re\\di\x00r'), - ('\\\\set\x00ver\\sha\x00re', '\\', 'di\x00r')) - self.assertEqual(splitroot(b'\\\\set\x00ver\\sha\x00re\\di\x00r'), - (b'\\\\set\x00ver\\sha\x00re', b'\\', b'di\x00r')) + self.assertEqual(splitroot('\\\\ser\x00ver\\sha\x00re\\di\x00r'), + ('\\\\ser\x00ver\\sha\x00re', '\\', 'di\x00r')) + self.assertEqual(splitroot(b'\\\\ser\x00ver\\sha\x00re\\di\x00r'), + (b'\\\\ser\x00ver\\sha\x00re', b'\\', b'di\x00r')) self.assertEqual(splitroot("\\\\\udfff\\\udffe\\\udffd"), ('\\\\\udfff\\\udffe', '\\', '\udffd')) if sys.platform == 'win32': diff --git a/Lib/test/test_opcache.py b/Lib/test/test_opcache.py index 4f4a9516d8411b..30baa09048616c 100644 --- a/Lib/test/test_opcache.py +++ b/Lib/test/test_opcache.py @@ -571,7 +571,7 @@ def test(default=None): def make_deferred_ref_count_obj(): """Create an object that uses deferred reference counting. - Only objects that use deferred reference counting may be stored in inline + Only objects that use deferred refence counting may be stored in inline caches in free-threaded builds. This constructs a new class named Foo, which uses deferred reference counting. 
""" diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py index 779c85400b477d..de3a17fe893170 100644 --- a/Lib/test/test_os.py +++ b/Lib/test/test_os.py @@ -176,7 +176,7 @@ def test_getcwdb(self): # Tests creating TESTFN -class file tests(unittest.TestCase): +class FileTests(unittest.TestCase): def setUp(self): if os.path.lexists(os_helper.TESTFN): os.unlink(os_helper.TESTFN) diff --git a/Lib/test/test_pdb.py b/Lib/test/test_pdb.py index 33977bee188d9e..6b74e21ad73d1a 100644 --- a/Lib/test/test_pdb.py +++ b/Lib/test/test_pdb.py @@ -4835,7 +4835,7 @@ def test_convvar_completion(self): def test_local_namespace(self): script = textwrap.dedent(""" def f(): - original = "I live Python" + original = "I live Pythin" import pdb; pdb.Pdb().set_trace() f() """) diff --git a/Lib/test/test_peg_generator/test_c_parser.py b/Lib/test/test_peg_generator/test_c_parser.py index 4d97018937efa4..aa01a9b8f7ed87 100644 --- a/Lib/test/test_peg_generator/test_c_parser.py +++ b/Lib/test/test_peg_generator/test_c_parser.py @@ -77,7 +77,7 @@ class TestCParser(unittest.TestCase): @classmethod def setUpClass(cls): if cls._has_run: - # Since gh-104798 (Use setuptools in peg-generator and re-enable + # Since gh-104798 (Use setuptools in peg-generator and reenable # tests), this test case has been producing ref leaks. Initial # debugging points to bug(s) in setuptools and/or importlib. # See gh-105063 for more info. @@ -92,7 +92,7 @@ def setUpClass(cls): cls.tmp_base = os.getcwd() if os.path.samefile(cls.tmp_base, os_helper.SAVEDCWD): cls.tmp_base = None - # Create a directory for the reusable static library part of + # Create a directory for the reuseable static library part of # the pegen extension build process. This greatly reduces the # runtime overhead of spawning compiler processes. cls.library_dir = tempfile.mkdtemp(dir=cls.tmp_base) diff --git a/Lib/test/test_plistlib.py b/Lib/test/test_plistlib.py index 5b420cb54a2e68..a0c76e5dec5ebe 100644 --- a/Lib/test/test_plistlib.py +++ b/Lib/test/test_plistlib.py @@ -858,7 +858,7 @@ def test_load_aware_datetime(self): self.assertEqual(dt.tzinfo, datetime.UTC) @unittest.skipUnless("America/Los_Angeles" in zoneinfo.available_timezones(), - "Can't find timezone database") + "Can't find timezone datebase") def test_dump_aware_datetime(self): dt = datetime.datetime(2345, 6, 7, 8, 9, 10, tzinfo=zoneinfo.ZoneInfo("America/Los_Angeles")) @@ -877,7 +877,7 @@ def test_dump_utc_aware_datetime(self): self.assertEqual(loaded_dt, dt) @unittest.skipUnless("America/Los_Angeles" in zoneinfo.available_timezones(), - "Can't find timezone database") + "Can't find timezone datebase") def test_dump_aware_datetime_without_aware_datetime_option(self): dt = datetime.datetime(2345, 6, 7, 8, tzinfo=zoneinfo.ZoneInfo("America/Los_Angeles")) @@ -1032,7 +1032,7 @@ def test_load_aware_datetime(self): datetime.datetime(2345, 6, 7, 8, tzinfo=datetime.UTC)) @unittest.skipUnless("America/Los_Angeles" in zoneinfo.available_timezones(), - "Can't find timezone database") + "Can't find timezone datebase") def test_dump_aware_datetime_without_aware_datetime_option(self): dt = datetime.datetime(2345, 6, 7, 8, tzinfo=zoneinfo.ZoneInfo("America/Los_Angeles")) diff --git a/Lib/test/test_pty.py b/Lib/test/test_pty.py index 9b2ffd1e375ab5..4836f38c388c05 100644 --- a/Lib/test/test_pty.py +++ b/Lib/test/test_pty.py @@ -53,7 +53,7 @@ def normalize_output(data): # etc.) # This is about the best we can do without getting some feedback - # from someone more knowledgeable. + # from someone more knowledgable. 
# OSF/1 (Tru64) apparently turns \n into \r\r\n. if data.endswith(b'\r\r\n'): diff --git a/Lib/test/test_pyrepl/test_pyrepl.py b/Lib/test/test_pyrepl/test_pyrepl.py index 6997a9223f8b0d..657a971f8769df 100644 --- a/Lib/test/test_pyrepl/test_pyrepl.py +++ b/Lib/test/test_pyrepl/test_pyrepl.py @@ -1005,7 +1005,7 @@ def test_builtin_completion_top_level(self): # Make iter_modules() search only the standard library. # This makes the test more reliable in case there are # other user packages/scripts on PYTHONPATH which can - # interfere with the completions. + # intefere with the completions. lib_path = os.path.dirname(importlib.__path__[0]) sys.path = [lib_path] diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py index c33c0fd6f971b6..5bc3c5924b07fb 100644 --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -1966,7 +1966,7 @@ def test_leak_tmp_file(self): import tempfile import unittest - class file tests(unittest.TestCase): + class FileTests(unittest.TestCase): def test_leak_tmp_file(self): filename = os.path.join(tempfile.gettempdir(), 'mytmpfile') with open(filename, "wb") as fp: @@ -2383,7 +2383,7 @@ def test_format_duration(self): def test_normalize_test_name(self): normalize = normalize_test_name - self.assertEqual(normalize('test_access (test.test_os.file tests.test_access)'), + self.assertEqual(normalize('test_access (test.test_os.FileTests.test_access)'), 'test_access') self.assertEqual(normalize('setUpClass (test.test_os.ChownFileTests)', is_error=True), 'ChownFileTests') @@ -2424,7 +2424,7 @@ def id(self): patterns = get_match_tests() self.addCleanup(set_match_tests, patterns) - test_access = Test('test.test_os.file tests.test_access') + test_access = Test('test.test_os.FileTests.test_access') test_chdir = Test('test.test_os.Win32ErrorTests.test_chdir') test_copy = Test('test.test_shutil.TestCopy.test_copy') diff --git a/Lib/test/test_richcmp.py b/Lib/test/test_richcmp.py index f1c726dd24c0e6..b967c7623c57b0 100644 --- a/Lib/test/test_richcmp.py +++ b/Lib/test/test_richcmp.py @@ -93,14 +93,14 @@ def checkfail(self, error, opname, *args): for op in opmap[opname]: self.assertRaises(error, op, *args) - def checkequal(self, opname, a, b, express): + def checkequal(self, opname, a, b, expres): for op in opmap[opname]: realres = op(a, b) - # can't use assertEqual(realres, express) here - self.assertEqual(len(realres), len(express)) + # can't use assertEqual(realres, expres) here + self.assertEqual(len(realres), len(expres)) for i in range(len(realres)): # results are bool, so we can use "is" here - self.assertTrue(realres[i] is express[i]) + self.assertTrue(realres[i] is expres[i]) def test_mixed(self): # check that comparisons involving Vector objects @@ -149,7 +149,7 @@ def test_basic(self): testoutcome = op(ta, tb) self.assertEqual(realoutcome, testoutcome) - def checkvalue(self, opname, a, b, express): + def checkvalue(self, opname, a, b, expres): for typea in (int, Number): for typeb in (int, Number): ta = typea(a) @@ -157,7 +157,7 @@ def checkvalue(self, opname, a, b, express): for op in opmap[opname]: realres = op(ta, tb) realres = getattr(realres, "x", realres) - self.assertTrue(realres is express) + self.assertTrue(realres is expres) def test_values(self): # check all operators and all comparison results diff --git a/Lib/test/test_set.py b/Lib/test/test_set.py index 491c9e65d7213f..c0df9507bd7f5e 100644 --- a/Lib/test/test_set.py +++ b/Lib/test/test_set.py @@ -124,8 +124,8 @@ def test_isdisjoint(self): def f(s1, s2): 'Pure python equivalent of 
isdisjoint()' return not set(s1).intersection(s2) - for large in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef': - s1 = self.thetype(large) + for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef': + s1 = self.thetype(larg) for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef': for C in set, frozenset, dict.fromkeys, str, list, tuple: s2 = C(rarg) @@ -661,7 +661,7 @@ def check_unhashable_element(): with check_unhashable_element(): myset.discard(elem) - # Only TypeError exception is overridden, + # Only TypeError exception is overriden, # other exceptions are left unchanged. class HashError: def __hash__(self): diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py index 0a9eb04ba166b2..3dd67b2a2aba97 100644 --- a/Lib/test/test_socket.py +++ b/Lib/test/test_socket.py @@ -7083,7 +7083,7 @@ def test_aead_aes_gcm(self): self.assertEqual(expected_ct, res[assoclen:-taglen]) self.assertEqual(expected_tag, res[-taglen:]) - # create and data manually + # create anc data manually pack_uint32 = struct.Struct('I').pack op, _ = algo.accept() with op: diff --git a/Lib/test/test_sort.py b/Lib/test/test_sort.py index 9c12875357c725..2a7cfb7affaa21 100644 --- a/Lib/test/test_sort.py +++ b/Lib/test/test_sort.py @@ -154,7 +154,7 @@ def test_small_stability(self): class TestBugs(unittest.TestCase): def test_bug453523(self): - # bug 453523 -- list.sort() crash. + # bug 453523 -- list.sort() crasher. # If this fails, the most likely outcome is a core dump. # Mutations during a list sort should raise a ValueError. diff --git a/Lib/test/test_sqlite3/test_dbapi.py b/Lib/test/test_sqlite3/test_dbapi.py index 6530342be04b95..3602726437d8cf 100644 --- a/Lib/test/test_sqlite3/test_dbapi.py +++ b/Lib/test/test_sqlite3/test_dbapi.py @@ -460,8 +460,8 @@ def test_connection_init_bad_isolation_level(self): "BOGUS", " ", "DEFERRE", - "IMMEDIATE", - "EXCLUSIVE", + "IMMEDIAT", + "EXCLUSIV", "DEFERREDS", "IMMEDIATES", "EXCLUSIVES", diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py index 99d7aec925fcd5..9e519537ca5ed3 100644 --- a/Lib/test/test_ssl.py +++ b/Lib/test/test_ssl.py @@ -4100,7 +4100,7 @@ def test_ecdh_curve(self): client_context, server_context, hostname = testing_context() server_context.set_ecdh_curve("secp384r1") - server_context.set_ciphers("ECDHE:!eNULL:!annul") + server_context.set_ciphers("ECDHE:!eNULL:!aNULL") server_context.minimum_version = ssl.TLSVersion.TLSv1_2 stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, @@ -4109,7 +4109,7 @@ def test_ecdh_curve(self): # server auto, client secp384r1 client_context, server_context, hostname = testing_context() client_context.set_ecdh_curve("secp384r1") - server_context.set_ciphers("ECDHE:!eNULL:!annul") + server_context.set_ciphers("ECDHE:!eNULL:!aNULL") server_context.minimum_version = ssl.TLSVersion.TLSv1_2 stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True, @@ -4119,7 +4119,7 @@ def test_ecdh_curve(self): client_context, server_context, hostname = testing_context() client_context.set_ecdh_curve("prime256v1") server_context.set_ecdh_curve("secp384r1") - server_context.set_ciphers("ECDHE:!eNULL:!annul") + server_context.set_ciphers("ECDHE:!eNULL:!aNULL") server_context.minimum_version = ssl.TLSVersion.TLSv1_2 with self.assertRaises(ssl.SSLError): server_params_test(client_context, server_context, @@ -4441,7 +4441,7 @@ def test_session_handling(self): with 
client_context2.wrap_socket(socket.socket(), server_hostname=hostname) as s: - # cannot reuse session with a different SSLContext + # cannot re-use session with a different SSLContext with self.assertRaises(ValueError) as e: s.session = session s.connect((HOST, server.port)) diff --git a/Lib/test/test_stat.py b/Lib/test/test_stat.py index 99c28ac304bd10..5fd25d5012c425 100644 --- a/Lib/test/test_stat.py +++ b/Lib/test/test_stat.py @@ -207,7 +207,7 @@ def test_devices(self): self.assertEqual(modestr[0], 'c') self.assertS_IS("CHR", st_mode) # Linux block devices, BSD has no block devices anymore - for blockdev in ("/dev/sda", "/dev/had"): + for blockdev in ("/dev/sda", "/dev/hda"): if os.path.exists(blockdev): st_mode, modestr = self.get_mode(blockdev, lstat=False) self.assertEqual(modestr[0], 'b') diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py index 6eb7f15cbfe3f3..8250b0aef09aec 100644 --- a/Lib/test/test_statistics.py +++ b/Lib/test/test_statistics.py @@ -2998,7 +2998,7 @@ def test_cdf(self): X = NormalDist(100, 15) cdfs = [X.cdf(x) for x in range(1, 200)] self.assertEqual(set(map(type, cdfs)), {float}) - # Verify monotonic + # Verify montonic self.assertEqual(cdfs, sorted(cdfs)) # Verify center (should be exact) self.assertEqual(X.cdf(100), 0.50) diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py index 0377b7baf63b16..0241e543cd7dde 100644 --- a/Lib/test/test_strptime.py +++ b/Lib/test/test_strptime.py @@ -117,15 +117,15 @@ def test_lang(self): class TimeRETests(unittest.TestCase): - """Tests for timer.""" + """Tests for TimeRE.""" def setUp(self): - """Construct generic timer object.""" - self.time_re = _strptime.timer() + """Construct generic TimeRE object.""" + self.time_re = _strptime.TimeRE() self.locale_time = _strptime.LocaleTime() def test_pattern(self): - # Test timer.pattern + # Test TimeRE.pattern pattern_string = self.time_re.pattern(r"%a %A %d %Y") self.assertTrue(pattern_string.find(self.locale_time.a_weekday[2]) != -1, "did not find abbreviated weekday in pattern string '%s'" % @@ -178,8 +178,8 @@ def test_blankpattern(self): # Fixes bug #661354 test_locale = _strptime.LocaleTime() test_locale.timezone = (frozenset(), frozenset()) - self.assertEqual(_strptime.timer(test_locale).pattern("%Z"), '', - "with timezone == ('',''), timer().pattern('%Z') != ''") + self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '', + "with timezone == ('',''), TimeRE().pattern('%Z') != ''") def test_matching_with_escapes(self): # Make sure a format that requires escaping of characters works @@ -195,7 +195,7 @@ def test_locale_data_w_regex_metacharacters(self): locale_time.timezone = (frozenset(("utc", "gmt", "Tokyo (standard time)")), frozenset("Tokyo (daylight time)")) - time_re = _strptime.timer(locale_time) + time_re = _strptime.TimeRE(locale_time) self.assertTrue(time_re.compile("%Z").match("Tokyo (standard time)"), "locale data that contains regex metacharacters is not" " properly escaped") @@ -832,7 +832,7 @@ def test_regex_cleanup(self): self.assertEqual(len(_strptime._regex_cache), 1) def test_new_localetime(self): - # A new LocaleTime instance should be created when a new timer object + # A new LocaleTime instance should be created when a new TimeRE object # is created. 
locale_time_id = _strptime._TimeRE_cache.locale_time _strptime._TimeRE_cache.locale_time.lang = "Ni" @@ -840,7 +840,7 @@ def test_new_localetime(self): self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time) def test_TimeRE_recreation_locale(self): - # The timer instance should be recreated upon changing the locale. + # The TimeRE instance should be recreated upon changing the locale. with support.run_with_locale('LC_TIME', 'en_US.UTF8'): _strptime._strptime_time('10 2004', '%d %Y') # Get id of current cache object. @@ -861,7 +861,7 @@ def test_TimeRE_recreation_locale(self): @support.run_with_tz('STD-1DST,M4.1.0,M10.1.0') def test_TimeRE_recreation_timezone(self): - # The timer instance should be recreated upon changing the timezone. + # The TimeRE instance should be recreated upon changing the timezone. oldtzname = time.tzname tm = _strptime._strptime_time(time.tzname[0], '%Z') self.assertEqual(tm.tm_isdst, 0) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py index 4287765c60348d..f0e350c71f60ea 100644 --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -3438,7 +3438,7 @@ def test_vfork_used_when_expected(self): # because libc tends to implement that internally using vfork. But # that'd just be testing a libc+kernel implementation detail. - # Are interested in the system calls: + # Are intersted in the system calls: # clone,clone2,clone3,fork,vfork,exit,exit_group # Unfortunately using `--trace` with that list to strace fails because # not all are supported on all platforms (ex. clone2 is ia64 only...) diff --git a/Lib/test/test_syntax.py b/Lib/test/test_syntax.py index e7bb38484c6306..c52d24219410c2 100644 --- a/Lib/test/test_syntax.py +++ b/Lib/test/test_syntax.py @@ -1844,21 +1844,21 @@ SyntaxError: invalid syntax. Did you mean 'def'? >>> def foo(): -... return result +... returm result Traceback (most recent call last): SyntaxError: invalid syntax. Did you mean 'return'? ->>> lambda x: x ** 2 +>>> lamda x: x ** 2 Traceback (most recent call last): SyntaxError: invalid syntax. Did you mean 'lambda'? >>> def foo(): -... yield i +... yeld i Traceback (most recent call last): SyntaxError: invalid syntax. Did you mean 'yield'? >>> def foo(): -... global counter +... globel counter Traceback (most recent call last): SyntaxError: invalid syntax. Did you mean 'global'? 
diff --git a/Lib/test/test_sysconfig.py b/Lib/test/test_sysconfig.py index f2c9848eefc45f..2eb8de4b29fe96 100644 --- a/Lib/test/test_sysconfig.py +++ b/Lib/test/test_sysconfig.py @@ -697,7 +697,7 @@ def test_sysconfigdata_json(self): # Keys dependent on uncontrollable external context ignore_keys = {'userbase'} - # Keys dependent on Python being run outside the build directrory + # Keys dependent on Python being run outside the build directory if sysconfig.is_python_build(): ignore_keys |= {'srcdir'} # Keys dependent on the executable location @@ -706,7 +706,7 @@ def test_sysconfigdata_json(self): # Keys dependent on the environment (different inside virtual environments) if sys.prefix != sys.base_prefix: ignore_keys |= {'prefix', 'exec_prefix', 'base', 'platbase'} - # Keys dependent on Python being run from the prefix targetted when building (different on relocatable installs) + # Keys dependent on Python being run from the prefix targeted when building (different on relocatable installs) if sysconfig._installation_is_relocated(): ignore_keys |= {'prefix', 'exec_prefix', 'base', 'platbase', 'installed_base', 'installed_platbase'} diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py index db4028a5d3677c..7055e1ed147a9e 100644 --- a/Lib/test/test_tarfile.py +++ b/Lib/test/test_tarfile.py @@ -4112,7 +4112,7 @@ def test_sneaky_hardlink_fallback(self): arc.add("b/") # Point "c" to the bottom of the tree in "a" arc.add("c", symlink_to=os.path.join("a", "t")) - # link to non-existant location under "a" + # link to non-existent location under "a" arc.add("c/escape", symlink_to=os.path.join("..", "..", "link_here")) # Move "c" to point to "b" ("c/escape" no longer exists) diff --git a/Lib/test/test_tkinter/test_geometry_managers.py b/Lib/test/test_tkinter/test_geometry_managers.py index 7337c4390d604b..d71a634a767310 100644 --- a/Lib/test/test_tkinter/test_geometry_managers.py +++ b/Lib/test/test_tkinter/test_geometry_managers.py @@ -604,7 +604,7 @@ def test_grid_configure_rownspan(self): def test_grid_configure_sticky(self): f = tkinter.Frame(self.root, bg='red') - with self.assertRaisesRegex(TclError, 'bad stickiness value "glue"'): + with self.assertRaisesRegex(TclError, 'bad stickyness value "glue"'): f.grid_configure(sticky='glue') f.grid_configure(sticky='ne') self.assertEqual(f.grid_info()['sticky'], 'ne') diff --git a/Lib/test/test_tkinter/test_widgets.py b/Lib/test/test_tkinter/test_widgets.py index 30d129e4205725..ff3f92e9b5ef83 100644 --- a/Lib/test/test_tkinter/test_widgets.py +++ b/Lib/test/test_tkinter/test_widgets.py @@ -1440,7 +1440,7 @@ def test_paneconfigure_sticky(self): p, b, c = self.create2() self.check_paneconfigure(p, b, 'sticky', 'nsew', 'nesw') self.check_paneconfigure_bad(p, b, 'sticky', - 'bad stickiness value "badValue": must ' + 'bad stickyness value "badValue": must ' 'be a string containing zero or more of ' 'n, e, s, and w') diff --git a/Lib/test/test_traceback.py b/Lib/test/test_traceback.py index 904429dbd5a60a..74b979d009664d 100644 --- a/Lib/test/test_traceback.py +++ b/Lib/test/test_traceback.py @@ -4107,7 +4107,7 @@ class A: def test_getattr_error_bad_suggestions_do_not_trigger_for_small_names(self): class MyClass: - vvv = mom = w = id = python = None + vvv = mom = w = id = pytho = None for name in ("b", "v", "m", "py"): with self.subTest(name=name): @@ -4324,7 +4324,7 @@ def test_import_from_suggestions_do_not_trigger_for_long_attributes(self): self.assertNotIn("blech", actual) def
test_import_from_error_bad_suggestions_do_not_trigger_for_small_names(self): - code = "vvv = mom = w = id = python = None" + code = "vvv = mom = w = id = pytho = None" for name in ("b", "v", "m", "py"): with self.subTest(name=name): @@ -4432,19 +4432,19 @@ def func(): def test_name_error_bad_suggestions_do_not_trigger_for_small_names(self): def f_b(): - vvv = mom = w = id = python = None + vvv = mom = w = id = pytho = None b def f_v(): - vvv = mom = w = id = python = None + vvv = mom = w = id = pytho = None v def f_m(): - vvv = mom = w = id = python = None + vvv = mom = w = id = pytho = None m def f_py(): - vvv = mom = w = id = python = None + vvv = mom = w = id = pytho = None py for name, func in (("b", f_b), ("v", f_v), ("m", f_m), ("py", f_py)): diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py index af7c7947da65e3..b1615bbff383c2 100644 --- a/Lib/test/test_typing.py +++ b/Lib/test/test_typing.py @@ -9497,7 +9497,7 @@ class FC: class ACF: x: Annotated[ClassVar[Final[int]], "a decoration"] - class CALF: + class CAF: x: ClassVar[Annotated[Final[int], "a decoration"]] class AFC: @@ -9509,7 +9509,7 @@ class FAC: self.assertEqual(get_type_hints(CF, globals())['x'], ClassVar[Final[int]]) self.assertEqual(get_type_hints(FC, globals())['x'], Final[ClassVar[int]]) self.assertEqual(get_type_hints(ACF, globals())['x'], ClassVar[Final[int]]) - self.assertEqual(get_type_hints(CALF, globals())['x'], ClassVar[Final[int]]) + self.assertEqual(get_type_hints(CAF, globals())['x'], ClassVar[Final[int]]) self.assertEqual(get_type_hints(AFC, globals())['x'], Final[ClassVar[int]]) self.assertEqual(get_type_hints(FAC, globals())['x'], Final[ClassVar[int]]) diff --git a/Lib/test/test_unittest/testmock/testpatch.py b/Lib/test/test_unittest/testmock/testpatch.py index abff8542dd2271..bd85fdcfc472a6 100644 --- a/Lib/test/test_unittest/testmock/testpatch.py +++ b/Lib/test/test_unittest/testmock/testpatch.py @@ -1544,22 +1544,22 @@ def test_new_callable_failure(self): original_g = Foo.g original_foo = Foo.foo - def crash(): + def crasher(): raise NameError('crasher') @patch.object(Foo, 'g', 1) - @patch.object(Foo, 'foo', new_callable=crash) + @patch.object(Foo, 'foo', new_callable=crasher) @patch.object(Foo, 'f', 1) def thing1(): pass - @patch.object(Foo, 'foo', new_callable=crash) + @patch.object(Foo, 'foo', new_callable=crasher) @patch.object(Foo, 'g', 1) @patch.object(Foo, 'f', 1) def thing2(): pass @patch.object(Foo, 'g', 1) @patch.object(Foo, 'f', 1) - @patch.object(Foo, 'foo', new_callable=crash) + @patch.object(Foo, 'foo', new_callable=crasher) def thing3(): pass for func in thing1, thing2, thing3: @@ -1582,8 +1582,8 @@ def test_patch_multiple_failure(self): bad = patch.object(Foo, 'missing', 1) bad.attribute_name = 'missing' - for additional in [good, bad], [bad, good]: - patcher.additional_patchers = additional + for additionals in [good, bad], [bad, good]: + patcher.additional_patchers = additionals @patcher def func(): pass @@ -1598,7 +1598,7 @@ def test_patch_multiple_new_callable_failure(self): original_g = Foo.g original_foo = Foo.foo - def crash(): + def crasher(): raise NameError('crasher') patcher = patch.object(Foo, 'f', 1) @@ -1607,11 +1607,11 @@ def crash(): good = patch.object(Foo, 'g', 1) good.attribute_name = 'g' - bad = patch.object(Foo, 'foo', new_callable=crash) + bad = patch.object(Foo, 'foo', new_callable=crasher) bad.attribute_name = 'foo' - for additional in [good, bad], [bad, good]: - patcher.additional_patchers = additional + for additionals in [good, bad], [bad, good]: + 
patcher.additional_patchers = additionals @patcher def func(): pass diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py index e22fa444c18556..7d7f2fa00d35b6 100644 --- a/Lib/test/test_urllib2.py +++ b/Lib/test/test_urllib2.py @@ -1719,7 +1719,7 @@ def _test_basic_auth(self, opener, auth_handler, auth_header, realm, http_handler, password_manager, request_url, protected_url): import base64 - user, password = "while", "coyote" + user, password = "wile", "coyote" # .add_password() fed through to password manager auth_handler.add_password(realm, request_url, user, password) @@ -1756,7 +1756,7 @@ def test_basic_prior_auth_auto_send(self): # Assume already authenticated if is_authenticated=True # for APIs like Github that don't return 401 - user, password = "while", "coyote" + user, password = "wile", "coyote" request_url = "http://acme.example.com/protected" http_handler = MockHTTPHandlerCheckAuth(200) diff --git a/Lib/test/test_weakref.py b/Lib/test/test_weakref.py index b071ba3f06bf9f..4c7c900eb56ae1 100644 --- a/Lib/test/test_weakref.py +++ b/Lib/test/test_weakref.py @@ -274,14 +274,14 @@ def test_ref_reuse(self): proxy = weakref.proxy(o) ref2 = weakref.ref(o) self.assertIs(ref1, ref2, - "reference object w/out callback should be re-used") + "reference object w/out callback should be reused") o = C() proxy = weakref.proxy(o) ref1 = weakref.ref(o) ref2 = weakref.ref(o) self.assertIs(ref1, ref2, - "reference object w/out callback should be re-used") + "reference object w/out callback should be reused") self.assertEqual(weakref.getweakrefcount(o), 2, "wrong weak ref count for object") del proxy @@ -295,7 +295,7 @@ def test_proxy_reuse(self): ref = weakref.ref(o) proxy2 = weakref.proxy(o) self.assertIs(proxy1, proxy2, - "proxy object w/out callback should have been re-used") + "proxy object w/out callback should have been reused") def test_basic_proxy(self): o = C() @@ -1857,7 +1857,7 @@ def test_weak_keyed_bad_delitem(self): self.assertRaises(KeyError, d.__delitem__, o) self.assertRaises(KeyError, d.__getitem__, o) - # If a key isn't of a weakly referencable type, __getitem__ and + # If a key isn't of a weakly referenceable type, __getitem__ and # __setitem__ raise TypeError. __delitem__ should too. self.assertRaises(TypeError, d.__delitem__, 13) self.assertRaises(TypeError, d.__getitem__, 13) @@ -2260,7 +2260,7 @@ def test_names(self): >>> class Dict(dict): ... pass ... ->>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable +>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referenceable >>> r = weakref.ref(obj) >>> print(r() is obj) True diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py index 5e4b704f869e1f..bf6d5074fdebd8 100644 --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -2736,7 +2736,7 @@ def test_remove_with_clear_assume_existing(self): def do_test_remove_with_clear(self, *, raises): - # Until the discrepency between "del root[:]" and "root.clear()" is + # Until the discrepancy between "del root[:]" and "root.clear()" is # resolved, we need to keep two tests. Previously, using "del root[:]" # did not crash with the reproducer of gh-126033 while "root.clear()" # did.
diff --git a/Lib/test/test_zipfile/test_core.py b/Lib/test/test_zipfile/test_core.py index 355a5af7911542..ada96813709aea 100644 --- a/Lib/test/test_zipfile/test_core.py +++ b/Lib/test/test_zipfile/test_core.py @@ -2378,20 +2378,20 @@ def test_open_conflicting_handles(self): def test_seek_tell(self): # Test seek functionality txt = b"Where's Bruce?" - block = txt.find(b"Bruce") + bloc = txt.find(b"Bruce") # Check seek on a file with zipfile.ZipFile(TESTFN, "w") as zipf: zipf.writestr("foo.txt", txt) with zipfile.ZipFile(TESTFN, "r") as zipf: with zipf.open("foo.txt", "r") as fp: - fp.seek(block, os.SEEK_SET) - self.assertEqual(fp.tell(), block) + fp.seek(bloc, os.SEEK_SET) + self.assertEqual(fp.tell(), bloc) fp.seek(-bloc, os.SEEK_CUR) self.assertEqual(fp.tell(), 0) - fp.seek(block, os.SEEK_CUR) - self.assertEqual(fp.tell(), block) - self.assertEqual(fp.read(5), txt[block:block+5]) - self.assertEqual(fp.tell(), block + 5) + fp.seek(bloc, os.SEEK_CUR) + self.assertEqual(fp.tell(), bloc) + self.assertEqual(fp.read(5), txt[bloc:bloc+5]) + self.assertEqual(fp.tell(), bloc + 5) fp.seek(0, os.SEEK_END) self.assertEqual(fp.tell(), len(txt)) fp.seek(0, os.SEEK_SET) @@ -2402,14 +2402,14 @@ def test_seek_tell(self): zipf.writestr("foo.txt", txt) with zipfile.ZipFile(data, mode="r") as zipf: with zipf.open("foo.txt", "r") as fp: - fp.seek(block, os.SEEK_SET) - self.assertEqual(fp.tell(), block) + fp.seek(bloc, os.SEEK_SET) + self.assertEqual(fp.tell(), bloc) fp.seek(-bloc, os.SEEK_CUR) self.assertEqual(fp.tell(), 0) - fp.seek(block, os.SEEK_CUR) - self.assertEqual(fp.tell(), block) - self.assertEqual(fp.read(5), txt[block:block+5]) - self.assertEqual(fp.tell(), block + 5) + fp.seek(bloc, os.SEEK_CUR) + self.assertEqual(fp.tell(), bloc) + self.assertEqual(fp.read(5), txt[bloc:bloc+5]) + self.assertEqual(fp.tell(), bloc + 5) fp.seek(0, os.SEEK_END) self.assertEqual(fp.tell(), len(txt)) fp.seek(0, os.SEEK_SET) @@ -2418,12 +2418,12 @@ def test_seek_tell(self): def test_read_after_seek(self): # Issue 102956: Make sure seek(x, os.SEEK_CUR) doesn't break read() txt = b"Charge men!" - block = txt.find(b"men") + bloc = txt.find(b"men") with zipfile.ZipFile(TESTFN, "w") as zipf: zipf.writestr("foo.txt", txt) with zipfile.ZipFile(TESTFN, mode="r") as zipf: with zipf.open("foo.txt", "r") as fp: - fp.seek(block, os.SEEK_CUR) + fp.seek(bloc, os.SEEK_CUR) self.assertEqual(fp.read(-1), b'men!') with zipfile.ZipFile(TESTFN, mode="r") as zipf: with zipf.open("foo.txt", "r") as fp: @@ -2802,16 +2802,16 @@ def test_seek_tell(self): self.zip.setpassword(b"python") txt = self.plain test_word = b'encryption' - block = txt.find(test_word) + bloc = txt.find(test_word) bloc_len = len(test_word) with self.zip.open("test.txt", "r") as fp: - fp.seek(block, os.SEEK_SET) - self.assertEqual(fp.tell(), block) + fp.seek(bloc, os.SEEK_SET) + self.assertEqual(fp.tell(), bloc) fp.seek(-bloc, os.SEEK_CUR) self.assertEqual(fp.tell(), 0) - fp.seek(block, os.SEEK_CUR) - self.assertEqual(fp.tell(), block) - self.assertEqual(fp.read(bloc_len), txt[block:block+bloc_len]) + fp.seek(bloc, os.SEEK_CUR) + self.assertEqual(fp.tell(), bloc) + self.assertEqual(fp.read(bloc_len), txt[bloc:bloc+bloc_len]) # Make sure that the second read after seeking back beyond # _readbuffer returns the same content (ie. 
rewind to the start of @@ -2822,8 +2822,8 @@ def test_seek_tell(self): fp._offset = 0 fp.seek(0, os.SEEK_SET) self.assertEqual(fp.tell(), 0) - fp.seek(block, os.SEEK_CUR) - self.assertEqual(fp.read(bloc_len), txt[block:block+bloc_len]) + fp.seek(bloc, os.SEEK_CUR) + self.assertEqual(fp.read(bloc_len), txt[bloc:bloc+bloc_len]) fp.MIN_READ_SIZE = old_read_size fp.seek(0, os.SEEK_END) diff --git a/Lib/tkinter/__init__.py b/Lib/tkinter/__init__.py index f47d2ae34531b3..a693b04870b995 100644 --- a/Lib/tkinter/__init__.py +++ b/Lib/tkinter/__init__.py @@ -3697,7 +3697,7 @@ def set(self, value): def coords(self, value=None): """Return a tuple (X,Y) of the point along the centerline of the - through that corresponds to VALUE or the current value if None is + trough that corresponds to VALUE or the current value if None is given.""" return self._getints(self.tk.call(self._w, 'coords', value)) diff --git a/Lib/tkinter/ttk.py b/Lib/tkinter/ttk.py index 4f92997ebd17d4..c0cf1e787fa9ad 100644 --- a/Lib/tkinter/ttk.py +++ b/Lib/tkinter/ttk.py @@ -254,20 +254,20 @@ def _list_from_layouttuple(tk, ltuple): ltuple = tk.splitlist(ltuple) res = [] - index = 0 - while index < len(ltuple): - name = ltuple[index] + indx = 0 + while indx < len(ltuple): + name = ltuple[indx] opts = {} res.append((name, opts)) - index += 1 + indx += 1 - while index < len(ltuple): # grab name's options - opt, val = ltuple[index:index + 2] + while indx < len(ltuple): # grab name's options + opt, val = ltuple[indx:indx + 2] if not opt.startswith('-'): # found next name break opt = opt[1:] # remove the '-' from the option - index += 2 + indx += 2 if opt == 'children': val = _list_from_layouttuple(tk, val) diff --git a/Lib/turtle.py b/Lib/turtle.py index 2a47aa083b7fdd..e88981d298ad52 100644 --- a/Lib/turtle.py +++ b/Lib/turtle.py @@ -1646,8 +1646,8 @@ def radians(self): def _go(self, distance): """move turtle forward by specified distance""" - end = self._position + self._orient * distance - self._goto(end) + ende = self._position + self._orient * distance + self._goto(ende) def _rotate(self, angle): """Turn turtle counterclockwise by specified angle if angle > 0.""" diff --git a/Lib/unittest/mock.py b/Lib/unittest/mock.py index b49409150a8414..e1dbfdacf56337 100644 --- a/Lib/unittest/mock.py +++ b/Lib/unittest/mock.py @@ -1289,7 +1289,7 @@ class or instance) that acts as the specification for the mock object. If `return_value` attribute. * `unsafe`: By default, accessing any attribute whose name starts with - *assert*, *assret*, *assert*, *aseert*, or *assrt* raises an AttributeError. + *assert*, *assret*, *asert*, *aseert*, or *assrt* raises an AttributeError. Additionally, an AttributeError is raised when accessing attributes that match the name of an assertion method without the prefix `assert_`, e.g. accessing `called_once` instead of `assert_called_once`. diff --git a/Lib/urllib/request.py b/Lib/urllib/request.py index e79b8e3e40a2fd..41dc5d7b35dedb 100644 --- a/Lib/urllib/request.py +++ b/Lib/urllib/request.py @@ -922,7 +922,7 @@ class AbstractBasicAuthHandler: 'realm=(["\']?)([^"\']*)\\2', re.I) - # XXX could preemptively send auth info already accepted (RFC 2617, + # XXX could pre-emptively send auth info already accepted (RFC 2617, # end of section 2, and section 1.2 immediately after "credentials" # production). 
diff --git a/Lib/xml/dom/minidom.py b/Lib/xml/dom/minidom.py index 2204eae79c2237..db51f350ea0153 100644 --- a/Lib/xml/dom/minidom.py +++ b/Lib/xml/dom/minidom.py @@ -1893,7 +1893,7 @@ def renameNode(self, n, namespaceURI, name): element.setIdAttributeNode(n) # It's not clear from a semantic perspective whether we should # call the user data handlers for the NODE_RENAMED event since - # we're reusing the existing node. The draft spec has been + # we're re-using the existing node. The draft spec has been # interpreted as meaning "no, don't call the handler unless a # new node is created." return n diff --git a/Mac/BuildScript/resources/Conclusion.rtf b/Mac/BuildScript/resources/Conclusion.rtf index 48923ab91d86a0..9e0fa9fa6eeb73 100644 --- a/Mac/BuildScript/resources/Conclusion.rtf +++ b/Mac/BuildScript/resources/Conclusion.rtf @@ -4,7 +4,7 @@ {\colortbl;\red255\green255\blue255;} {\*\expandedcolortbl;;} \margl1440\margr1440\vieww10540\viewh8400\viewkind0 -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f0\fs28 \cf0 Congratulations! \fs24 diff --git a/Mac/BuildScript/resources/License.rtf b/Mac/BuildScript/resources/License.rtf index 78bd476fd8443b..b5cb8ec41c86e2 100644 --- a/Mac/BuildScript/resources/License.rtf +++ b/Mac/BuildScript/resources/License.rtf @@ -4,7 +4,7 @@ {\colortbl;\red255\green255\blue255;} {\*\expandedcolortbl;;} \margl1440\margr1440\vieww18500\viewh13520\viewkind0 -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f0\b\fs36 \cf0 \ul \ulc0 HISTORY AND LICENSE\ @@ -48,7 +48,7 @@ Thanks to the many outside volunteers who have worked under Guido's direction to \f0\b \ul TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\ \ -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f1\b0 \cf0 \ulnone Python software and documentation are licensed under the Python Software Foundation License Version 2.\ \ @@ -56,7 +56,7 @@ Starting with Python 3.8.6, examples, recipes, and other code in the documentati \ Some software incorporated into Python is under different licenses. 
The licenses are listed with code falling under that license.\ \ -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \cf0 \ \f0\b PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\ @@ -134,16 +134,16 @@ Permission to use, copy, modify, and distribute this software and its documentat STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\ \ \ -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f0\b \cf0 ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION\ -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f1\b0 \cf0 \ Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted.\ \ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\ -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \cf0 \ \ diff --git a/Mac/BuildScript/resources/ReadMe.rtf b/Mac/BuildScript/resources/ReadMe.rtf index 5c47d37d92a860..ee5ba4707dfea4 100644 --- a/Mac/BuildScript/resources/ReadMe.rtf +++ b/Mac/BuildScript/resources/ReadMe.rtf @@ -4,17 +4,17 @@ {\colortbl;\red255\green255\blue255;} {\*\expandedcolortbl;;} \margl1440\margr1440\vieww13380\viewh14580\viewkind0 -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f0\fs24 \cf0 This package will install Python $FULL_VERSION for macOS $MACOSX_DEPLOYMENT_TARGET for the following architecture(s): $ARCHITECTURES.\ \ -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\partightenfactor0 \f1\b \cf0 NOTE: \f0\b0 This is a beta preview of Python 3.13.0, the next feature release of Python 3. 
It is not intended for production use.\ -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \cf0 \ -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 \f1\b \cf0 \ul \ulc0 Certificate verification and OpenSSL\ diff --git a/Mac/BuildScript/resources/Welcome.rtf b/Mac/BuildScript/resources/Welcome.rtf index f1a2a8183b9df6..49d6e22286be26 100644 --- a/Mac/BuildScript/resources/Welcome.rtf +++ b/Mac/BuildScript/resources/Welcome.rtf @@ -4,7 +4,7 @@ {\colortbl;\red255\green255\blue255;} {\*\expandedcolortbl;;} \margl1440\margr1440\vieww12200\viewh10880\viewkind0 -\part\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\partightenfactor0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\partightenfactor0 \f0\fs24 \cf0 This package will install \f1\b Python $FULL_VERSION diff --git a/Mac/PythonLauncher/English.lproj/Credits.rtf b/Mac/PythonLauncher/English.lproj/Credits.rtf index b2ec2771a90062..930ca221a128b0 100644 --- a/Mac/PythonLauncher/English.lproj/Credits.rtf +++ b/Mac/PythonLauncher/English.lproj/Credits.rtf @@ -1,7 +1,7 @@ {\rtf1\mac\ansicpg10000\cocoartf100 {\fonttbl\f0\fswiss\fcharset77 Helvetica-Bold;\f1\fswiss\fcharset77 Helvetica;} {\colortbl;\red255\green255\blue255;} -\part\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\ql\qnatural \f0\b\fs24 \cf0 Engineering: \f1\b0 \ diff --git a/Makefile.pre.in b/Makefile.pre.in index b55239e020afdd..7fea799c3912dd 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -2560,7 +2560,7 @@ TESTSUBDIRS= idlelib/idle_test \ test/certdata/capath \ test/cjkencodings \ test/configdata \ - test/crashes \ + test/crashers \ test/data \ test/decimaltestdata \ test/dtracedata \ diff --git a/Misc/ACKS b/Misc/ACKS index 5be30c7579c1fa..fabd79b9f74210 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -32,7 +32,7 @@ Farhan Ahmad Matthew Ahrens Nir Aides Akira -Edge Akman +Ege Akman Yaniv Aknin Jyrki Alakuijala Tatiana Al-Chueyr @@ -318,7 +318,7 @@ Brad Chapman Greg Chapman Mitch Chapman Matt Chaput -Willow Charging +Willow Chargin Ben Chatterton Yogesh Chaudhari Gautam Chaudhuri @@ -551,7 +551,7 @@ Troy J. Farrell Jim Fasarakis-Hilliard Mark Favas Sergey Fedoseev -Boris Field +Boris Feld M. Felt Thomas Fenzl Niels Ferguson @@ -973,7 +973,7 @@ Derek D. Kim Gihwan Kim Jan Kim Noah Kim -Taek You Kim +Taek Joo Kim Yeojin Kim Sam Kimbrel Tomohiko Kinebuchi @@ -1336,7 +1336,7 @@ Max Neunhöffer Anthon van der Neut George Neville-Neil Hieu Nguyen -Name Nguyen +Nam Nguyen Johannes Nicolai Samuel Nicolary Jonathan Niehof @@ -1544,7 +1544,7 @@ Nikolaus Rath Sridhar Ratnakumar Ysj Ray Eric S. Raymond -Edward K. Stream +Edward K. Ream Chris Rebert Marc Recht John Redford diff --git a/Misc/HISTORY b/Misc/HISTORY index b0de1e3b670f91..d68aaa066771fb 100644 --- a/Misc/HISTORY +++ b/Misc/HISTORY @@ -168,7 +168,7 @@ Core and Builtins - Issue #24806: Prevent builtin types that are not allowed to be subclassed from being subclassed through multiple inheritance. 
-- Issue #24848: Fixed a number of bugs in UTF-7 decoding of malformed data. +- Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. - Issue #25280: Import trace messages emitted in verbose (-v) mode are no longer formatted twice. @@ -802,7 +802,7 @@ IDLE - Issue #25198: Enhance the initial html viewer now used for Idle Help. * Properly indent fixed-pitch text (patch by Mark Roseman). * Give code snippet a very Sphinx-like light blueish-gray background. - * Reuse initial width and height set by users for shell and editor. + * Re-use initial width and height set by users for shell and editor. * When the Table of Contents (TOC) menu is used, put the section header at the top of the screen. @@ -5277,7 +5277,7 @@ Library - Issue #17018: Make Process.join() retry if os.waitpid() fails with EINTR. -- Issue #17223: array module: Fix a crash when converting an array containing +- Issue #17223: array module: Fix a crasher when converting an array containing invalid characters (outside range [U+0000; U+10ffff]) to Unicode: repr(array), str(array) and array.tounicode(). Patch written by Manuel Jacob. @@ -10477,7 +10477,7 @@ Tests - Issue #11577: improve test coverage of binhex.py. Patch by Arkady Koplyarov. -- New test_crashers added to exercise the scripts in the Lib/test/crashes +- New test_crashers added to exercise the scripts in the Lib/test/crashers directory and confirm they fail as expected - Issue #11578: added test for the timeit module. Patch by Michael Henry. @@ -13868,7 +13868,7 @@ Library - Add count() and reverse() methods to collections.deque(). -- Fix variations of extending dequeues: d.extend(d) d.extendleft(d) d+=d +- Fix variations of extending deques: d.extend(d) d.extendleft(d) d+=d - Issue #6986: Fix crash in the JSON C accelerator when called with the wrong parameter types. Patch by Victor Stinner. @@ -14105,7 +14105,7 @@ Library digits in input, as recommended by the standard. Previously it was restricted to accepting [0-9]. -- Issue #6106: telnetlib.Telnet.process_rawq doesn't handle default WILL/WON'T +- Issue #6106: telnetlib.Telnet.process_rawq doesn't handle default WILL/WONT DO/DONT correctly. - Issue #1424152: Fix for http.client, urllib.request to support SSL while @@ -17280,7 +17280,7 @@ Core and Builtins * nb_divide, nb_inplace_divide * operator.div, operator.idiv, operator.__div__, operator.__idiv__ (Only __truediv__ and __floordiv__ remain, not sure how to handle - them if we want to reuse __div__ and friends. If we do, it will + them if we want to re-use __div__ and friends. If we do, it will make it harder to write code for both 2.x and 3.x.) - 'as' and 'with' are keywords. @@ -17957,7 +17957,7 @@ Core and builtins attribute on objects until one without one is found. This leads to recursion when you take a class and set its __call__ attribute to an instance of the class. Originally fixed for classic classes, but this fix is for new-style. - Removes the infinite_rec_3 crash. + Removes the infinite_rec_3 crasher. - The string and unicode methods startswith() and endswith() now accept a tuple of prefixes/suffixes to look for. Implements RFE #1491485. @@ -20460,7 +20460,7 @@ Core and builtins the overallocation is no more than three elements -- this improves space utilization for applications that have large numbers of small lists. -- Most list bodies now get reused rather than freed. Speeds up list +- Most list bodies now get re-used rather than freed. Speeds up list instantiation and deletion by saving calls to malloc() and free(). 
- The dict.update() method now accepts all the same argument forms @@ -20892,7 +20892,7 @@ Library allow any iterable. - _strptime.py now has a behind-the-scenes caching mechanism for the most - recent timer instance used along with the last five unique directive + recent TimeRE instance used along with the last five unique directive patterns. The overall module was also made more thread-safe. - random.cunifvariate() and random.stdgamma() were deprecated in Py2.3 @@ -21081,7 +21081,7 @@ Library - Lib/encodings/rot_13.py when used as a script, now more properly uses the first Python interpreter on your path. -- Removed caching of timer (and thus LocaleTime) in _strptime.py to +- Removed caching of TimeRE (and thus LocaleTime) in _strptime.py to fix a locale related bug in the test suite. Although another patch was needed to actually fix the problem, the cache code was not restored. @@ -21342,7 +21342,7 @@ Core and builtins thread mutated the dict during __delitem__, or if a comparison function mutated it. It also neglected to raise KeyError when the key wasn't present; didn't raise TypeError when the key wasn't of a weakly - referenceable type; and broke various more-or-less obscure dict + referencable type; and broke various more-or-less obscure dict invariants by using a sequence of equality comparisons over the whole set of dict keys instead of computing the key's hash code to narrow the search to those keys with the same hash code. All of these are @@ -25106,8 +25106,8 @@ Python/C API - Extensions types which support weak references must now set the field allocated for the weak reference machinery to NULL themselves; this is done to avoid the cost of checking each object for having a - weakly referenceable type in PyObject_INIT(), since most types are - not weakly referenceable. + weakly referencable type in PyObject_INIT(), since most types are + not weakly referencable. - PyFrame_FastToLocals() and PyFrame_LocalsToFast() copy bindings for free variables and cell variables to and from the frame's f_locals. @@ -31115,7 +31115,7 @@ __del__" under certain circumstances have been fixed (mostly by changes elsewher in the interpreter). - In urlparse.py, there is a cache for results in urlparse.urlparse(); -its size limit is set to 20. Also, new URL schemes https, https, and +its size limit is set to 20. Also, new URL schemes shttp, https, and snews are "supported". - shelve.py: use cPickle and cStringIO when available. Also added diff --git a/Misc/NEWS.d/3.10.0a3.rst b/Misc/NEWS.d/3.10.0a3.rst index b143d5868d5f6b..6cf3db3eb43c8b 100644 --- a/Misc/NEWS.d/3.10.0a3.rst +++ b/Misc/NEWS.d/3.10.0a3.rst @@ -636,7 +636,7 @@ find_module(), and load_module(). .. section: Library Mock objects which are not unsafe will now raise an AttributeError if an -attribute with the prefix assert, aseert, or assrt is accessed, in addition +attribute with the prefix asert, aseert, or assrt is accessed, in addition to this already happening for the prefixes assert or assret. .. diff --git a/Misc/NEWS.d/3.10.0a7.rst b/Misc/NEWS.d/3.10.0a7.rst index d7ed53f8060c7f..d866e805fd3a7e 100644 --- a/Misc/NEWS.d/3.10.0a7.rst +++ b/Misc/NEWS.d/3.10.0a7.rst @@ -534,7 +534,7 @@ during connection setup. .. nonce: cee_X5 .. section: Library -Improve performance of :class:`fractions.Fraction` arithmetic for large +Improve performance of :class:`fractions.Fraction` arithmetics for large components. Contributed by Sergey B. Kirpichev. .. 
diff --git a/Misc/NEWS.d/3.12.0a4.rst b/Misc/NEWS.d/3.12.0a4.rst index f49f09765f65df..57fb2052764b6f 100644 --- a/Misc/NEWS.d/3.12.0a4.rst +++ b/Misc/NEWS.d/3.12.0a4.rst @@ -429,7 +429,7 @@ different set of index file names instead of using ``__init__`` parameters. when accessing an attribute that matches the name of an assertion but without the prefix ``assert_``, e.g. accessing ``called_once`` instead of ``assert_called_once``. This is in addition to this already happening for -accessing attributes with prefixes ``assert``, ``assret``, ``assert``, +accessing attributes with prefixes ``assert``, ``assret``, ``asert``, ``aseert``, and ``assrt``. .. @@ -871,7 +871,7 @@ Fix reStructuredText syntax errors in docstrings in the :mod:`enum` module. .. nonce: Jd47V6 .. section: Library -Optimize the :class:`~fractions.Fraction` arithmetic for small components. +Optimize the :class:`~fractions.Fraction` arithmetics for small components. .. diff --git a/Misc/NEWS.d/3.13.0b1.rst b/Misc/NEWS.d/3.13.0b1.rst index 46879b1b0fb9f1..97731276679ba6 100644 --- a/Misc/NEWS.d/3.13.0b1.rst +++ b/Misc/NEWS.d/3.13.0b1.rst @@ -340,7 +340,7 @@ contain lambdas. .. nonce: 8LpZ6m .. section: Core and Builtins -Prevent ``agen.aclose()`` objects being reused after ``.throw()``. +Prevent ``agen.aclose()`` objects being re-used after ``.throw()``. .. diff --git a/Misc/NEWS.d/3.14.0a1.rst b/Misc/NEWS.d/3.14.0a1.rst index 54cbcd70b0bd2d..67451a7e0087cb 100644 --- a/Misc/NEWS.d/3.14.0a1.rst +++ b/Misc/NEWS.d/3.14.0a1.rst @@ -234,7 +234,7 @@ tool .. nonce: HW8CIS .. section: Tests -Update ``Lib/test/crashes/bogus_code_obj.py`` so that it crashes properly +Update ``Lib/test/crashers/bogus_code_obj.py`` so that it crashes properly again. .. @@ -5231,7 +5231,7 @@ both reference ``__class__``. .. nonce: D9EE-o .. section: Core and Builtins -JIT: Reuse trampolines on AArch64 when creating stencils. Patch by Diego +JIT: Re-use trampolines on AArch64 when creating stencils. Patch by Diego Russo .. diff --git a/Misc/NEWS.d/3.14.0a7.rst b/Misc/NEWS.d/3.14.0a7.rst index 1eb3b43748ddd5..35b96d33da4175 100644 --- a/Misc/NEWS.d/3.14.0a7.rst +++ b/Misc/NEWS.d/3.14.0a7.rst @@ -976,7 +976,7 @@ Fix mimalloc library builds for 32-bit ARM targets. .. nonce: 2BgHU5 .. section: Build -clang-cl on Windows needs option ``/EHa`` to support SHE (structured +clang-cl on Windows needs option ``/EHa`` to support SEH (structured exception handling) correctly. Fix by Chris Eibl. .. diff --git a/Misc/NEWS.d/3.14.0b1.rst b/Misc/NEWS.d/3.14.0b1.rst index 02ceb82b556386..041fbaf2051719 100644 --- a/Misc/NEWS.d/3.14.0b1.rst +++ b/Misc/NEWS.d/3.14.0b1.rst @@ -1756,7 +1756,7 @@ Add support for macOS multi-arch builds with the JIT enabled .. nonce: q9fvyM .. section: Core and Builtins -PyREPL now supports syntax highlighing. Contributed by Łukasz Langa. +PyREPL now supports syntax highlighting. Contributed by Łukasz Langa. .. @@ -1797,7 +1797,7 @@ non-``None`` ``closure``. Patch by Bartosz Sławecki. .. nonce: Uj7lyY .. section: Core and Builtins -Fix a bug that was allowing newlines inconsitently in format specifiers for +Fix a bug that was allowing newlines inconsistently in format specifiers for single-quoted f-strings. Patch by Pablo Galindo. .. diff --git a/Misc/NEWS.d/3.5.0a3.rst b/Misc/NEWS.d/3.5.0a3.rst index e62ceaa1335684..a81d67aea8663b 100644 --- a/Misc/NEWS.d/3.5.0a3.rst +++ b/Misc/NEWS.d/3.5.0a3.rst @@ -270,7 +270,7 @@ until the garbage collector cleans them up. Patch by Martin Panter. ..
section: Library collections.deque() objects now support methods for index(), insert(), and -copy(). This allows dequeues to be registered as a MutableSequence and it +copy(). This allows deques to be registered as a MutableSequence and it improves their substitutability for lists. .. diff --git a/Misc/NEWS.d/3.5.1rc1.rst b/Misc/NEWS.d/3.5.1rc1.rst index ec684bfb8b8d54..05e1ecfaf6bc79 100644 --- a/Misc/NEWS.d/3.5.1rc1.rst +++ b/Misc/NEWS.d/3.5.1rc1.rst @@ -138,7 +138,7 @@ subclassed through multiple inheritance. .. nonce: HlUSuy .. section: Core and Builtins -Fixed a number of bugs in UTF-7 decoding of malformed data. +Fixed a number of bugs in UTF-7 decoding of misformed data. .. @@ -1085,7 +1085,7 @@ them a 'sheet'. Patch by Mark Roseman. Enhance the initial html viewer now used for Idle Help. Properly indent fixed-pitch text (patch by Mark Roseman). Give code snippet a very -Sphinx-like light blueish-gray background. Reuse initial width and height +Sphinx-like light blueish-gray background. Re-use initial width and height set by users for shell and editor. When the Table of Contents (TOC) menu is used, put the section header at the top of the screen. diff --git a/Misc/NEWS.d/3.5.2rc1.rst b/Misc/NEWS.d/3.5.2rc1.rst index a50edd49311e9f..f9409b62e352ac 100644 --- a/Misc/NEWS.d/3.5.2rc1.rst +++ b/Misc/NEWS.d/3.5.2rc1.rst @@ -262,7 +262,7 @@ compiler issues. .. nonce: j9zand .. section: Core and Builtins -Deque.insert() gave odd results for bounded dequeues that had reached their +Deque.insert() gave odd results for bounded deques that had reached their maximum size. Now an IndexError will be raised when attempting to insert into a full deque. diff --git a/Misc/NEWS.d/3.6.0a1.rst b/Misc/NEWS.d/3.6.0a1.rst index b628b63f99c0d9..803c9fc5925fa6 100644 --- a/Misc/NEWS.d/3.6.0a1.rst +++ b/Misc/NEWS.d/3.6.0a1.rst @@ -751,7 +751,7 @@ The UTF-8 decoder is now up to 15 times as fast for error handlers: .. nonce: HlUSuy .. section: Core and Builtins -Fixed a number of bugs in UTF-7 decoding of malformed data. +Fixed a number of bugs in UTF-7 decoding of misformed data. .. @@ -3329,7 +3329,7 @@ them a 'sheet'. Patch by Mark Roseman. Enhance the initial html viewer now used for Idle Help. Properly indent fixed-pitch text (patch by Mark Roseman). Give code snippet a very -Sphinx-like light blueish-gray background. Reuse initial width and height set by +Sphinx-like light blueish-gray background. Re-use initial width and height set by users for shell and editor. When the Table of Contents (TOC) menu is used, put the section header at the top of the screen. diff --git a/Misc/NEWS.d/3.9.0a1.rst b/Misc/NEWS.d/3.9.0a1.rst index 796322ae42cc4b..cc24bae5881df1 100644 --- a/Misc/NEWS.d/3.9.0a1.rst +++ b/Misc/NEWS.d/3.9.0a1.rst @@ -414,7 +414,7 @@ The select module is now PEP-384 compliant and no longer has static state .. nonce: yZXC3P .. section: Core and Builtins -ast module updated to PEP-384 and all statistics removed +ast module updated to PEP-384 and all statics removed .. diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst index 767d7b97726971..5c0813b1a0abda 100644 --- a/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst +++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst @@ -1 +1 @@ -Fix PyREPL syntax highlightning on match cases after multi-line case. Contributed by Olga Matoula.
+Fix PyREPL syntax highlighting on match cases after multi-line case. Contributed by Olga Matoula. diff --git a/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst b/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst index ddc2310392fe92..0a0d66ac0b8abf 100644 --- a/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst +++ b/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst @@ -1,2 +1,2 @@ -Fix pickling failures for protocols 0 and 1 for many objects realted to +Fix pickling failures for protocols 0 and 1 for many objects related to subinterpreters. diff --git a/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst b/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst index ebe3ab0e7d1993..25599a865b7246 100644 --- a/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst +++ b/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst @@ -1,4 +1,4 @@ The cases generator no longer accepts type annotations on stack items. -Conversions to non-default types are now done explictly in bytecodes.c and +Conversions to non-default types are now done explicitly in bytecodes.c and optimizer_bytecodes.c. This will simplify code generation for top-of-stack caching and other future features. diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in index 3c6ee659e744af..86c8eb27c0a6c7 100644 --- a/Modules/Setup.stdlib.in +++ b/Modules/Setup.stdlib.in @@ -85,7 +85,7 @@ # # Since the compilation of the built-in cryptographic modules depends # on whether we are building on WASI or not, rules will be explicitly -# written. In the future, it should be preferrable to be able to setup +# written. In the future, it should be preferable to be able to setup # the relevant bits here instead of in Makefile.pre.in or configure.ac. # Hash functions can be disabled with --without-builtin-hashlib-hashes. diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c index e5086c922fa1b4..3ba48d5d9d3c64 100644 --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -117,7 +117,7 @@ class dequeobject_converter(self_converter): * d.rightindex become indices into distinct blocks and either may * be larger than the other. * - * Empty dequeues have: + * Empty deques have: * d.len == 0 * d.leftblock == d.rightblock * d.leftindex == CENTER + 1 @@ -139,7 +139,7 @@ struct dequeobject { Py_ssize_t leftindex; /* 0 <= leftindex < BLOCKLEN */ Py_ssize_t rightindex; /* 0 <= rightindex < BLOCKLEN */ size_t state; /* incremented whenever the indices move */ - Py_ssize_t maxlen; /* maxlen is -1 for unbounded dequeues */ + Py_ssize_t maxlen; /* maxlen is -1 for unbounded deques */ Py_ssize_t numfreeblocks; block *freeblocks[MAXFREEBLOCKS]; PyObject *weakreflist; @@ -2551,7 +2551,7 @@ _collections__count_elements_impl(PyObject *module, PyObject *mapping, while (1) { /* Fast path advantages: 1. Eliminate double hashing - (by reusing the same hash for both the get and set) + (by re-using the same hash for both the get and set) 2. Avoid argument overhead of PyObject_CallFunctionObjArgs (argument tuple creation and parsing) 3.
Avoid indirection through a bound method object diff --git a/Modules/_ctypes/ctypes.h b/Modules/_ctypes/ctypes.h index eef57a75d07160..5b4f97d43b8721 100644 --- a/Modules/_ctypes/ctypes.h +++ b/Modules/_ctypes/ctypes.h @@ -68,7 +68,7 @@ #endif #ifdef MS_WIN32 -#include // for IUnknown interface +#include // for IUnknown interface #endif typedef struct { diff --git a/Modules/_datetimemodule.c b/Modules/_datetimemodule.c index 11bf170b16ad81..7a6426593d021f 100644 --- a/Modules/_datetimemodule.c +++ b/Modules/_datetimemodule.c @@ -478,7 +478,7 @@ days_before_year(int year) static void ord_to_ymd(int ordinal, int *year, int *month, int *day) { - int n, n1, n4, n100, n400, leap year, preceding; + int n, n1, n4, n100, n400, leapyear, preceding; /* ordinal is a 1-based index, starting at 1-Jan-1. The pattern of * leap years repeats exactly every 400 years. The basic strategy is @@ -542,10 +542,10 @@ ord_to_ymd(int ordinal, int *year, int *month, int *day) * find the month via an estimate that's either exact or one too * large. */ - leap year = n1 == 3 && (n4 != 24 || n100 == 3); - assert(leap year == is_leap(*year)); + leapyear = n1 == 3 && (n4 != 24 || n100 == 3); + assert(leapyear == is_leap(*year)); *month = (n + 50) >> 5; - preceding = (_days_before_month[*month] + (*month > 2 && leap year)); + preceding = (_days_before_month[*month] + (*month > 2 && leapyear)); if (preceding > n) { /* estimate is too large */ *month -= 1; diff --git a/Modules/_decimal/libmpdec/basearith.h b/Modules/_decimal/libmpdec/basearith.h index f5ad0b4f449fee..d35925aaddb48e 100644 --- a/Modules/_decimal/libmpdec/basearith.h +++ b/Modules/_decimal/libmpdec/basearith.h @@ -110,27 +110,27 @@ _mpd_div_words_r(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo) l = l + n_adj; if (l < n_adj) h++; t = h + hi; - /* At this point t == quest, with q == quest or q == quest+1: - * 1) 0 <= 2**64*hi + lo - quest*MPD_RADIX < 2*MPD_RADIX + /* At this point t == qest, with q == qest or q == qest+1: + * 1) 0 <= 2**64*hi + lo - qest*MPD_RADIX < 2*MPD_RADIX */ - /* t = 2**64-1 - quest = 2**64 - (quest+1) */ + /* t = 2**64-1 - qest = 2**64 - (qest+1) */ t = MPD_UINT_MAX - t; - /* (h, l) = 2**64*MPD_RADIX - (quest+1)*MPD_RADIX */ + /* (h, l) = 2**64*MPD_RADIX - (qest+1)*MPD_RADIX */ _mpd_mul_words(&h, &l, t, MPD_RADIX); l = l + lo; if (l < lo) h++; h += hi; h -= MPD_RADIX; - /* (h, l) = 2**64*hi + lo - (quest+1)*MPD_RADIX (mod 2**128) - * Case q == quest+1: + /* (h, l) = 2**64*hi + lo - (qest+1)*MPD_RADIX (mod 2**128) + * Case q == qest+1: * a) h == 0, l == r - * b) q := h - t == quest+1 + * b) q := h - t == qest+1 * c) r := l - * Case q == quest: + * Case q == qest: * a) h == MPD_UINT_MAX, l == 2**64-(MPD_RADIX-r) - * b) q := h - t == quest + * b) q := h - t == qest * c) r := l + MPD_RADIX = r */ diff --git a/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt b/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt index 11e4927b5ce637..ba804e4b4e7864 100644 --- a/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt +++ b/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt @@ -54,29 +54,29 @@ relative error of 2**(1-F): Calculate an estimate for q = floor(n/p). The multiplication has another maximum relative error of 2**(1-F): - (9) quest = n * pinv + (9) qest = n * pinv -If we can show that q < quest < q+1, then trunc(quest) = q. It is then +If we can show that q < qest < q+1, then trunc(qest) = q. It is then easy to recover the remainder r. 
The complete algorithm is: a) Set the control word to 64-bit precision and truncation mode. b) n = a * b # Calculate exact product. - c) quest = n * pinv # Calculate estimate for the quotient. + c) qest = n * pinv # Calculate estimate for the quotient. - d) q = (quest+2**63)-2**63 # Truncate quest to the exact quotient. + d) q = (qest+2**63)-2**63 # Truncate qest to the exact quotient. f) r = n - q * p # Calculate remainder. -Proof for q < quest < q+1: +Proof for q < qest < q+1: ------------------------- -Using the cumulative error, the error bounds for quest are: +Using the cumulative error, the error bounds for qest are: n n * (1 + 2**(1-F))**2 - (9) --------------------- <= quest <= --------------------- + (9) --------------------- <= qest <= --------------------- p * (1 + 2**(1-F))**2 p diff --git a/Modules/_decimal/libmpdec/umodarith.h b/Modules/_decimal/libmpdec/umodarith.h index e4fe2dc35ba26b..d7dbbbe6a7331a 100644 --- a/Modules/_decimal/libmpdec/umodarith.h +++ b/Modules/_decimal/libmpdec/umodarith.h @@ -380,8 +380,8 @@ std_powmod(mpd_uint_t base, mpd_uint_t exp, mpd_uint_t umod) * pinv := (long double)1.0 / p (precalculated) * * a) n = a * b # Calculate exact product. - * b) quest = n * pinv # Calculate estimate for q = n / p. - * c) q = (quest+2**63)-2**63 # Truncate quest to the exact quotient. + * b) qest = n * pinv # Calculate estimate for q = n / p. + * c) q = (qest+2**63)-2**63 # Truncate qest to the exact quotient. * d) r = n - q * p # Calculate remainder. * * Remarks: diff --git a/Modules/_decimal/tests/bench.py b/Modules/_decimal/tests/bench.py index ed562cb79b7c80..6605e9a92e2dde 100644 --- a/Modules/_decimal/tests/bench.py +++ b/Modules/_decimal/tests/bench.py @@ -74,9 +74,9 @@ def _increase_int_max_str_digits(func, maxdigits=maxdigits): def wrapper(*args, **kwargs): previous_int_limit = sys.get_int_max_str_digits() sys.set_int_max_str_digits(maxdigits) - and = func(*args, **kwargs) + ans = func(*args, **kwargs) sys.set_int_max_str_digits(previous_int_limit) - return and + return ans return wrapper return _increase_int_max_str_digits diff --git a/Modules/_decimal/tests/bignum.py b/Modules/_decimal/tests/bignum.py index 2d232a362536fd..a67e161ddf098f 100644 --- a/Modules/_decimal/tests/bignum.py +++ b/Modules/_decimal/tests/bignum.py @@ -27,8 +27,8 @@ def xhash(coeff, exp): else: exp_hash = pow(_PyHASH_10INV, -exp, _PyHASH_MODULUS) hash_ = coeff * exp_hash % _PyHASH_MODULUS - and = hash_ if sign == 1 else -hash_ - return -2 if and == -1 else and + ans = hash_ if sign == 1 else -hash_ + return -2 if ans == -1 else ans x = mpz(10) ** 425000000 - 1 diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c index e4d32415d2be0c..1c888295cb07f1 100644 --- a/Modules/_functoolsmodule.c +++ b/Modules/_functoolsmodule.c @@ -503,7 +503,7 @@ partial_vectorcall(PyObject *self, PyObject *const *args, assert(i == pto_nkwds); Py_XDECREF(pto_kw_merged); - /* Resize Stack if the removing overallocation saves some noticeable memory + /* Resize Stack if the removing overallocation saves some noticable memory * NOTE: This whole block can be removed without breaking anything */ Py_ssize_t noveralloc = n_merges + nkwds; if (stack != small_stack && noveralloc > 6 && noveralloc > init_stack_size / 10) { diff --git a/Modules/_pickle.c b/Modules/_pickle.c index 1c6a5900ec1df4..cf3ceb43fb3f3f 100644 --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -4650,7 +4650,7 @@ Clears the pickler's "memo". 
The memo is the data structure that remembers which objects the pickler has already seen, so that shared or recursive objects are pickled by reference and not by value. This method is useful when -reusing picklers. +re-using picklers. [clinic start generated code]*/ static PyObject * diff --git a/Modules/_ssl.c b/Modules/_ssl.c index bd78ccb72db4f1..24c243e330d4bf 100644 --- a/Modules/_ssl.c +++ b/Modules/_ssl.c @@ -197,7 +197,7 @@ extern const SSL_METHOD *TLSv1_2_method(void); * ECDH+*: enable ephemeral elliptic curve Diffie-Hellman * DHE+*: fallback to ephemeral finite field Diffie-Hellman * encryption order: AES AEAD (GCM), ChaCha AEAD, AES CBC - * !annul:!eNULL: really no NULL ciphers + * !aNULL:!eNULL: really no NULL ciphers * !aDSS: no authentication with discrete logarithm DSA algorithm * !SHA1: no weak SHA1 MAC * !AESCCM: no CCM mode, it's uncommon and slow @@ -205,7 +205,7 @@ extern const SSL_METHOD *TLSv1_2_method(void); * Based on Hynek's excellent blog post (update 2021-02-11) * https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ */ - #define PY_SSL_DEFAULT_CIPHER_STRING "@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!annul:!eNULL:!aDSS:!SHA1:!AESCCM" + #define PY_SSL_DEFAULT_CIPHER_STRING "@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM" #ifndef PY_SSL_MIN_PROTOCOL #define PY_SSL_MIN_PROTOCOL TLS1_2_VERSION #endif @@ -3450,7 +3450,7 @@ _ssl__SSLContext_impl(PyTypeObject *type, int proto_version) #endif } else { /* SSLv2 needs MD5 */ - result = SSL_CTX_set_cipher_list(ctx, "HIGH:!annul:!eNULL"); + result = SSL_CTX_set_cipher_list(ctx, "HIGH:!aNULL:!eNULL"); } if (result == 0) { ERR_clear_error(); diff --git a/Modules/_testinternalcapi.c b/Modules/_testinternalcapi.c index b5eea0b6194f5d..533e7dd3a7ec00 100644 --- a/Modules/_testinternalcapi.c +++ b/Modules/_testinternalcapi.c @@ -497,7 +497,7 @@ test_bytes_find(PyObject *self, PyObject *Py_UNUSED(args)) CHECK("ython", "thon", 1, 2); CHECK("thon", "thon", 2, 2); CHECK("hon", "thon", 3, -1); - CHECK("Python", "zz", 0, -1); + CHECK("Pytho", "zz", 0, -1); CHECK("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "ab", 0, -1); CHECK("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "ba", 0, -1); CHECK("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bb", 0, -1); @@ -891,7 +891,7 @@ write_perf_map_entry(PyObject *self, PyObject *args) unsigned int code_size; const char *entry_name; - if (!PyArg_ParseTuple(args, "is", &code_addr_v, &code_size, &entry_name)) + if (!PyArg_ParseTuple(args, "OIs", &code_addr_v, &code_size, &entry_name)) return NULL; code_addr = PyLong_AsVoidPtr(code_addr_v); if (code_addr == NULL) { diff --git a/Modules/_zstd/_zstdmodule.c b/Modules/_zstd/_zstdmodule.c index 59aece588ce8ca..d75c0779474a82 100644 --- a/Modules/_zstd/_zstdmodule.c +++ b/Modules/_zstd/_zstdmodule.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 @@ -497,7 +497,7 @@ _zstd.get_frame_info frame_buffer: Py_buffer A bytes-like object, containing the header of a Zstandard frame. -Get Zstandard frame infomation from a frame header. +Get Zstandard frame information from a frame header.
[clinic start generated code]*/ static PyObject * diff --git a/Modules/_zstd/_zstdmodule.h b/Modules/_zstd/_zstdmodule.h index 82226ff8718e6b..4e8f708f2232c7 100644 --- a/Modules/_zstd/_zstdmodule.h +++ b/Modules/_zstd/_zstdmodule.h @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* Declarations shared between different parts of the _zstd module*/ diff --git a/Modules/_zstd/buffer.h b/Modules/_zstd/buffer.h index 0ac7bcb4ddc416..4c885fa0d720fd 100644 --- a/Modules/_zstd/buffer.h +++ b/Modules/_zstd/buffer.h @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ #ifndef ZSTD_BUFFER_H #define ZSTD_BUFFER_H diff --git a/Modules/_zstd/clinic/_zstdmodule.c.h b/Modules/_zstd/clinic/_zstdmodule.c.h index 081ea728001757..766e1cfa776767 100644 --- a/Modules/_zstd/clinic/_zstdmodule.c.h +++ b/Modules/_zstd/clinic/_zstdmodule.c.h @@ -289,7 +289,7 @@ PyDoc_STRVAR(_zstd_get_frame_info__doc__, "get_frame_info($module, /, frame_buffer)\n" "--\n" "\n" -"Get Zstandard frame infomation from a frame header.\n" +"Get Zstandard frame information from a frame header.\n" "\n" " frame_buffer\n" " A bytes-like object, containing the header of a Zstandard frame."); diff --git a/Modules/_zstd/compressor.c b/Modules/_zstd/compressor.c index dcd4513f9a7414..bc9e6eff89af68 100644 --- a/Modules/_zstd/compressor.c +++ b/Modules/_zstd/compressor.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* ZstdCompressor class definitions */ @@ -713,7 +713,7 @@ _zstd_ZstdCompressor_set_pledged_input_size_impl(ZstdCompressor *self, unsigned long long size) /*[clinic end generated code: output=3a09e55cc0e3b4f9 input=afd8a7d78cff2eb5]*/ { - // Error occured while converting argument, should be unreachable + // Error occurred while converting argument, should be unreachable assert(size != ZSTD_CONTENTSIZE_ERROR); /* Thread-safe code */ diff --git a/Modules/_zstd/decompressor.c b/Modules/_zstd/decompressor.c index b00ee05d2f51bf..c53d6e4cb05cf0 100644 --- a/Modules/_zstd/decompressor.c +++ b/Modules/_zstd/decompressor.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* ZstdDecompressor class definition */ diff --git a/Modules/_zstd/zstddict.c b/Modules/_zstd/zstddict.c index 35d6ca8e55a265..14f74aaed46ec5 100644 --- a/Modules/_zstd/zstddict.c +++ b/Modules/_zstd/zstddict.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* ZstdDict class definitions */ diff --git a/Modules/_zstd/zstddict.h b/Modules/_zstd/zstddict.h index e0d3f46b2b14a6..4a403416dbd4a3 100644 --- a/Modules/_zstd/zstddict.h +++ b/Modules/_zstd/zstddict.h @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library.
*/ #ifndef ZSTD_DICT_H #define ZSTD_DICT_H diff --git a/Modules/cjkcodecs/_codecs_iso2022.c b/Modules/cjkcodecs/_codecs_iso2022.c index 41466dd19acea8..ef6faeb71274e1 100644 --- a/Modules/cjkcodecs/_codecs_iso2022.c +++ b/Modules/cjkcodecs/_codecs_iso2022.c @@ -140,15 +140,15 @@ struct iso2022_designation { struct iso2022_config { int flags; - const struct iso2022_designation *designations; /* non-ascii designs */ + const struct iso2022_designation *designations; /* non-ascii desigs */ }; /*-*- iso-2022 codec implementation -*-*/ CODEC_INIT(iso2022) { - const struct iso2022_designation *design; - for (design = CONFIG_DESIGNATIONS; design->mark; design++) { + const struct iso2022_designation *desig; + for (desig = CONFIG_DESIGNATIONS; desig->mark; desig++) { if (desig->initializer != NULL && desig->initializer(codec) != 0) { return -1; } diff --git a/Modules/clinic/_pickle.c.h b/Modules/clinic/_pickle.c.h index 94220e47198735..213e817a50a287 100644 --- a/Modules/clinic/_pickle.c.h +++ b/Modules/clinic/_pickle.c.h @@ -17,7 +17,7 @@ PyDoc_STRVAR(_pickle_Pickler_clear_memo__doc__, "The memo is the data structure that remembers which objects the\n" "pickler has already seen, so that shared or recursive objects are\n" "pickled by reference and not by value. This method is useful when\n" -"reusing picklers."); +"re-using picklers."); #define _PICKLE_PICKLER_CLEAR_MEMO_METHODDEF \ {"clear_memo", (PyCFunction)_pickle_Pickler_clear_memo, METH_NOARGS, _pickle_Pickler_clear_memo__doc__}, diff --git a/Modules/hmacmodule.c b/Modules/hmacmodule.c index b11e97e52dde8b..95e400231bb65c 100644 --- a/Modules/hmacmodule.c +++ b/Modules/hmacmodule.c @@ -649,7 +649,7 @@ find_hash_info(hmacmodule_state *state, PyObject *hash_info_ref) { const py_hmac_hinfo *info = NULL; int rc = find_hash_info_impl(state, hash_info_ref, &info); - // The code below could be simplified with only 'rc == 0' case, + // The code below could be simplfied with only 'rc == 0' case, // but we are deliberately verbose to ease future improvements. 
if (rc < 0) { return NULL; diff --git a/Modules/itertoolsmodule.c b/Modules/itertoolsmodule.c index 5806185db44e84..cc1a558001563c 100644 --- a/Modules/itertoolsmodule.c +++ b/Modules/itertoolsmodule.c @@ -2128,7 +2128,7 @@ product_next_lock_held(PyObject *op) } else { Py_ssize_t *indices = lz->indices; - /* Copy the previous result tuple or reuse it if available */ + /* Copy the previous result tuple or re-use it if available */ if (Py_REFCNT(result) > 1) { PyObject *old_result = result; result = _PyTuple_FromArray(_PyTuple_ITEMS(old_result), npools); @@ -2367,7 +2367,7 @@ combinations_next_lock_held(PyObject *op) PyTuple_SET_ITEM(result, i, elem); } } else { - /* Copy the previous result tuple or reuse it if available */ + /* Copy the previous result tuple or re-use it if available */ if (Py_REFCNT(result) > 1) { PyObject *old_result = result; result = _PyTuple_FromArray(_PyTuple_ITEMS(old_result), r); @@ -2623,7 +2623,7 @@ cwr_next(PyObject *op) } } } else { - /* Copy the previous result tuple or reuse it if available */ + /* Copy the previous result tuple or re-use it if available */ if (Py_REFCNT(result) > 1) { PyObject *old_result = result; result = _PyTuple_FromArray(_PyTuple_ITEMS(old_result), r); @@ -2884,7 +2884,7 @@ permutations_next(PyObject *op) if (n == 0) goto empty; - /* Copy the previous result tuple or reuse it if available */ + /* Copy the previous result tuple or re-use it if available */ if (Py_REFCNT(result) > 1) { PyObject *old_result = result; result = _PyTuple_FromArray(_PyTuple_ITEMS(old_result), r); diff --git a/Modules/mathmodule.c b/Modules/mathmodule.c index ee94f031899238..7c2a421dd6a450 100644 --- a/Modules/mathmodule.c +++ b/Modules/mathmodule.c @@ -2328,7 +2328,7 @@ static PyObject * math_log(PyObject *module, PyObject * const *args, Py_ssize_t nargs) { PyObject *num, *den; - PyObject *and; + PyObject *ans; if (!_PyArg_CheckPositional("log", nargs, 1, 2)) return NULL; @@ -2343,10 +2343,10 @@ math_log(PyObject *module, PyObject * const *args, Py_ssize_t nargs) return NULL; } - and = PyNumber_TrueDivide(num, den); + ans = PyNumber_TrueDivide(num, den); Py_DECREF(num); Py_DECREF(den); - return and; + return ans; } PyDoc_STRVAR(math_log_doc, diff --git a/Modules/mmapmodule.c b/Modules/mmapmodule.c index 44899198a90c38..142ff1a21316ab 100644 --- a/Modules/mmapmodule.c +++ b/Modules/mmapmodule.c @@ -15,7 +15,7 @@ / This version of mmapmodule.c has been changed significantly / from the original mmapfile.c on which it was based. / The original version of mmapfile is maintained by Sam at - / ftp://squirrel.nightmare.com/pub/python/python-ext. + / ftp://squirl.nightmare.com/pub/python/python-ext. */ #ifndef Py_BUILD_CORE_BUILTIN diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c index 29ff229df718e3..47eaf5cd428a53 100644 --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -5830,7 +5830,7 @@ os_nice_impl(PyObject *module, int increment) /* There are two flavours of 'nice': one that returns the new priority (as required by almost all standards out there) and the - Linux/FreeBSD one, which returns '0' on success and advice + Linux/FreeBSD one, which returns '0' on success and advices the use of getpriority() to get the new priority. 
If we are of the nice family that returns the new priority, we diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c index f115c1bff136ca..f3ad01854de93b 100644 --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -6125,7 +6125,7 @@ gethost_common(socket_state *state, struct hostent *h, struct sockaddr *addr, name = sock_decode_hostname(h->h_name); if (name == NULL) goto err; - rtn_tuple = Py_BuildValue("NO", name, name_list, addr_list); + rtn_tuple = Py_BuildValue("NOO", name, name_list, addr_list); err: Py_XDECREF(name_list); diff --git a/Objects/clinic/unicodeobject.c.h b/Objects/clinic/unicodeobject.c.h index 162d7af6c9f9a0..1819fbaea220a3 100644 --- a/Objects/clinic/unicodeobject.c.h +++ b/Objects/clinic/unicodeobject.c.h @@ -604,9 +604,9 @@ PyDoc_STRVAR(unicode_isalnum__doc__, "isalnum($self, /)\n" "--\n" "\n" -"Return True if the string is an alphanumeric string, False otherwise.\n" +"Return True if the string is an alpha-numeric string, False otherwise.\n" "\n" -"A string is alphanumeric if all characters in the string are alphanumeric and\n" +"A string is alpha-numeric if all characters in the string are alpha-numeric and\n" "there is at least one character in the string."); #define UNICODE_ISALNUM_METHODDEF \ diff --git a/Objects/codeobject.c b/Objects/codeobject.c index 223bb747ca2de4..42e021679b583f 100644 --- a/Objects/codeobject.c +++ b/Objects/codeobject.c @@ -2012,7 +2012,7 @@ _PyCode_CheckNoExternalState(PyCodeObject *co, _PyCode_var_counts_t *counts, errmsg = "globals not supported"; } // Otherwise we don't check counts.unbound.globals.numunknown since we can't - // distinguish between globals and builtins here. + // distinguish beween globals and builtins here. if (errmsg != NULL) { if (p_errmsg != NULL) { @@ -2123,7 +2123,7 @@ code_returns_only_none(PyCodeObject *co) for (int i = 0; i < len; i += _PyInstruction_GetLength(co, i)) { _Py_CODEUNIT inst = _Py_GetBaseCodeUnit(co, i); if (IS_RETURN_OPCODE(inst.op.code)) { - // We already know it isn't returning None. + // We alraedy know it isn't returning None. return 0; } } diff --git a/Objects/dictnotes.txt b/Objects/dictnotes.txt index 838c6b334ea330..db6a3cf1d634b0 100644 --- a/Objects/dictnotes.txt +++ b/Objects/dictnotes.txt @@ -56,7 +56,7 @@ Membership Testing Dynamic Mappings Characterized by deletions interspersed with adds and replacements. - Performance benefits greatly from the reuse of dummy entries. + Performance benefits greatly from the re-use of dummy entries. Data Layout ----------- diff --git a/Objects/dictobject.c b/Objects/dictobject.c index f41337c7b7666e..0ed52ac5e87b6e 100644 --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -81,7 +81,7 @@ DK_ENTRIES(keys)[index] if index >= 0): Active upon key insertion. Dummy slots cannot be made Unused again else the probe sequence in case of collision would have no way to know they were once active. - In free-threaded builds dummy slots are not reused to allow lock-free + In free-threaded builds dummy slots are not re-used to allow lock-free lookups to proceed safely. 4. Pending. 
index >= 0, key != NULL, and value == NULL (split only) @@ -659,7 +659,7 @@ _PyDict_CheckConsistency(PyObject *op, int check_content) PyDictObject *mp = (PyDictObject *)op; PyDictKeysObject *keys = mp->ma_keys; - int split = _PyDict_HasSplitTable(mp); + int splitted = _PyDict_HasSplitTable(mp); Py_ssize_t usable = USABLE_FRACTION(DK_SIZE(keys)); // In the free-threaded build, shared keys may be concurrently modified, @@ -672,7 +672,7 @@ _PyDict_CheckConsistency(PyObject *op, int check_content) CHECK(0 <= dk_nentries && dk_nentries <= usable); CHECK(dk_usable + dk_nentries <= usable); - if (!split) { + if (!splitted) { /* combined table */ CHECK(keys->dk_kind != DICT_KEYS_SPLIT); CHECK(keys->dk_refcnt == 1 || keys == Py_EMPTY_KEYS); @@ -721,20 +721,20 @@ _PyDict_CheckConsistency(PyObject *op, int check_content) CHECK(PyUnicode_CheckExact(key)); Py_hash_t hash = unicode_get_hash(key); CHECK(hash != -1); - if (!split) { + if (!splitted) { CHECK(entry->me_value != NULL); } } - if (split) { + if (splitted) { CHECK(entry->me_value == NULL); } } } - if (split) { + if (splitted) { CHECK(mp->ma_used <= SHARED_KEYS_MAX_SIZE); - /* split table */ + /* splitted table */ int duplicate_check = 0; for (Py_ssize_t i=0; i < mp->ma_used; i++) { int index = get_index_from_order(mp, i); @@ -7218,7 +7218,7 @@ set_dict_inline_values(PyObject *obj, PyDictObject *new_dict) #ifdef Py_GIL_DISABLED -// Tries and sets the dictionary for an object in the easy case when our current +// Trys and sets the dictionary for an object in the easy case when our current // dictionary is either completely not materialized or is a dictionary which // does not point at the inline values. static bool diff --git a/Objects/exceptions.c b/Objects/exceptions.c index 31d84e4ba6e94b..b17cac83551670 100644 --- a/Objects/exceptions.c +++ b/Objects/exceptions.c @@ -3698,7 +3698,7 @@ UnicodeDecodeError_init(PyObject *self, PyObject *args, PyObject *kwds) if (PyObject_GetBuffer(object, &view, PyBUF_SIMPLE) != 0) { return -1; } - // 'object' is borrowed, so we can reuse the variable + // 'object' is borrowed, so we can re-use the variable object = PyBytes_FromStringAndSize(view.buf, view.len); PyBuffer_Release(&view); if (object == NULL) { diff --git a/Objects/listobject.c b/Objects/listobject.c index 1d9041aa9ecc22..1b36f4c25abf4d 100644 --- a/Objects/listobject.c +++ b/Objects/listobject.c @@ -2304,7 +2304,7 @@ merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, min_gallop = ms->min_gallop; for (;;) { - Py_ssize_t account = 0; /* # of times A won in a row */ + Py_ssize_t acount = 0; /* # of times A won in a row */ Py_ssize_t bcount = 0; /* # of times B won in a row */ /* Do the straightforward thing until (if ever) one run @@ -2318,7 +2318,7 @@ merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, goto Fail; sortslice_copy_incr(&dest, &ssb); ++bcount; - account = 0; + acount = 0; --nb; if (nb == 0) goto Succeed; @@ -2327,12 +2327,12 @@ merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, } else { sortslice_copy_incr(&dest, &ssa); - ++account; + ++acount; bcount = 0; --na; if (na == 1) goto CopyB; - if (account >= min_gallop) + if (acount >= min_gallop) break; } } @@ -2348,7 +2348,7 @@ merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, min_gallop -= min_gallop > 1; ms->min_gallop = min_gallop; k = gallop_right(ms, ssb.keys[0], ssa.keys, na, 0); - account = k; + acount = k; if (k) { if (k < 0) goto Fail; @@ -2386,7 +2386,7 @@ merge_lo(MergeState *ms, sortslice ssa, Py_ssize_t na, --na; if (na == 1) goto CopyB; - } while (account >= 
MIN_GALLOP || bcount >= MIN_GALLOP); + } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP); ++min_gallop; /* penalize it for leaving galloping mode */ ms->min_gallop = min_gallop; } @@ -2442,7 +2442,7 @@ merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na, min_gallop = ms->min_gallop; for (;;) { - Py_ssize_t account = 0; /* # of times A won in a row */ + Py_ssize_t acount = 0; /* # of times A won in a row */ Py_ssize_t bcount = 0; /* # of times B won in a row */ /* Do the straightforward thing until (if ever) one run @@ -2455,18 +2455,18 @@ merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na, if (k < 0) goto Fail; sortslice_copy_decr(&dest, &ssa); - ++account; + ++acount; bcount = 0; --na; if (na == 0) goto Succeed; - if (account >= min_gallop) + if (acount >= min_gallop) break; } else { sortslice_copy_decr(&dest, &ssb); ++bcount; - account = 0; + acount = 0; --nb; if (nb == 1) goto CopyA; @@ -2489,7 +2489,7 @@ merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na, if (k < 0) goto Fail; k = na - k; - account = k; + acount = k; if (k) { sortslice_advance(&dest, -k); sortslice_advance(&ssa, -k); @@ -2526,7 +2526,7 @@ merge_hi(MergeState *ms, sortslice ssa, Py_ssize_t na, --na; if (na == 0) goto Succeed; - } while (account >= MIN_GALLOP || bcount >= MIN_GALLOP); + } while (acount >= MIN_GALLOP || bcount >= MIN_GALLOP); ++min_gallop; /* penalize it for leaving galloping mode */ ms->min_gallop = min_gallop; } diff --git a/Objects/listsort.txt b/Objects/listsort.txt index 4dd0955e35810b..5b2fc7d50a25ca 100644 --- a/Objects/listsort.txt +++ b/Objects/listsort.txt @@ -721,7 +721,7 @@ wildly unbalanced runs already enjoys excellent performance. ~sort is a good example of when balanced runs could benefit from a better hint value: to the extent possible, this would like to use a starting -offset equal to the previous value of account/bcount. Doing so saves about +offset equal to the previous value of acount/bcount. Doing so saves about 10% of the compares in ~sort. However, doing so is also a mixed bag, hurting other cases. diff --git a/Objects/longobject.c b/Objects/longobject.c index 1e73b6a442303c..581db10b54ab57 100644 --- a/Objects/longobject.c +++ b/Objects/longobject.c @@ -2155,7 +2155,7 @@ long_to_decimal_string_internal(PyObject *aa, /* convert array of base _PyLong_BASE digits in pin to an array of base _PyLong_DECIMAL_BASE digits in pout, following Knuth (TAOCP, - Volume 2 (3rd end), section 4.4, Method 1b). */ + Volume 2 (3rd edn), section 4.4, Method 1b). */ pin = a->long_value.ob_digit; pout = scratch->long_value.ob_digit; size = 0; @@ -3312,7 +3312,7 @@ x_divrem(PyLongObject *v1, PyLongObject *w1, PyLongObject **prem) stwodigits z; /* We follow Knuth [The Art of Computer Programming, Vol. 2 (3rd - end.), section 4.3.1, Algorithm D], except that we don't explicitly + edn.), section 4.3.1, Algorithm D], except that we don't explicitly handle the special case when the initial estimate q for a quotient digit is >= PyLong_BASE: the max value for q is PyLong_BASE+1, and that won't overflow a digit. 
*/ diff --git a/Objects/mimalloc/arena.c b/Objects/mimalloc/arena.c index 858ff319fb2c70..5db5d950c43b68 100644 --- a/Objects/mimalloc/arena.c +++ b/Objects/mimalloc/arena.c @@ -644,11 +644,11 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi mi_assert_internal(arena->blocks_purge != NULL); if (!all_committed) { - // mark the entire range as no longer committed (so we recommit the full range when reusing) + // mark the entire range as no longer committed (so we recommit the full range when re-using) _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx); mi_track_mem_noaccess(p,size); if (committed_size > 0) { - // if partially committed, adjust the committed stats (is it will be recommitted when reusing) + // if partially committed, adjust the committed stats (is it will be recommitted when re-using) // in the delayed purge, we now need to not count a decommit if the range is not marked as committed. _mi_stat_decrease(&stats->committed, committed_size); } diff --git a/Objects/mimalloc/os.c b/Objects/mimalloc/os.c index cdd4a091d5d469..c9103168c12507 100644 --- a/Objects/mimalloc/os.c +++ b/Objects/mimalloc/os.c @@ -476,7 +476,7 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { // either resets or decommits memory, returns true if the memory needs -// to be recommitted if it is to be reused later on. +// to be recommitted if it is to be re-used later on. bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) { if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed? @@ -499,7 +499,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) } // either resets or decommits memory, returns true if the memory needs -// to be recommitted if it is to be reused later on. +// to be recommitted if it is to be re-used later on. bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) { return _mi_os_purge_ex(p, size, true, stats); } diff --git a/Objects/mimalloc/prim/windows/etw-mimalloc.wprp b/Objects/mimalloc/prim/windows/etw-mimalloc.wprp index 26d24d953199cd..b00cd7adf2285c 100644 --- a/Objects/mimalloc/prim/windows/etw-mimalloc.wprp +++ b/Objects/mimalloc/prim/windows/etw-mimalloc.wprp @@ -14,7 +14,7 @@ - + diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c index 92364137b9e347..deb7fd957e57dd 100644 --- a/Objects/obmalloc.c +++ b/Objects/obmalloc.c @@ -128,7 +128,7 @@ _PyMem_mi_page_is_safe_to_free(mi_page_t *page) // If we are deferring collection of more than this amount of memory for // mimalloc pages, advance the write sequence. Advancing allows these -// pages to be reused in a different thread or for a different size class. +// pages to be re-used in a different thread or for a different size class. 
#define QSBR_PAGE_MEM_LIMIT 4096*20 // Return true if the global write sequence should be advanced for a mimalloc @@ -1233,7 +1233,7 @@ free_delayed(uintptr_t ptr, size_t size) struct _mem_work_chunk *buf = NULL; if (!llist_empty(head)) { - // Try to reuse the last buffer + // Try to re-use the last buffer buf = llist_data(head->prev, struct _mem_work_chunk, node); if (buf->wr_idx == WORK_ITEMS_PER_CHUNK) { // already full diff --git a/Objects/typeobject.c b/Objects/typeobject.c index ce7d6c6a9c093d..379c4d0467c487 100644 --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -4880,7 +4880,7 @@ type_new_impl(type_new_ctx *ctx) assert(_PyType_CheckConsistency(type)); #if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4 - // After this point, other threads can potentally use this type. + // After this point, other threads can potentially use this type. ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG; #endif @@ -5597,7 +5597,7 @@ PyType_FromMetaclass( assert(_PyType_CheckConsistency(type)); #if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4 - // After this point, other threads can potentally use this type. + // After this point, other threads can potentially use this type. ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG; #endif diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c index 22a095e05e363c..5c2308a012142a 100644 --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -2918,7 +2918,7 @@ unicode_fromformat_arg(_PyUnicodeWriter *writer, { char buffer[MAX_INTMAX_CHARS]; - // Fill buffer using sprinf, with one of many possible format + // Fill buffer using sprintf, with one of many possible format // strings, like "%llX" for `long long` in hexadecimal. // The type/size is in `sizemod`; the format is in `*f`. @@ -7771,7 +7771,7 @@ decode_code_page_errors(UINT code_page, const char *in, const int size, const char *errors, int final) { - const char *starting = in; + const char *startin = in; const char *endin = in + size; DWORD flags = MB_ERR_INVALID_CHARS; /* Ideally, we should get reason from FormatMessage. This is the Windows @@ -7854,13 +7854,13 @@ decode_code_page_errors(UINT code_page, if (in + insize >= endin && !final) break; - startinpos = in - starting; + startinpos = in - startin; endinpos = startinpos + 1; outpos = out - *buf; if (unicode_decode_call_errorhandler_wchar( errors, &errorHandler, encoding, reason, - &starting, &endin, &startinpos, &endinpos, &exc, &in, + &startin, &endin, &startinpos, &endinpos, &exc, &in, buf, bufsize, &outpos)) { goto error; @@ -7877,8 +7877,8 @@ decode_code_page_errors(UINT code_page, /* Shrink the buffer */ assert(out - *buf <= *bufsize); *bufsize = out - *buf; - /* (in - starting) <= size and size is an int */ - ret = Py_SAFE_DOWNCAST(in - starting, Py_ssize_t, int); + /* (in - startin) <= size and size is an int */ + ret = Py_SAFE_DOWNCAST(in - startin, Py_ssize_t, int); error: Py_XDECREF(encoding_obj); @@ -12252,9 +12252,9 @@ unicode_isalpha_impl(PyObject *self) /*[clinic input] str.isalnum as unicode_isalnum -Return True if the string is an alphanumeric string, False otherwise. +Return True if the string is an alpha-numeric string, False otherwise. -A string is alphanumeric if all characters in the string are alphanumeric and +A string is alpha-numeric if all characters in the string are alpha-numeric and there is at least one character in the string.
[clinic start generated code]*/ @@ -14289,7 +14289,7 @@ unicode_getnewargs(PyObject *v, PyObject *Py_UNUSED(ignored)) } /* -This function searchs the longest common leading whitespace +This function searches the longest common leading whitespace of all lines in the [src, end). It returns the length of the common leading whitespace and sets `output` to point to the beginning of the common leading whitespace if length > 0. diff --git a/PC/winreg.c b/PC/winreg.c index 1c3145aaee2b7a..d1a1c3d1c97850 100644 --- a/PC/winreg.c +++ b/PC/winreg.c @@ -26,7 +26,7 @@ typedef struct { /* Forward declares */ -static BOOL PyHKEY_AsHKEY(winreg_state *st, PyObject *ob, HKEY *press, BOOL bNoneOK); +static BOOL PyHKEY_AsHKEY(winreg_state *st, PyObject *ob, HKEY *pRes, BOOL bNoneOK); static BOOL clinic_HKEY_converter(winreg_state *st, PyObject *ob, void *p); static PyObject *PyHKEY_FromHKEY(winreg_state *st, HKEY h); static BOOL PyHKEY_Close(winreg_state *st, PyObject *obHandle); diff --git a/PCbuild/pyproject-clangcl.props b/PCbuild/pyproject-clangcl.props index 6b2c663bd8cc5c..70a81ca7da306c 100644 --- a/PCbuild/pyproject-clangcl.props +++ b/PCbuild/pyproject-clangcl.props @@ -41,7 +41,7 @@ diff --git a/PCbuild/pyproject.props b/PCbuild/pyproject.props index f3fb0ad44c1705..cf35e705f355a7 100644 --- a/PCbuild/pyproject.props +++ b/PCbuild/pyproject.props @@ -127,7 +127,7 @@ diff --git a/Parser/lexer/lexer.c b/Parser/lexer/lexer.c index ace1bced5bd733..81363cf8e810fe 100644 --- a/Parser/lexer/lexer.c +++ b/Parser/lexer/lexer.c @@ -137,7 +137,7 @@ set_ftstring_expr(struct tok_state* tok, struct token *token, char c) { // Handle quotes if (ch == '"' || ch == '\'') { - // The following if/else block works becase there is an off number + // The following if/else block works because there is an off number // of quotes in STRING tokens and the lexer only ever reaches this // function with valid STRING tokens. // For example: """hello""" diff --git a/Parser/pegen.c b/Parser/pegen.c index fc08425a93503c..50641de27d37fd 100644 --- a/Parser/pegen.c +++ b/Parser/pegen.c @@ -346,7 +346,7 @@ _PyPegen_get_memo_statistics(void) #endif int // bool -_PyPegen_is_memoized(Parser *p, int type, void *press) +_PyPegen_is_memoized(Parser *p, int type, void *pres) { if (p->mark == p->fill) { if (_PyPegen_fill_token(p) < 0) { @@ -372,7 +372,7 @@ _PyPegen_is_memoized(Parser *p, int type, void *press) } #endif p->mark = m->mark; - *(void **)(press) = m->node; + *(void **)(pres) = m->node; return 1; } } diff --git a/Parser/pegen.h b/Parser/pegen.h index b5048583c44852..804f931871aec8 100644 --- a/Parser/pegen.h +++ b/Parser/pegen.h @@ -143,7 +143,7 @@ PyObject *_PyPegen_get_memo_statistics(void); int _PyPegen_insert_memo(Parser *p, int mark, int type, void *node); int _PyPegen_update_memo(Parser *p, int mark, int type, void *node); -int _PyPegen_is_memoized(Parser *p, int type, void *press); +int _PyPegen_is_memoized(Parser *p, int type, void *pres); int _PyPegen_lookahead(int, void *(func)(Parser *), Parser *); int _PyPegen_lookahead_for_expr(int, expr_ty (func)(Parser *), Parser *); diff --git a/Python/ceval.c b/Python/ceval.c index 5c21d7e9e49080..291e753dec0ce5 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -1371,9 +1371,9 @@ too_many_positional(PyThreadState *tstate, PyCodeObject *co, } Py_ssize_t defcount = defaults == NULL ?
0 : PyTuple_GET_SIZE(defaults); if (defcount) { - Py_ssize_t at least = co_argcount - defcount; + Py_ssize_t atleast = co_argcount - defcount; plural = 1; - sig = PyUnicode_FromFormat("from %zd to %zd", at least, co_argcount); + sig = PyUnicode_FromFormat("from %zd to %zd", atleast, co_argcount); } else { plural = (co_argcount != 1); diff --git a/Python/codecs.c b/Python/codecs.c index 451d83d4ba2f6e..caf8d9d5f3c188 100644 --- a/Python/codecs.c +++ b/Python/codecs.c @@ -926,17 +926,17 @@ PyObject *PyCodec_XMLCharRefReplaceErrors(PyObject *exc) slen = Py_MAX(0, end - start); } - Py_ssize_t resize = 0; + Py_ssize_t ressize = 0; for (Py_ssize_t i = start; i < end; ++i) { Py_UCS4 ch = PyUnicode_READ_CHAR(obj, i); int k = n_decimal_digits_for_codepoint(ch); assert(k != 0); assert(k <= 7); - resize += 2 + k + 1; + ressize += 2 + k + 1; } /* allocate replacement */ - PyObject *res = PyUnicode_New(resize, 127); + PyObject *res = PyUnicode_New(ressize, 127); if (res == NULL) { Py_DECREF(obj); return NULL; @@ -993,12 +993,12 @@ _PyCodec_BackslashReplaceUnicodeEncodeError(PyObject *exc) slen = Py_MAX(0, end - start); } - Py_ssize_t resize = 0; + Py_ssize_t ressize = 0; for (Py_ssize_t i = start; i < end; ++i) { Py_UCS4 c = PyUnicode_READ_CHAR(obj, i); - resize += codec_handler_unicode_hex_width(c); + ressize += codec_handler_unicode_hex_width(c); } - PyObject *res = PyUnicode_New(resize, 127); + PyObject *res = PyUnicode_New(ressize, 127); if (res == NULL) { Py_DECREF(obj); return NULL; @@ -1097,7 +1097,7 @@ PyObject *PyCodec_NameReplaceErrors(PyObject *exc) } char buffer[256]; /* NAME_MAXLEN in unicodename_db.h */ - Py_ssize_t imax = start, resize = 0, replsize; + Py_ssize_t imax = start, ressize = 0, replsize; for (; imax < end; ++imax) { Py_UCS4 c = PyUnicode_READ_CHAR(obj, imax); if (ucnhash_capi->getname(c, buffer, sizeof(buffer), 1)) { @@ -1109,13 +1109,13 @@ PyObject *PyCodec_NameReplaceErrors(PyObject *exc) else { replsize = codec_handler_unicode_hex_width(c); } - if (resize > PY_SSIZE_T_MAX - replsize) { + if (ressize > PY_SSIZE_T_MAX - replsize) { break; } - resize += replsize; + ressize += replsize; } - PyObject *res = PyUnicode_New(resize, 127); + PyObject *res = PyUnicode_New(ressize, 127); if (res == NULL) { Py_DECREF(obj); return NULL; @@ -1137,7 +1137,7 @@ PyObject *PyCodec_NameReplaceErrors(PyObject *exc) } } - assert(outp == PyUnicode_1BYTE_DATA(res) + resize); + assert(outp == PyUnicode_1BYTE_DATA(res) + ressize); assert(_PyUnicode_CheckConsistency(res, 1)); PyObject *restuple = Py_BuildValue("(Nn)", res, imax); Py_DECREF(obj); diff --git a/Python/crossinterp.c b/Python/crossinterp.c index ce361ae363e5fb..16a23f0351cd26 100644 --- a/Python/crossinterp.c +++ b/Python/crossinterp.c @@ -772,7 +772,7 @@ _PyPickle_GetXIData(PyThreadState *tstate, PyObject *obj, _PyXIData_t *xidata) return -1; } - // If we had an "unwrapper" mechanism, we could call + // If we had an "unwrapper" mechnanism, we could call // _PyObject_GetXIData() on the bytes object directly and add // a simple unwrapper to call pickle.loads() on the bytes. size_t size = sizeof(struct _shared_pickle_data); @@ -3176,7 +3176,7 @@ _PyXI_InitTypes(PyInterpreterState *interp) "failed to initialize the cross-interpreter exception types"); } // We would initialize heap types here too but that leads to ref leaks. - // Instead, we initialize them in _PyXI_Init(). + // Instead, we intialize them in _PyXI_Init(). 
return _PyStatus_OK(); } diff --git a/Python/dynamic_annotations.c b/Python/dynamic_annotations.c index bca89a30535a7a..7febaa09df1950 100644 --- a/Python/dynamic_annotations.c +++ b/Python/dynamic_annotations.c @@ -143,7 +143,7 @@ static int GetRunningOnValgrind(void) { /* See the comments in dynamic_annotations.h */ int RunningOnValgrind(void) { static volatile int running_on_valgrind = -1; - /* C doesn't have thread-safe initialization of statistics, and we + /* C doesn't have thread-safe initialization of statics, and we don't want to depend on pthread_once here, so hack it. */ int local_running_on_valgrind = running_on_valgrind; if (local_running_on_valgrind == -1) diff --git a/Python/gc.c b/Python/gc.c index d485a0d0b17e6f..4160f68c27a3ef 100644 --- a/Python/gc.c +++ b/Python/gc.c @@ -1,6 +1,6 @@ // This implements the reference cycle garbage collector. // The Python module interface to the collector is in gcmodule.c. -// See InternalDocs/garbage_collector.md for more information. +// See InternalDocs/garbage_collector.md for more infromation. #include "Python.h" #include "pycore_ceval.h" // _Py_set_eval_breaker_bit() diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index 9a9e28c5982fd6..0b0ddf227e4952 100644 --- a/Python/gc_free_threading.c +++ b/Python/gc_free_threading.c @@ -529,7 +529,7 @@ static_assert(BUFFER_HI < BUFFER_SIZE && BUFFER_LO > 0, "Invalid prefetch buffer level settings."); -// Prefetch instructions will fetch the line of data from memory that +// Prefetch intructions will fetch the line of data from memory that // contains the byte specified with the source operand to a location in // the cache hierarchy specified by a locality hint. The instruction // is only a hint and the CPU is free to ignore it. Instructions and @@ -581,7 +581,7 @@ static_assert(BUFFER_HI < BUFFER_SIZE && #define prefetch(ptr) #endif -// a contiguous sequence of PyObject pointers, can contain NULLs +// a contigous sequence of PyObject pointers, can contain NULLs typedef struct { PyObject **start; PyObject **end; @@ -750,7 +750,7 @@ gc_mark_enqueue(PyObject *op, gc_mark_args_t *args) } } -// Called when we have a contiguous sequence of PyObject pointers, either +// Called when we have a contigous sequence of PyObject pointers, either // a tuple or list object. This will add the items to the buffer if there // is space for them all otherwise push a new "span" on the span stack. Using // spans has the advantage of not creating a deep _PyObjectStack stack when diff --git a/Python/import.c b/Python/import.c index 6dfa2c135d95a8..73b94d0dd2a1b1 100644 --- a/Python/import.c +++ b/Python/import.c @@ -2636,7 +2636,7 @@ module_dict_for_exec(PyThreadState *tstate, PyObject *name) if (m == NULL) return NULL; /* If the module is being reloaded, we get the old module back - and reuse its dict to exec the new code. */ + and re-use its dict to exec the new code. */ d = PyModule_GetDict(m); int r = PyDict_Contains(d, &_Py_ID(__builtins__)); if (r == 0) { diff --git a/Python/perf_trampoline.c b/Python/perf_trampoline.c index edd02582f31d4f..a2da3c7d56df50 100644 --- a/Python/perf_trampoline.c +++ b/Python/perf_trampoline.c @@ -110,7 +110,7 @@ achieve this we have a assembly template in Objects/asm_trampiline.S that is compiled into the Python executable/shared library. This template generates a symbol that maps the start of the assembly code and another that marks the end of the assembly code for the trampoline. 
Then, every time we need a unique -trampoline for a Python code object, we copy the assembly code into a mapped +trampoline for a Python code object, we copy the assembly code into a mmaped area that has executable permissions and we return the start of that area as our trampoline function. diff --git a/Python/pystate.c b/Python/pystate.c index 2a948498ea9cd1..0d4c26f92cec90 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -309,7 +309,7 @@ holds_gil(PyThreadState *tstate) /* Suppress deprecation warning for PyBytesObject.ob_shash */ _Py_COMP_DIAG_PUSH _Py_COMP_DIAG_IGNORE_DEPR_DECLS -/* We use "initial" if the runtime gets reused +/* We use "initial" if the runtime gets re-used (e.g. Py_Finalize() followed by Py_Initialize(). Note that we initialize "initial" relative to _PyRuntime, to ensure pre-initialized pointers point to the active @@ -543,7 +543,7 @@ init_interpreter(PyInterpreterState *interp, interp->threads.preallocated = &interp->_initial_thread; // We would call _PyObject_InitState() at this point - // if interp->feature_flags were already set. + // if interp->feature_flags were alredy set. _PyEval_InitState(interp); _PyGC_InitState(&interp->gc); diff --git a/Python/pytime.c b/Python/pytime.c index 7b9cd17dd58a7d..67cf6437264490 100644 --- a/Python/pytime.c +++ b/Python/pytime.c @@ -75,14 +75,14 @@ _PyTime_GCD(PyTime_t x, PyTime_t y) int -_PyTimeFraction_Set(_PyTimeFraction *frac, PyTime_t number, PyTime_t denom) +_PyTimeFraction_Set(_PyTimeFraction *frac, PyTime_t numer, PyTime_t denom) { - if (number < 1 || denom < 1) { + if (numer < 1 || denom < 1) { return -1; } - PyTime_t gcd = _PyTime_GCD(number, denom); - frac->number = number / gcd; + PyTime_t gcd = _PyTime_GCD(numer, denom); + frac->numer = numer / gcd; frac->denom = denom / gcd; return 0; } @@ -91,7 +91,7 @@ _PyTimeFraction_Set(_PyTimeFraction *frac, PyTime_t number, PyTime_t denom) double _PyTimeFraction_Resolution(const _PyTimeFraction *frac) { - return (double)frac->number / (double)frac->denom / 1e9; + return (double)frac->numer / (double)frac->denom / 1e9; } @@ -179,7 +179,7 @@ _PyTime_Mul(PyTime_t t, PyTime_t k) PyTime_t _PyTimeFraction_Mul(PyTime_t ticks, const _PyTimeFraction *frac) { - const PyTime_t mul = frac->number; + const PyTime_t mul = frac->numer; const PyTime_t div = frac->denom; if (div == 1) { @@ -1099,13 +1099,13 @@ py_mach_timebase_info(_PyTimeFraction *base) // fail: https://developer.apple.com/library/mac/#qa/qa1398/ (void)mach_timebase_info(&timebase); - // Check that timebase.number and timebase.denom can be casted to + // Check that timebase.numer and timebase.denom can be casted to // PyTime_t. In practice, timebase uses uint32_t, so casting cannot // overflow. At the end, only make sure that the type is uint32_t // (PyTime_t is 64-bit long). 
- Py_BUILD_ASSERT(sizeof(timebase.number) <= sizeof(PyTime_t)); + Py_BUILD_ASSERT(sizeof(timebase.numer) <= sizeof(PyTime_t)); Py_BUILD_ASSERT(sizeof(timebase.denom) <= sizeof(PyTime_t)); - PyTime_t number = (PyTime_t)timebase.number; + PyTime_t numer = (PyTime_t)timebase.numer; PyTime_t denom = (PyTime_t)timebase.denom; // Known time bases: @@ -1113,7 +1113,7 @@ py_mach_timebase_info(_PyTimeFraction *base) // * (1, 1) on Intel: 1 ns // * (1000000000, 33333335) on PowerPC: ~30 ns // * (1000000000, 25000000) on PowerPC: 40 ns - if (_PyTimeFraction_Set(base, number, denom) < 0) { + if (_PyTimeFraction_Set(base, numer, denom) < 0) { return _PyStatus_ERR("invalid mach_timebase_info"); } return PyStatus_Ok(); diff --git a/Python/remote_debug.h b/Python/remote_debug.h index fa6e149e82f8ca..5324a7aaa6f5e5 100644 --- a/Python/remote_debug.h +++ b/Python/remote_debug.h @@ -1119,7 +1119,7 @@ _Py_RemoteDebug_PagedReadRemoteMemory(proc_handle_t *handle, } if (_Py_RemoteDebug_ReadRemoteMemory(handle, page_base, page_size, entry->data) < 0) { - // Try to just copy the exact amount as a fallback + // Try to just copy the exact ammount as a fallback PyErr_Clear(); goto fallback; } diff --git a/Python/specialize.c b/Python/specialize.c index 04f50c9ea8ee4c..fe8d04cf3442f1 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -2544,7 +2544,7 @@ static _PyBinaryOpSpecializationDescr binaryop_extend_descrs[] = { {NB_INPLACE_AND, compactlongs_guard, compactlongs_and}, {NB_INPLACE_XOR, compactlongs_guard, compactlongs_xor}, - /* float-long arithmetic */ + /* float-long arithemetic */ {NB_ADD, float_compactlong_guard, float_compactlong_add}, {NB_SUBTRACT, float_compactlong_guard, float_compactlong_subtract}, {NB_TRUE_DIVIDE, nonzero_float_compactlong_guard, float_compactlong_true_div}, diff --git a/Python/uniqueid.c b/Python/uniqueid.c index cce847604e841d..64c3e6cfbbe825 100644 --- a/Python/uniqueid.c +++ b/Python/uniqueid.c @@ -7,7 +7,7 @@ #include "pycore_uniqueid.h" // This contains code for allocating unique ids for per-thread reference -// counting and reusing those ids when an object is deallocated. +// counting and re-using those ids when an object is deallocated. // // Currently, per-thread reference counting is only used for heap types. 
// diff --git a/Tools/build/deepfreeze.py b/Tools/build/deepfreeze.py index 0f218942a74cdc..2b9f03aebb6d7e 100644 --- a/Tools/build/deepfreeze.py +++ b/Tools/build/deepfreeze.py @@ -118,7 +118,7 @@ def __init__(self, file: TextIO) -> None: self.file = file self.cache: dict[tuple[type, object, str], str] = {} self.hits, self.misses = 0, 0 - self.finish: list[str] = [] + self.finis: list[str] = [] self.inits: list[str] = [] self.identifiers, self.strings = self.get_identifiers_and_strings() self.write('#include "Python.h"') @@ -316,7 +316,7 @@ def generate_code(self, name: str, code: types.CodeType) -> str: first_traceable += 1 self.write(f"._co_firsttraceable = {first_traceable},") name_as_code = f"(PyCodeObject *)&{name}" - self.finish.append(f"_PyStaticCode_Fini({name_as_code});") + self.finis.append(f"_PyStaticCode_Fini({name_as_code});") self.inits.append(f"_PyStaticCode_Init({name_as_code})") return f"& {name}.ob_base.ob_base" @@ -488,7 +488,7 @@ def generate(args: list[str], output: TextIO) -> None: code = compile(fd.read(), f"", "exec") printer.generate_file(modname, code) with printer.block(f"void\n_Py_Deepfreeze_Fini(void)"): - for p in printer.finish: + for p in printer.finis: printer.write(p) with printer.block(f"int\n_Py_Deepfreeze_Init(void)"): for p in printer.inits: diff --git a/Tools/build/freeze_modules.py b/Tools/build/freeze_modules.py index b9acac12c98c70..3c43f7e3bbe8ca 100644 --- a/Tools/build/freeze_modules.py +++ b/Tools/build/freeze_modules.py @@ -36,7 +36,7 @@ TESTS_SECTION = 'Test module' FROZEN = [ # See parse_frozen_spec() for the format. - # In cases where the frozenid is duplicated, the first one is reused. + # In cases where the frozenid is duplicated, the first one is re-used. ('import system', [ # These frozen modules are necessary for bootstrapping # the import system. diff --git a/Tools/build/generate-build-details.py b/Tools/build/generate-build-details.py index 55fc5cfac93735..8cd23e2f54f529 100644 --- a/Tools/build/generate-build-details.py +++ b/Tools/build/generate-build-details.py @@ -155,7 +155,7 @@ def make_paths_relative(data: dict[str, Any], config_path: str | None = None) -> continue # Get the relative path new_path = os.path.relpath(current_path, data['base_prefix']) - # Join '.' so that the path is formatted as './path' instead of 'path' + # Join '.' 
so that the path is formated as './path' instead of 'path' new_path = os.path.join('.', new_path) container[child] = new_path diff --git a/Tools/build/parse_html5_entities.py b/Tools/build/parse_html5_entities.py index 38ec8f191a624e..aca98497381a43 100755 --- a/Tools/build/parse_html5_entities.py +++ b/Tools/build/parse_html5_entities.py @@ -40,12 +40,12 @@ def compare_dicts(old, new): """Compare the old and new dicts and print the differences.""" added = new.keys() - old.keys() if added: - print(f'{len(added)} entity(s) have been added:') + print(f'{len(added)} entitie(s) have been added:') for name in sorted(added): print(f' {name!r}: {new[name]!r}') removed = old.keys() - new.keys() if removed: - print(f'{len(removed)} entity(s) have been removed:') + print(f'{len(removed)} entitie(s) have been removed:') for name in sorted(removed): print(f' {name!r}: {old[name]!r}') changed = set() @@ -53,7 +53,7 @@ def compare_dicts(old, new): if old[name] != new[name]: changed.add((name, old[name], new[name])) if changed: - print(f'{len(changed)} entity(s) have been modified:') + print(f'{len(changed)} entitie(s) have been modified:') for item in sorted(changed): print(' {!r}: {!r} -> {!r}'.format(*item)) diff --git a/Tools/cases_generator/stack.py b/Tools/cases_generator/stack.py index cd231b12c7f5e7..3a0e7e5d0d5636 100644 --- a/Tools/cases_generator/stack.py +++ b/Tools/cases_generator/stack.py @@ -540,11 +540,11 @@ def for_uop(stack: Stack, uop: Uop, out: CWriter, check_liveness: bool = True) - inputs.reverse() peeks.reverse() offset = stack.logical_sp - stack.physical_sp - for output in uop.stack.outputs: - if output.is_array() and output.used and not output.peek: + for ouput in uop.stack.outputs: + if ouput.is_array() and ouput.used and not ouput.peek: c_offset = offset.to_c() - out.emit(f"{output.name} = &stack_pointer[{c_offset}];\n") - offset = offset.push(output) + out.emit(f"{ouput.name} = &stack_pointer[{c_offset}];\n") + offset = offset.push(ouput) for var in inputs: stack.push(var) outputs = peeks + [ Local.undefined(var) for var in uop.stack.outputs if not var.peek ] diff --git a/Tools/cases_generator/tier1_generator.py b/Tools/cases_generator/tier1_generator.py index 7cd95ed9b32691..32dc346d5e891a 100644 --- a/Tools/cases_generator/tier1_generator.py +++ b/Tools/cases_generator/tier1_generator.py @@ -201,7 +201,7 @@ def generate_tier1_labels( analysis: Analysis, emitter: Emitter ) -> None: emitter.emit("\n") - # Emit tail-callable labels as function definitions + # Emit tail-callable labels as function defintions for name, label in analysis.labels.items(): emitter.emit(f"LABEL({name})\n") storage = Storage(Stack(), [], [], 0, False) diff --git a/Tools/i18n/pygettext.py b/Tools/i18n/pygettext.py index b55b1db56ce875..f46b05067d7fde 100755 --- a/Tools/i18n/pygettext.py +++ b/Tools/i18n/pygettext.py @@ -687,7 +687,7 @@ def main(): try: opts, args = getopt.getopt( sys.argv[1:], - 'ac::d:DEhk:Know:p:S:Vvw:x:X:', + 'ac::d:DEhk:Kno:p:S:Vvw:x:X:', ['extract-all', 'add-comments=?', 'default-domain=', 'escape', 'help', 'keyword=', 'no-default-keywords', 'add-location', 'no-location', 'output=', 'output-dir=', diff --git a/Tools/peg_generator/pegen/grammar_visualizer.py b/Tools/peg_generator/pegen/grammar_visualizer.py index 0c48ce7bb7860a..11f784f45b66b8 100644 --- a/Tools/peg_generator/pegen/grammar_visualizer.py +++ b/Tools/peg_generator/pegen/grammar_visualizer.py @@ -33,15 +33,15 @@ def print_nodes_recursively(self, node: Rule, prefix: str = "", istail: bool = T value = self.name(node) 
line = prefix + ("└──" if istail else "├──") + value + "\n" - suffix = " " if istail else "│ " + sufix = " " if istail else "│ " if not children: return line *children, last = children for child in children: - line += self.print_nodes_recursively(child, prefix + suffix, False) - line += self.print_nodes_recursively(last, prefix + suffix, True) + line += self.print_nodes_recursively(child, prefix + sufix, False) + line += self.print_nodes_recursively(last, prefix + sufix, True) return line diff --git a/Tools/scripts/combinerefs.py b/Tools/scripts/combinerefs.py index 75d36f232f979b..848bae5658ca3a 100755 --- a/Tools/scripts/combinerefs.py +++ b/Tools/scripts/combinerefs.py @@ -39,10 +39,10 @@ repr is repr(object), extracted from the first PYTHONDUMPREFS output block. CAUTION: If object is a container type, it may not actually contain all the objects shown in the repr: the repr was captured from the first output block, -and some of the containers may have been released since then. For example, +and some of the containees may have been released since then. For example, it's common for the line showing the dict of interned strings to display strings that no longer exist at the end of Py_FinalizeEx; this can be recognized -(albeit painfully) because such containers don't have a line of their own. +(albeit painfully) because such containees don't have a line of their own. The objects are listed in allocation order, with most-recently allocated printed first, and the first object allocated printed last. diff --git a/Tools/unicode/python-mappings/GB2312.TXT b/Tools/unicode/python-mappings/GB2312.TXT index 1782946a76361b..334b4cdb94863d 100644 --- a/Tools/unicode/python-mappings/GB2312.TXT +++ b/Tools/unicode/python-mappings/GB2312.TXT @@ -507,7 +507,7 @@ 0x2628 0x0398 # GREEK CAPITAL LETTER THETA 0x2629 0x0399 # GREEK CAPITAL LETTER IOTA 0x262A 0x039A # GREEK CAPITAL LETTER KAPPA -0x262B 0x039B # GREEK CAPITAL LETTER LAMBDA +0x262B 0x039B # GREEK CAPITAL LETTER LAMDA 0x262C 0x039C # GREEK CAPITAL LETTER MU 0x262D 0x039D # GREEK CAPITAL LETTER NU 0x262E 0x039E # GREEK CAPITAL LETTER XI @@ -531,7 +531,7 @@ 0x2648 0x03B8 # GREEK SMALL LETTER THETA 0x2649 0x03B9 # GREEK SMALL LETTER IOTA 0x264A 0x03BA # GREEK SMALL LETTER KAPPA -0x264B 0x03BB # GREEK SMALL LETTER LAMBDA +0x264B 0x03BB # GREEK SMALL LETTER LAMDA 0x264C 0x03BC # GREEK SMALL LETTER MU 0x264D 0x03BD # GREEK SMALL LETTER NU 0x264E 0x03BE # GREEK SMALL LETTER XI diff --git a/Tools/unicode/python-mappings/jisx0213-2004-std.txt b/Tools/unicode/python-mappings/jisx0213-2004-std.txt index 7649933aec5839..a302fa19ff9bd2 100644 --- a/Tools/unicode/python-mappings/jisx0213-2004-std.txt +++ b/Tools/unicode/python-mappings/jisx0213-2004-std.txt @@ -500,7 +500,7 @@ 3-2628 U+0398 # GREEK CAPITAL LETTER THETA 3-2629 U+0399 # GREEK CAPITAL LETTER IOTA 3-262A U+039A # GREEK CAPITAL LETTER KAPPA -3-262B U+039B # GREEK CAPITAL LETTER LAMBDA +3-262B U+039B # GREEK CAPITAL LETTER LAMDA 3-262C U+039C # GREEK CAPITAL LETTER MU 3-262D U+039D # GREEK CAPITAL LETTER NU 3-262E U+039E # GREEK CAPITAL LETTER XI @@ -532,7 +532,7 @@ 3-2648 U+03B8 # GREEK SMALL LETTER THETA 3-2649 U+03B9 # GREEK SMALL LETTER IOTA 3-264A U+03BA # GREEK SMALL LETTER KAPPA -3-264B U+03BB # GREEK SMALL LETTER LAMBDA +3-264B U+03BB # GREEK SMALL LETTER LAMDA 3-264C U+03BC # GREEK SMALL LETTER MU 3-264D U+03BD # GREEK SMALL LETTER NU 3-264E U+03BE # GREEK SMALL LETTER XI diff --git a/configure.ac b/configure.ac index 6917f3e1014a33..3566c4b9038c2b 100644 --- a/configure.ac +++ b/configure.ac 
@@ -6264,7 +6264,7 @@ if test "$ac_cv_sizeof_wchar_t" -ge 2 \ -a "$ac_cv_wchar_t_signed" = "no" then AC_DEFINE([HAVE_USABLE_WCHAR_T], [1], - [Define if you have a usable wchar_t type defined in wchar.h; usable + [Define if you have a useable wchar_t type defined in wchar.h; useable means wchar_t must be an unsigned type with at least 16 bits. (see Include/unicodeobject.h).]) AC_MSG_RESULT([yes]) diff --git a/pyconfig.h.in b/pyconfig.h.in index 78036c2656fd59..1c533b2bfb7fb4 100644 --- a/pyconfig.h.in +++ b/pyconfig.h.in @@ -1559,7 +1559,7 @@ /* Define to 1 if you have the 'unshare' function. */ #undef HAVE_UNSHARE -/* Define if you have a usable wchar_t type defined in wchar.h; usable means +/* Define if you have a useable wchar_t type defined in wchar.h; useable means wchar_t must be an unsigned type with at least 16 bits. (see Include/unicodeobject.h). */ #undef HAVE_USABLE_WCHAR_T From 5db6cc2c5742a449d818025545900520866627eb Mon Sep 17 00:00:00 2001 From: Cornelius Roemer Date: Mon, 21 Jul 2025 01:01:05 +0200 Subject: [PATCH 3/9] Progress on picking true typos --- Doc/extending/extending.rst | 2 +- Doc/library/shelve.rst | 2 +- Doc/whatsnew/3.14.rst | 2 +- Include/cpython/critical_section.h | 4 ++-- Include/internal/pycore_pymem.h | 2 +- Include/modsupport.h | 2 +- Include/refcount.h | 2 +- Include/unicodeobject.h | 2 +- InternalDocs/asyncio.md | 2 +- Lib/multiprocessing/resource_tracker.py | 2 +- Lib/sysconfig/__init__.py | 2 +- 11 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Doc/extending/extending.rst b/Doc/extending/extending.rst index fd63495674651b..a89a69043c0f9f 100644 --- a/Doc/extending/extending.rst +++ b/Doc/extending/extending.rst @@ -214,7 +214,7 @@ and initialize it by calling :c:func:`PyErr_NewException` in the module's SpamError = PyErr_NewException("spam.error", NULL, NULL); -Since :c:data:`!SpamError` is a global variable, it will be overwitten every time +Since :c:data:`!SpamError` is a global variable, it will be overwritten every time the module is reinitialized, when the :c:data:`Py_mod_exec` function is called. For now, let's avoid the issue: we will block repeated initialization by raising an diff --git a/Doc/library/shelve.rst b/Doc/library/shelve.rst index 23808619524056..b88fe4157bdc29 100644 --- a/Doc/library/shelve.rst +++ b/Doc/library/shelve.rst @@ -144,7 +144,7 @@ Restrictions which can cause hard crashes when trying to read from the database. * :meth:`Shelf.reorganize` may not be available for all database packages and - may temporarely increase resource usage (especially disk space) when called. + may temporarily increase resource usage (especially disk space) when called. Additionally, it will never run automatically and instead needs to be called explicitly. diff --git a/Doc/whatsnew/3.14.rst b/Doc/whatsnew/3.14.rst index c108a94692dca7..bf17f417a5980e 100644 --- a/Doc/whatsnew/3.14.rst +++ b/Doc/whatsnew/3.14.rst @@ -1051,7 +1051,7 @@ Concurrent safe warnings control The :class:`warnings.catch_warnings` context manager will now optionally use a context variable for warning filters. This is enabled by setting the :data:`~sys.flags.context_aware_warnings` flag, either with the ``-X`` -command-line option or an environment variable. This gives predicable +command-line option or an environment variable. This gives predictable warnings control when using :class:`~warnings.catch_warnings` combined with multiple threads or asynchronous tasks. The flag defaults to true for the free-threaded build and false for the GIL-enabled build. 
diff --git a/Include/cpython/critical_section.h b/Include/cpython/critical_section.h index 35db3fb6a59ce6..4d48ba13451304 100644 --- a/Include/cpython/critical_section.h +++ b/Include/cpython/critical_section.h @@ -93,7 +93,7 @@ PyCriticalSection2_End(PyCriticalSection2 *c); } #else /* !Py_GIL_DISABLED */ -// NOTE: the contents of this struct are private and may change betweeen +// NOTE: the contents of this struct are private and may change between // Python releases without a deprecation period. struct PyCriticalSection { // Tagged pointer to an outer active critical section (or 0). @@ -105,7 +105,7 @@ struct PyCriticalSection { // A critical section protected by two mutexes. Use // Py_BEGIN_CRITICAL_SECTION2 and Py_END_CRITICAL_SECTION2. -// NOTE: the contents of this struct are private and may change betweeen +// NOTE: the contents of this struct are private and may change between // Python releases without a deprecation period. struct PyCriticalSection2 { PyCriticalSection _cs_base; diff --git a/Include/internal/pycore_pymem.h b/Include/internal/pycore_pymem.h index f3f2ae0a140828..cf283cbbd7297d 100644 --- a/Include/internal/pycore_pymem.h +++ b/Include/internal/pycore_pymem.h @@ -94,7 +94,7 @@ extern void _PyMem_FreeDelayed(void *ptr, size_t size); extern void _PyMem_ProcessDelayed(PyThreadState *tstate); // Periodically process delayed free requests when the world is stopped. -// Notify of any objects whic should be freeed. +// Notify of any objects which should be freed. typedef void (*delayed_dealloc_cb)(PyObject *, void *); extern void _PyMem_ProcessDelayedNoDealloc(PyThreadState *tstate, delayed_dealloc_cb cb, void *state); diff --git a/Include/modsupport.h b/Include/modsupport.h index af995f567b004c..0b205548f0dc96 100644 --- a/Include/modsupport.h +++ b/Include/modsupport.h @@ -33,7 +33,7 @@ PyAPI_FUNC(int) PyModule_Add(PyObject *mod, const char *name, PyObject *value); // Similar to PyModule_AddObjectRef() and PyModule_Add() but steal // a reference to 'value' on success and only on success. -// Errorprone. Should not be used in new code. +// Error-prone. Should not be used in new code. PyAPI_FUNC(int) PyModule_AddObject(PyObject *mod, const char *, PyObject *value); PyAPI_FUNC(int) PyModule_AddIntConstant(PyObject *, const char *, long); diff --git a/Include/refcount.h b/Include/refcount.h index ba34461fefcbb0..b155d19d7f858f 100644 --- a/Include/refcount.h +++ b/Include/refcount.h @@ -453,7 +453,7 @@ static inline Py_ALWAYS_INLINE void Py_DECREF(PyObject *op) * There are cases where it's safe to use the naive code, but they're brittle. * For example, if `op` points to a Python integer, you know that destroying * one of those can't cause problems -- but in part that relies on that - * Python integers aren't currently weakly referencable. Best practice is + * Python integers aren't currently weakly referenceable. Best practice is * to use Py_CLEAR() even if you can't think of a reason for why you need to. * * gh-98724: Use a temporary variable to only evaluate the macro argument once, diff --git a/Include/unicodeobject.h b/Include/unicodeobject.h index b72d581ec25804..b98f44887ae827 100644 --- a/Include/unicodeobject.h +++ b/Include/unicodeobject.h @@ -760,7 +760,7 @@ PyAPI_FUNC(PyObject*) PyUnicode_Split( Py_ssize_t maxsplit /* Maxsplit count */ ); -/* Dito, but split at line breaks. +/* Ditto, but split at line breaks. CRLF is considered to be one line break. Line breaks are not included in the resulting list. 
*/ diff --git a/InternalDocs/asyncio.md b/InternalDocs/asyncio.md index 22159852ca54db..ca7e8a92dec1ce 100644 --- a/InternalDocs/asyncio.md +++ b/InternalDocs/asyncio.md @@ -205,7 +205,7 @@ the current task is found and returned. If no matching thread state is found, `None` is returned. In free-threading, it avoids contention on a global dictionary as -threads can access the current task of thier running loop without any +threads can access the current task of their running loop without any locking. --- diff --git a/Lib/multiprocessing/resource_tracker.py b/Lib/multiprocessing/resource_tracker.py index 05633ac21a259c..14ff2cc927b56b 100644 --- a/Lib/multiprocessing/resource_tracker.py +++ b/Lib/multiprocessing/resource_tracker.py @@ -76,7 +76,7 @@ def _reentrant_call_error(self): "Reentrant call into the multiprocessing resource tracker") def __del__(self): - # making sure child processess are cleaned before ResourceTracker + # making sure child processes are cleaned before ResourceTracker # gets destructed. # see https://github.com/python/cpython/issues/88887 self._stop(use_blocking_lock=False) diff --git a/Lib/sysconfig/__init__.py b/Lib/sysconfig/__init__.py index 49e0986517ce97..e36cafa26d2721 100644 --- a/Lib/sysconfig/__init__.py +++ b/Lib/sysconfig/__init__.py @@ -364,7 +364,7 @@ def _get_sysconfigdata(): def _installation_is_relocated(): - """Is the Python installation running from a different prefix than what was targetted when building?""" + """Is the Python installation running from a different prefix than what was targeted when building?""" if os.name != 'posix': raise NotImplementedError('sysconfig._installation_is_relocated() is currently only supported on POSIX') From f00a0fc8a3aadbe5b77c6b33e96fdc428284b63b Mon Sep 17 00:00:00 2001 From: Cornelius Roemer Date: Mon, 21 Jul 2025 01:04:19 +0200 Subject: [PATCH 4/9] Progress --- Lib/test/test_asyncio/test_sslproto.py | 2 +- Lib/test/test_buffer.py | 2 +- Lib/test/test_build_details.py | 4 ++-- Lib/test/test_capi/test_tuple.py | 2 +- Lib/test/test_capi/test_type.py | 2 +- Lib/test/test_cmd_line.py | 2 +- Lib/test/test_dict.py | 2 +- Lib/test/test_dis.py | 2 +- Lib/test/test_fileio.py | 2 +- Lib/test/test_generators.py | 4 ++-- Lib/test/test_genexps.py | 2 +- Lib/test/test_pyrepl/test_pyrepl.py | 2 +- 12 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Lib/test/test_asyncio/test_sslproto.py b/Lib/test/test_asyncio/test_sslproto.py index 3e304c166425b0..859175858e5f96 100644 --- a/Lib/test/test_asyncio/test_sslproto.py +++ b/Lib/test/test_asyncio/test_sslproto.py @@ -116,7 +116,7 @@ def test_connection_lost_when_busy(self): sock.fileno = mock.Mock(return_value=12345) sock.send = mock.Mock(side_effect=BrokenPipeError) - # construct StreamWriter chain that contains loop dependant logic this emulates + # construct StreamWriter chain that contains loop dependent logic this emulates # what _make_ssl_transport() does in BaseSelectorEventLoop reader = asyncio.StreamReader(limit=2 ** 16, loop=self.loop) protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop) diff --git a/Lib/test/test_buffer.py b/Lib/test/test_buffer.py index 19582e757161fc..b6afddb0f35428 100644 --- a/Lib/test/test_buffer.py +++ b/Lib/test/test_buffer.py @@ -4456,7 +4456,7 @@ def test_pybuffer_size_from_format(self): @support.cpython_only def test_flags_overflow(self): - # gh-126594: Check for integer overlow on large flags + # gh-126594: Check for integer overflow on large flags try: from _testcapi import INT_MIN, INT_MAX except ImportError: diff 
--git a/Lib/test/test_build_details.py b/Lib/test/test_build_details.py index ba4b8c5aa9b58e..d7a718139953b0 100644 --- a/Lib/test/test_build_details.py +++ b/Lib/test/test_build_details.py @@ -11,7 +11,7 @@ class FormatTestsBase: @property def contents(self): - """Install details file contents. Should be overriden by subclasses.""" + """Install details file contents. Should be overridden by subclasses.""" raise NotImplementedError @property @@ -114,7 +114,7 @@ def contents(self): def test_location(self): self.assertTrue(os.path.isfile(self.location)) - # Override generic format tests with tests for our specific implemenation. + # Override generic format tests with tests for our specific implementation. @needs_installed_python @unittest.skipIf( diff --git a/Lib/test/test_capi/test_tuple.py b/Lib/test/test_capi/test_tuple.py index 7c07bc64e247c5..0eb70ff68f32f5 100644 --- a/Lib/test/test_capi/test_tuple.py +++ b/Lib/test/test_capi/test_tuple.py @@ -259,7 +259,7 @@ def test__tuple_resize(self): def test_bug_59313(self): # Before 3.14, the C-API function PySequence_Tuple # would create incomplete tuples which were visible to - # the cycle GC, and this test would crash the interpeter. + # the cycle GC, and this test would crash the interpreter. TAG = object() tuples = [] diff --git a/Lib/test/test_capi/test_type.py b/Lib/test/test_capi/test_type.py index 15fb4a93e2ad74..dd660216770dde 100644 --- a/Lib/test/test_capi/test_type.py +++ b/Lib/test/test_capi/test_type.py @@ -259,7 +259,7 @@ class FreezeThis(metaclass=Meta): self.assertEqual(FreezeThis.value, 2) def test_manual_heap_type(self): - # gh-128923: test that a manually allocated and initailized heap type + # gh-128923: test that a manually allocated and initialized heap type # works correctly ManualHeapType = _testcapi.ManualHeapType for i in range(100): diff --git a/Lib/test/test_cmd_line.py b/Lib/test/test_cmd_line.py index f30a1874ab96d4..cc3802a90a850f 100644 --- a/Lib/test/test_cmd_line.py +++ b/Lib/test/test_cmd_line.py @@ -980,7 +980,7 @@ def test_python_legacy_windows_fs_encoding(self): def test_python_legacy_windows_stdio(self): # Test that _WindowsConsoleIO is used when PYTHONLEGACYWINDOWSSTDIO # is not set. - # We cannot use PIPE becase it prevents creating new console. + # We cannot use PIPE because it prevents creating new console. # So we use exit code. code = "import sys; sys.exit(type(sys.stdout.buffer.raw).__name__ != '_WindowsConsoleIO')" env = os.environ.copy() diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py index 60c62430370e96..4888bf10a983d5 100644 --- a/Lib/test/test_dict.py +++ b/Lib/test/test_dict.py @@ -1581,7 +1581,7 @@ def check_unhashable_key(): with check_unhashable_key(): d.get(key) - # Only TypeError exception is overriden, + # Only TypeError exception is overridden, # other exceptions are left unchanged. class HashError: def __hash__(self): diff --git a/Lib/test/test_dis.py b/Lib/test/test_dis.py index 355990ed58ee09..fc3d53271d1c0b 100644 --- a/Lib/test/test_dis.py +++ b/Lib/test/test_dis.py @@ -1696,7 +1696,7 @@ def jumpy(): # code_object_inner before rerunning the tests def _stringify_instruction(instr): - # Since postions offsets change a lot for these test cases, ignore them. + # Since positions offsets change a lot for these test cases, ignore them. 
base = ( f" make_inst(opname={instr.opname!r}, arg={instr.arg!r}, argval={instr.argval!r}, " + f"argrepr={instr.argrepr!r}, offset={instr.offset}, start_offset={instr.start_offset}, " + diff --git a/Lib/test/test_fileio.py b/Lib/test/test_fileio.py index e3d54f6315aade..2e5b894fd5b063 100644 --- a/Lib/test/test_fileio.py +++ b/Lib/test/test_fileio.py @@ -388,7 +388,7 @@ def check_readall(name, code, prelude="", cleanup="", syscalls = strace_helper.filter_memory(syscalls) # The first call should be an open that returns a - # file descriptor (fd). Afer that calls may vary. Once the file + # file descriptor (fd). After that calls may vary. Once the file # is opened, check calls refer to it by fd as the filename # could be removed from the filesystem, renamed, etc. See: # Time-of-check time-of-use (TOCTOU) software bug class. diff --git a/Lib/test/test_generators.py b/Lib/test/test_generators.py index 3e41c7b9663491..7ce84ca0a3bb42 100644 --- a/Lib/test/test_generators.py +++ b/Lib/test/test_generators.py @@ -2377,7 +2377,7 @@ def printsolution(self, x): """ weakref_tests = """\ -Generators are weakly referencable: +Generators are weakly referenceable: >>> import weakref >>> def gen(): @@ -2388,7 +2388,7 @@ def printsolution(self, x): True >>> p = weakref.proxy(gen) -Generator-iterators are weakly referencable as well: +Generator-iterators are weakly referenceable as well: >>> gi = gen() >>> wr = weakref.ref(gi) diff --git a/Lib/test/test_genexps.py b/Lib/test/test_genexps.py index fe5f18fa3f88a0..639c8fde612091 100644 --- a/Lib/test/test_genexps.py +++ b/Lib/test/test_genexps.py @@ -256,7 +256,7 @@ >>> me.gi_running 0 -Verify that genexps are weakly referencable +Verify that genexps are weakly referenceable >>> import weakref >>> g = (i*i for i in range(4)) diff --git a/Lib/test/test_pyrepl/test_pyrepl.py b/Lib/test/test_pyrepl/test_pyrepl.py index 657a971f8769df..6997a9223f8b0d 100644 --- a/Lib/test/test_pyrepl/test_pyrepl.py +++ b/Lib/test/test_pyrepl/test_pyrepl.py @@ -1005,7 +1005,7 @@ def test_builtin_completion_top_level(self): # Make iter_modules() search only the standard library. # This makes the test more reliable in case there are # other user packages/scripts on PYTHONPATH which can - # intefere with the completions. + # interfere with the completions. 
lib_path = os.path.dirname(importlib.__path__[0]) sys.path = [lib_path] From f0bb2063ed1aed0e6cedc074d1ea5bde6bfbf2e1 Mon Sep 17 00:00:00 2001 From: Cornelius Roemer Date: Mon, 21 Jul 2025 01:10:05 +0200 Subject: [PATCH 5/9] Progress --- Lib/imaplib.py | 2 +- Lib/test/test_gzip.py | 4 ++-- Lib/test/test_logging.py | 2 +- Lib/test/test_memoryview.py | 2 +- Lib/test/test_opcache.py | 2 +- Lib/test/test_plistlib.py | 6 +++--- Lib/test/test_pty.py | 2 +- Lib/test/test_set.py | 2 +- Lib/test/test_statistics.py | 2 +- Lib/test/test_subprocess.py | 2 +- Lib/test/test_sysconfig.py | 4 ++-- Lib/test/test_tarfile.py | 2 +- Lib/test/test_weakref.py | 4 ++-- Lib/test/test_xml_etree.py | 2 +- Misc/NEWS.d/3.14.0b1.rst | 4 ++-- .../2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst | 2 +- .../Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst | 2 +- .../2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst | 2 +- 18 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Lib/imaplib.py b/Lib/imaplib.py index 2c3925958d011b..e663e55ad6ebc1 100644 --- a/Lib/imaplib.py +++ b/Lib/imaplib.py @@ -1524,7 +1524,7 @@ def _pop(self, timeout, default=('', None)): # Historical Note: # The timeout was originally implemented using select() after # checking for the presence of already-buffered data. - # That allowed timeouts on pipe connetions like IMAP4_stream. + # That allowed timeouts on pipe connections like IMAP4_stream. # However, it seemed possible that SSL data arriving without any # IMAP data afterward could cause select() to indicate available # application data when there was none, leading to a read() call diff --git a/Lib/test/test_gzip.py b/Lib/test/test_gzip.py index a12ff5662a73db..df4748057ab254 100644 --- a/Lib/test/test_gzip.py +++ b/Lib/test/test_gzip.py @@ -144,7 +144,7 @@ def test_read1(self): self.assertEqual(b''.join(blocks), data1 * 50) def test_readinto(self): - # 10MB of uncompressible data to ensure multiple reads + # 10MB of incompressible data to ensure multiple reads large_data = os.urandom(10 * 2**20) with gzip.GzipFile(self.filename, 'wb') as f: f.write(large_data) @@ -156,7 +156,7 @@ def test_readinto(self): self.assertEqual(buf, large_data) def test_readinto1(self): - # 10MB of uncompressible data to ensure multiple reads + # 10MB of incompressible data to ensure multiple reads large_data = os.urandom(10 * 2**20) with gzip.GzipFile(self.filename, 'wb') as f: f.write(large_data) diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py index 275f7ce47d09b5..e82dc611baac61 100644 --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -2387,7 +2387,7 @@ def __getattr__(self, attribute): return getattr(queue, attribute) class CustomQueueFakeProtocol(CustomQueueProtocol): - # An object implementing the minimial Queue API for + # An object implementing the minimal Queue API for # the logging module but with incorrect signatures. 
# # The object will be considered a valid queue class since we diff --git a/Lib/test/test_memoryview.py b/Lib/test/test_memoryview.py index 64f440f180bbf0..653b8e0eeba6bd 100644 --- a/Lib/test/test_memoryview.py +++ b/Lib/test/test_memoryview.py @@ -738,7 +738,7 @@ def test_picklebuffer_reference_loop(self): @support.requires_resource("cpu") class RacingTest(unittest.TestCase): def test_racing_getbuf_and_releasebuf(self): - """Repeatly access the memoryview for racing.""" + """Repeatedly access the memoryview for racing.""" try: from multiprocessing.managers import SharedMemoryManager except ImportError: diff --git a/Lib/test/test_opcache.py b/Lib/test/test_opcache.py index 30baa09048616c..4f4a9516d8411b 100644 --- a/Lib/test/test_opcache.py +++ b/Lib/test/test_opcache.py @@ -571,7 +571,7 @@ def test(default=None): def make_deferred_ref_count_obj(): """Create an object that uses deferred reference counting. - Only objects that use deferred refence counting may be stored in inline + Only objects that use deferred reference counting may be stored in inline caches in free-threaded builds. This constructs a new class named Foo, which uses deferred reference counting. """ diff --git a/Lib/test/test_plistlib.py b/Lib/test/test_plistlib.py index a0c76e5dec5ebe..5b420cb54a2e68 100644 --- a/Lib/test/test_plistlib.py +++ b/Lib/test/test_plistlib.py @@ -858,7 +858,7 @@ def test_load_aware_datetime(self): self.assertEqual(dt.tzinfo, datetime.UTC) @unittest.skipUnless("America/Los_Angeles" in zoneinfo.available_timezones(), - "Can't find timezone datebase") + "Can't find timezone database") def test_dump_aware_datetime(self): dt = datetime.datetime(2345, 6, 7, 8, 9, 10, tzinfo=zoneinfo.ZoneInfo("America/Los_Angeles")) @@ -877,7 +877,7 @@ def test_dump_utc_aware_datetime(self): self.assertEqual(loaded_dt, dt) @unittest.skipUnless("America/Los_Angeles" in zoneinfo.available_timezones(), - "Can't find timezone datebase") + "Can't find timezone database") def test_dump_aware_datetime_without_aware_datetime_option(self): dt = datetime.datetime(2345, 6, 7, 8, tzinfo=zoneinfo.ZoneInfo("America/Los_Angeles")) @@ -1032,7 +1032,7 @@ def test_load_aware_datetime(self): datetime.datetime(2345, 6, 7, 8, tzinfo=datetime.UTC)) @unittest.skipUnless("America/Los_Angeles" in zoneinfo.available_timezones(), - "Can't find timezone datebase") + "Can't find timezone database") def test_dump_aware_datetime_without_aware_datetime_option(self): dt = datetime.datetime(2345, 6, 7, 8, tzinfo=zoneinfo.ZoneInfo("America/Los_Angeles")) diff --git a/Lib/test/test_pty.py b/Lib/test/test_pty.py index 4836f38c388c05..9b2ffd1e375ab5 100644 --- a/Lib/test/test_pty.py +++ b/Lib/test/test_pty.py @@ -53,7 +53,7 @@ def normalize_output(data): # etc.) # This is about the best we can do without getting some feedback - # from someone more knowledgable. + # from someone more knowledgeable. # OSF/1 (Tru64) apparently turns \n into \r\r\n. if data.endswith(b'\r\r\n'): diff --git a/Lib/test/test_set.py b/Lib/test/test_set.py index c0df9507bd7f5e..203a231201c669 100644 --- a/Lib/test/test_set.py +++ b/Lib/test/test_set.py @@ -661,7 +661,7 @@ def check_unhashable_element(): with check_unhashable_element(): myset.discard(elem) - # Only TypeError exception is overriden, + # Only TypeError exception is overridden, # other exceptions are left unchanged. 
class HashError: def __hash__(self): diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py index 8250b0aef09aec..6eb7f15cbfe3f3 100644 --- a/Lib/test/test_statistics.py +++ b/Lib/test/test_statistics.py @@ -2998,7 +2998,7 @@ def test_cdf(self): X = NormalDist(100, 15) cdfs = [X.cdf(x) for x in range(1, 200)] self.assertEqual(set(map(type, cdfs)), {float}) - # Verify montonic + # Verify monotonic self.assertEqual(cdfs, sorted(cdfs)) # Verify center (should be exact) self.assertEqual(X.cdf(100), 0.50) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py index f0e350c71f60ea..4287765c60348d 100644 --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -3438,7 +3438,7 @@ def test_vfork_used_when_expected(self): # because libc tends to implement that internally using vfork. But # that'd just be testing a libc+kernel implementation detail. - # Are intersted in the system calls: + # Are interested in the system calls: # clone,clone2,clone3,fork,vfork,exit,exit_group # Unfortunately using `--trace` with that list to strace fails because # not all are supported on all platforms (ex. clone2 is ia64 only...) diff --git a/Lib/test/test_sysconfig.py b/Lib/test/test_sysconfig.py index 2eb8de4b29fe96..f2c9848eefc45f 100644 --- a/Lib/test/test_sysconfig.py +++ b/Lib/test/test_sysconfig.py @@ -697,7 +697,7 @@ def test_sysconfigdata_json(self): # Keys dependent on uncontrollable external context ignore_keys = {'userbase'} - # Keys dependent on Python being run outside the build directrory + # Keys dependent on Python being run outside the build directory if sysconfig.is_python_build(): ignore_keys |= {'srcdir'} # Keys dependent on the executable location @@ -706,7 +706,7 @@ def test_sysconfigdata_json(self): # Keys dependent on the environment (different inside virtual environments) if sys.prefix != sys.base_prefix: ignore_keys |= {'prefix', 'exec_prefix', 'base', 'platbase'} - # Keys dependent on Python being run from the prefix targetted when building (different on relocatable installs) + # Keys dependent on Python being run from the prefix targeted when building (different on relocatable installs) if sysconfig._installation_is_relocated(): ignore_keys |= {'prefix', 'exec_prefix', 'base', 'platbase', 'installed_base', 'installed_platbase'} diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py index 7055e1ed147a9e..db4028a5d3677c 100644 --- a/Lib/test/test_tarfile.py +++ b/Lib/test/test_tarfile.py @@ -4112,7 +4112,7 @@ def test_sneaky_hardlink_fallback(self): arc.add("b/") # Point "c" to the bottom of the tree in "a" arc.add("c", symlink_to=os.path.join("a", "t")) - # link to non-existant location under "a" + # link to non-existent location under "a" arc.add("c/escape", symlink_to=os.path.join("..", "..", "link_here")) # Move "c" to point to "b" ("c/escape" no longer exists) diff --git a/Lib/test/test_weakref.py b/Lib/test/test_weakref.py index 4c7c900eb56ae1..55299995e6e08b 100644 --- a/Lib/test/test_weakref.py +++ b/Lib/test/test_weakref.py @@ -1857,7 +1857,7 @@ def test_weak_keyed_bad_delitem(self): self.assertRaises(KeyError, d.__delitem__, o) self.assertRaises(KeyError, d.__getitem__, o) - # If a key isn't of a weakly referencable type, __getitem__ and + # If a key isn't of a weakly referenceable type, __getitem__ and # __setitem__ raise TypeError. __delitem__ should too. self.assertRaises(TypeError, d.__delitem__, 13) self.assertRaises(TypeError, d.__getitem__, 13) @@ -2260,7 +2260,7 @@ def test_names(self): >>> class Dict(dict): ... 
pass ... ->>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable +>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referenceable >>> r = weakref.ref(obj) >>> print(r() is obj) True diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py index bf6d5074fdebd8..5e4b704f869e1f 100644 --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -2736,7 +2736,7 @@ def test_remove_with_clear_assume_existing(self): def do_test_remove_with_clear(self, *, raises): - # Until the discrepency between "del root[:]" and "root.clear()" is + # Until the discrepancy between "del root[:]" and "root.clear()" is # resolved, we need to keep two tests. Previously, using "del root[:]" # did not crash with the reproducer of gh-126033 while "root.clear()" # did. diff --git a/Misc/NEWS.d/3.14.0b1.rst b/Misc/NEWS.d/3.14.0b1.rst index 041fbaf2051719..02ceb82b556386 100644 --- a/Misc/NEWS.d/3.14.0b1.rst +++ b/Misc/NEWS.d/3.14.0b1.rst @@ -1756,7 +1756,7 @@ Add support for macOS multi-arch builds with the JIT enabled .. nonce: q9fvyM .. section: Core and Builtins -PyREPL now supports syntax highlighing. Contributed by Łukasz Langa. +PyREPL now supports syntax highlighting. Contributed by Łukasz Langa. .. @@ -1797,7 +1797,7 @@ non-``None`` ``closure``. Patch by Bartosz Sławecki. .. nonce: Uj7lyY .. section: Core and Builtins -Fix a bug that was allowing newlines inconsitently in format specifiers for +Fix a bug that was allowing newlines inconsistently in format specifiers for single-quoted f-strings. Patch by Pablo Galindo. .. diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst index 5c0813b1a0abda..767d7b97726971 100644 --- a/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst +++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-07-19-12-37-05.gh-issue-136801.XU_tF2.rst @@ -1 +1 @@ -Fix PyREPL syntax highlightning on match cases after multi-line case. Contributed by Olga Matoula. +Fix PyREPL syntax highlighting on match cases after multi-line case. Contributed by Olga Matoula. diff --git a/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst b/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst index 0a0d66ac0b8abf..ddc2310392fe92 100644 --- a/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst +++ b/Misc/NEWS.d/next/Library/2025-07-05-09-45-04.gh-issue-136286.N67Amr.rst @@ -1,2 +1,2 @@ -Fix pickling failures for protocols 0 and 1 for many objects realted to +Fix pickling failures for protocols 0 and 1 for many objects related to subinterpreters. diff --git a/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst b/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst index 25599a865b7246..ebe3ab0e7d1993 100644 --- a/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst +++ b/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst @@ -1,4 +1,4 @@ The cases generator no longer accepts type annotations on stack items. -Conversions to non-default types are now done explictly in bytecodes.c and +Conversions to non-default types are now done explicitly in bytecodes.c and optimizer_bytecodes.c. This will simplify code generation for top-of-stack caching and other future features. 
From be0fe0be9ffbcabe50bcaa5c25b8e1b82c3c70dd Mon Sep 17 00:00:00 2001 From: Cornelius Roemer Date: Mon, 21 Jul 2025 01:11:08 +0200 Subject: [PATCH 6/9] zstd progress --- Modules/_zstd/_zstdmodule.c | 4 ++-- Modules/_zstd/_zstdmodule.h | 2 +- Modules/_zstd/buffer.h | 2 +- Modules/_zstd/clinic/_zstdmodule.c.h | 2 +- Modules/_zstd/compressor.c | 4 ++-- Modules/_zstd/decompressor.c | 2 +- Modules/_zstd/zstddict.c | 2 +- Modules/_zstd/zstddict.h | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Modules/_zstd/_zstdmodule.c b/Modules/_zstd/_zstdmodule.c index d75c0779474a82..59aece588ce8ca 100644 --- a/Modules/_zstd/_zstdmodule.c +++ b/Modules/_zstd/_zstdmodule.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 @@ -497,7 +497,7 @@ _zstd.get_frame_info frame_buffer: Py_buffer A bytes-like object, containing the header of a Zstandard frame. -Get Zstandard frame infomation from a frame header. +Get Zstandard frame information from a frame header. [clinic start generated code]*/ static PyObject * diff --git a/Modules/_zstd/_zstdmodule.h b/Modules/_zstd/_zstdmodule.h index 4e8f708f2232c7..82226ff8718e6b 100644 --- a/Modules/_zstd/_zstdmodule.h +++ b/Modules/_zstd/_zstdmodule.h @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* Declarations shared between different parts of the _zstd module*/ diff --git a/Modules/_zstd/buffer.h b/Modules/_zstd/buffer.h index 4c885fa0d720fd..0ac7bcb4ddc416 100644 --- a/Modules/_zstd/buffer.h +++ b/Modules/_zstd/buffer.h @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ #ifndef ZSTD_BUFFER_H #define ZSTD_BUFFER_H diff --git a/Modules/_zstd/clinic/_zstdmodule.c.h b/Modules/_zstd/clinic/_zstdmodule.c.h index 766e1cfa776767..081ea728001757 100644 --- a/Modules/_zstd/clinic/_zstdmodule.c.h +++ b/Modules/_zstd/clinic/_zstdmodule.c.h @@ -289,7 +289,7 @@ PyDoc_STRVAR(_zstd_get_frame_info__doc__, "get_frame_info($module, /, frame_buffer)\n" "--\n" "\n" -"Get Zstandard frame infomation from a frame header.\n" +"Get Zstandard frame information from a frame header.\n" "\n" " frame_buffer\n" " A bytes-like object, containing the header of a Zstandard frame."); diff --git a/Modules/_zstd/compressor.c b/Modules/_zstd/compressor.c index bc9e6eff89af68..dcd4513f9a7414 100644 --- a/Modules/_zstd/compressor.c +++ b/Modules/_zstd/compressor.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. 
*/ /* ZstdCompressor class definitions */ @@ -713,7 +713,7 @@ _zstd_ZstdCompressor_set_pledged_input_size_impl(ZstdCompressor *self, unsigned long long size) /*[clinic end generated code: output=3a09e55cc0e3b4f9 input=afd8a7d78cff2eb5]*/ { - // Error occured while converting argument, should be unreachable + // Error occurred while converting argument, should be unreachable assert(size != ZSTD_CONTENTSIZE_ERROR); /* Thread-safe code */ diff --git a/Modules/_zstd/decompressor.c b/Modules/_zstd/decompressor.c index c53d6e4cb05cf0..b00ee05d2f51bf 100644 --- a/Modules/_zstd/decompressor.c +++ b/Modules/_zstd/decompressor.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* ZstdDecompressor class definition */ diff --git a/Modules/_zstd/zstddict.c b/Modules/_zstd/zstddict.c index 14f74aaed46ec5..35d6ca8e55a265 100644 --- a/Modules/_zstd/zstddict.c +++ b/Modules/_zstd/zstddict.c @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ /* ZstdDict class definitions */ diff --git a/Modules/_zstd/zstddict.h b/Modules/_zstd/zstddict.h index 4a403416dbd4a3..e0d3f46b2b14a6 100644 --- a/Modules/_zstd/zstddict.h +++ b/Modules/_zstd/zstddict.h @@ -1,4 +1,4 @@ -/* Low level interface to the Zstandard algorthm & the zstd library. */ +/* Low level interface to the Zstandard algorithm & the zstd library. */ #ifndef ZSTD_DICT_H #define ZSTD_DICT_H From 0fa7feca203383db908ffd90cb997bbc8270ad55 Mon Sep 17 00:00:00 2001 From: Cornelius Roemer Date: Mon, 21 Jul 2025 01:15:12 +0200 Subject: [PATCH 7/9] progress --- Modules/Setup.stdlib.in | 2 +- Modules/_functoolsmodule.c | 2 +- Modules/hmacmodule.c | 2 +- Modules/posixmodule.c | 2 +- Objects/codeobject.c | 4 ++-- Objects/typeobject.c | 4 ++-- Objects/unicodeobject.c | 2 +- PCbuild/pyproject.props | 2 +- Parser/lexer/lexer.c | 2 +- Python/crossinterp.c | 4 ++-- Python/gc.c | 2 +- Python/gc_free_threading.c | 6 +++--- Python/pystate.c | 2 +- 13 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in index 86c8eb27c0a6c7..3c6ee659e744af 100644 --- a/Modules/Setup.stdlib.in +++ b/Modules/Setup.stdlib.in @@ -85,7 +85,7 @@ # # Since the compilation of the built-in cryptographic modules depends # on whether we are building on WASI or not, rules will be explicitly -# written. In the future, it should be preferrable to be able to setup +# written. In the future, it should be preferable to be able to setup # the relevant bits here instead of in Makefile.pre.in or configure.ac. # Hash functions can be disabled with --without-builtin-hashlib-hashes. 
diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c index 1c888295cb07f1..e4d32415d2be0c 100644 --- a/Modules/_functoolsmodule.c +++ b/Modules/_functoolsmodule.c @@ -503,7 +503,7 @@ partial_vectorcall(PyObject *self, PyObject *const *args, assert(i == pto_nkwds); Py_XDECREF(pto_kw_merged); - /* Resize Stack if the removing overallocation saves some noticable memory + /* Resize Stack if the removing overallocation saves some noticeable memory * NOTE: This whole block can be removed without breaking anything */ Py_ssize_t noveralloc = n_merges + nkwds; if (stack != small_stack && noveralloc > 6 && noveralloc > init_stack_size / 10) { diff --git a/Modules/hmacmodule.c b/Modules/hmacmodule.c index 95e400231bb65c..b11e97e52dde8b 100644 --- a/Modules/hmacmodule.c +++ b/Modules/hmacmodule.c @@ -649,7 +649,7 @@ find_hash_info(hmacmodule_state *state, PyObject *hash_info_ref) { const py_hmac_hinfo *info = NULL; int rc = find_hash_info_impl(state, hash_info_ref, &info); - // The code below could be simplfied with only 'rc == 0' case, + // The code below could be simplified with only 'rc == 0' case, // but we are deliberately verbose to ease future improvements. if (rc < 0) { return NULL; diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c index 47eaf5cd428a53..46a528afdc155c 100644 --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -5830,7 +5830,7 @@ os_nice_impl(PyObject *module, int increment) /* There are two flavours of 'nice': one that returns the new priority (as required by almost all standards out there) and the - Linux/FreeBSD one, which returns '0' on success and advices + Linux/FreeBSD one, which returns '0' on success and advises the use of getpriority() to get the new priority. If we are of the nice family that returns the new priority, we diff --git a/Objects/codeobject.c b/Objects/codeobject.c index 42e021679b583f..223bb747ca2de4 100644 --- a/Objects/codeobject.c +++ b/Objects/codeobject.c @@ -2012,7 +2012,7 @@ _PyCode_CheckNoExternalState(PyCodeObject *co, _PyCode_var_counts_t *counts, errmsg = "globals not supported"; } // Otherwise we don't check counts.unbound.globals.numunknown since we can't - // distinguish beween globals and builtins here. + // distinguish between globals and builtins here. if (errmsg != NULL) { if (p_errmsg != NULL) { @@ -2123,7 +2123,7 @@ code_returns_only_none(PyCodeObject *co) for (int i = 0; i < len; i += _PyInstruction_GetLength(co, i)) { _Py_CODEUNIT inst = _Py_GetBaseCodeUnit(co, i); if (IS_RETURN_OPCODE(inst.op.code)) { - // We alraedy know it isn't returning None. + // We already know it isn't returning None. return 0; } } diff --git a/Objects/typeobject.c b/Objects/typeobject.c index 379c4d0467c487..ce7d6c6a9c093d 100644 --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -4880,7 +4880,7 @@ type_new_impl(type_new_ctx *ctx) assert(_PyType_CheckConsistency(type)); #if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4 - // After this point, other threads can potentally use this type. + // After this point, other threads can potentially use this type. ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG; #endif @@ -5597,7 +5597,7 @@ PyType_FromMetaclass( assert(_PyType_CheckConsistency(type)); #if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4 - // After this point, other threads can potentally use this type. + // After this point, other threads can potentially use this type. 
     ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG;
 #endif

diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c
index 5c2308a012142a..63d14069fcdd5a 100644
--- a/Objects/unicodeobject.c
+++ b/Objects/unicodeobject.c
@@ -14289,7 +14289,7 @@ unicode_getnewargs(PyObject *v, PyObject *Py_UNUSED(ignored))
 }

 /*
-This function searchs the longest common leading whitespace
+This function searches the longest common leading whitespace
 of all lines in the [src, end).
 It returns the length of the common leading whitespace and sets `output` to
 point to the beginning of the common leading whitespace if length > 0.
diff --git a/PCbuild/pyproject.props b/PCbuild/pyproject.props
index cf35e705f355a7..f3fb0ad44c1705 100644
--- a/PCbuild/pyproject.props
+++ b/PCbuild/pyproject.props
@@ -127,7 +127,7 @@
diff --git a/Parser/lexer/lexer.c b/Parser/lexer/lexer.c
index 81363cf8e810fe..ace1bced5bd733 100644
--- a/Parser/lexer/lexer.c
+++ b/Parser/lexer/lexer.c
@@ -137,7 +137,7 @@ set_ftstring_expr(struct tok_state* tok, struct token *token, char c) {

         // Handle quotes
         if (ch == '"' || ch == '\'') {
-            // The following if/else block works becase there is an off number
+            // The following if/else block works because there is an off number
             // of quotes in STRING tokens and the lexer only ever reaches this
             // function with valid STRING tokens.
             // For example: """hello"""
diff --git a/Python/crossinterp.c b/Python/crossinterp.c
index 16a23f0351cd26..ce361ae363e5fb 100644
--- a/Python/crossinterp.c
+++ b/Python/crossinterp.c
@@ -772,7 +772,7 @@ _PyPickle_GetXIData(PyThreadState *tstate, PyObject *obj, _PyXIData_t *xidata)
         return -1;
     }

-    // If we had an "unwrapper" mechnanism, we could call
+    // If we had an "unwrapper" mechanism, we could call
     // _PyObject_GetXIData() on the bytes object directly and add
     // a simple unwrapper to call pickle.loads() on the bytes.
     size_t size = sizeof(struct _shared_pickle_data);
@@ -3176,7 +3176,7 @@ _PyXI_InitTypes(PyInterpreterState *interp)
             "failed to initialize the cross-interpreter exception types");
     }
     // We would initialize heap types here too but that leads to ref leaks.
-    // Instead, we intialize them in _PyXI_Init().
+    // Instead, we initialize them in _PyXI_Init().
     return _PyStatus_OK();
 }
diff --git a/Python/gc.c b/Python/gc.c
index 4160f68c27a3ef..d485a0d0b17e6f 100644
--- a/Python/gc.c
+++ b/Python/gc.c
@@ -1,6 +1,6 @@
 // This implements the reference cycle garbage collector.
 // The Python module interface to the collector is in gcmodule.c.
-// See InternalDocs/garbage_collector.md for more infromation.
+// See InternalDocs/garbage_collector.md for more information.

 #include "Python.h"
 #include "pycore_ceval.h"   // _Py_set_eval_breaker_bit()
diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c
index 0b0ddf227e4952..9a9e28c5982fd6 100644
--- a/Python/gc_free_threading.c
+++ b/Python/gc_free_threading.c
@@ -529,7 +529,7 @@ static_assert(BUFFER_HI < BUFFER_SIZE &&
               BUFFER_LO > 0,
               "Invalid prefetch buffer level settings.");

-// Prefetch intructions will fetch the line of data from memory that
+// Prefetch instructions will fetch the line of data from memory that
 // contains the byte specified with the source operand to a location in
 // the cache hierarchy specified by a locality hint.  The instruction
 // is only a hint and the CPU is free to ignore it.  Instructions and
@@ -581,7 +581,7 @@ static_assert(BUFFER_HI < BUFFER_SIZE &&
 #define prefetch(ptr)
 #endif

-// a contigous sequence of PyObject pointers, can contain NULLs
+// a contiguous sequence of PyObject pointers, can contain NULLs
 typedef struct {
     PyObject **start;
     PyObject **end;
@@ -750,7 +750,7 @@ gc_mark_enqueue(PyObject *op, gc_mark_args_t *args)
     }
 }

-// Called when we have a contigous sequence of PyObject pointers, either
+// Called when we have a contiguous sequence of PyObject pointers, either
 // a tuple or list object. This will add the items to the buffer if there
 // is space for them all otherwise push a new "span" on the span stack. Using
 // spans has the advantage of not creating a deep _PyObjectStack stack when
diff --git a/Python/pystate.c b/Python/pystate.c
index 0d4c26f92cec90..3a9fad3f6346ae 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -543,7 +543,7 @@ init_interpreter(PyInterpreterState *interp,
     interp->threads.preallocated = &interp->_initial_thread;

     // We would call _PyObject_InitState() at this point
-    // if interp->feature_flags were alredy set.
+    // if interp->feature_flags were already set.

     _PyEval_InitState(interp);
     _PyGC_InitState(&interp->gc);

From 96a5a0b2094b85aca0fb38b1f6f3fd8bcd5b1cff Mon Sep 17 00:00:00 2001
From: Cornelius Roemer
Date: Mon, 21 Jul 2025 01:17:53 +0200
Subject: [PATCH 8/9] done

---
 Python/remote_debug.h                           | 2 +-
 Python/specialize.c                             | 2 +-
 Tools/build/generate-build-details.py           | 2 +-
 Tools/cases_generator/tier1_generator.py        | 2 +-
 Tools/peg_generator/pegen/grammar_visualizer.py | 6 +++---
 5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/Python/remote_debug.h b/Python/remote_debug.h
index 5324a7aaa6f5e5..fa6e149e82f8ca 100644
--- a/Python/remote_debug.h
+++ b/Python/remote_debug.h
@@ -1119,7 +1119,7 @@ _Py_RemoteDebug_PagedReadRemoteMemory(proc_handle_t *handle,
     }

     if (_Py_RemoteDebug_ReadRemoteMemory(handle, page_base, page_size, entry->data) < 0) {
-        // Try to just copy the exact ammount as a fallback
+        // Try to just copy the exact amount as a fallback
         PyErr_Clear();
         goto fallback;
     }
diff --git a/Python/specialize.c b/Python/specialize.c
index fe8d04cf3442f1..04f50c9ea8ee4c 100644
--- a/Python/specialize.c
+++ b/Python/specialize.c
@@ -2544,7 +2544,7 @@ static _PyBinaryOpSpecializationDescr binaryop_extend_descrs[] = {
     {NB_INPLACE_AND, compactlongs_guard, compactlongs_and},
     {NB_INPLACE_XOR, compactlongs_guard, compactlongs_xor},

-    /* float-long arithemetic */
+    /* float-long arithmetic */
     {NB_ADD, float_compactlong_guard, float_compactlong_add},
     {NB_SUBTRACT, float_compactlong_guard, float_compactlong_subtract},
     {NB_TRUE_DIVIDE, nonzero_float_compactlong_guard, float_compactlong_true_div},
diff --git a/Tools/build/generate-build-details.py b/Tools/build/generate-build-details.py
index 8cd23e2f54f529..55fc5cfac93735 100644
--- a/Tools/build/generate-build-details.py
+++ b/Tools/build/generate-build-details.py
@@ -155,7 +155,7 @@ def make_paths_relative(data: dict[str, Any], config_path: str | None = None) ->
             continue
         # Get the relative path
         new_path = os.path.relpath(current_path, data['base_prefix'])
-        # Join '.' so that the path is formated as './path' instead of 'path'
+        # Join '.' so that the path is formatted as './path' instead of 'path'
         new_path = os.path.join('.', new_path)
         container[child] = new_path
diff --git a/Tools/cases_generator/tier1_generator.py b/Tools/cases_generator/tier1_generator.py
index 32dc346d5e891a..7cd95ed9b32691 100644
--- a/Tools/cases_generator/tier1_generator.py
+++ b/Tools/cases_generator/tier1_generator.py
@@ -201,7 +201,7 @@ def generate_tier1_labels(
     analysis: Analysis, emitter: Emitter
 ) -> None:
     emitter.emit("\n")
-    # Emit tail-callable labels as function defintions
+    # Emit tail-callable labels as function definitions
     for name, label in analysis.labels.items():
         emitter.emit(f"LABEL({name})\n")
         storage = Storage(Stack(), [], [], 0, False)
diff --git a/Tools/peg_generator/pegen/grammar_visualizer.py b/Tools/peg_generator/pegen/grammar_visualizer.py
index 11f784f45b66b8..0c48ce7bb7860a 100644
--- a/Tools/peg_generator/pegen/grammar_visualizer.py
+++ b/Tools/peg_generator/pegen/grammar_visualizer.py
@@ -33,15 +33,15 @@ def print_nodes_recursively(self, node: Rule, prefix: str = "", istail: bool = T
         value = self.name(node)

         line = prefix + ("└──" if istail else "├──") + value + "\n"
-        sufix = " " if istail else "│ "
+        suffix = " " if istail else "│ "

         if not children:
             return line

         *children, last = children
         for child in children:
-            line += self.print_nodes_recursively(child, prefix + sufix, False)
-        line += self.print_nodes_recursively(last, prefix + sufix, True)
+            line += self.print_nodes_recursively(child, prefix + suffix, False)
+        line += self.print_nodes_recursively(last, prefix + suffix, True)

         return line

From e09fee00769b9fa85061133890ef905f31d7c66d Mon Sep 17 00:00:00 2001
From: Cornelius Roemer
Date: Mon, 21 Jul 2025 01:20:41 +0200
Subject: [PATCH 9/9] regen-all

---
 Modules/_zstd/_zstdmodule.c          | 2 +-
 Modules/_zstd/clinic/_zstdmodule.c.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Modules/_zstd/_zstdmodule.c b/Modules/_zstd/_zstdmodule.c
index 59aece588ce8ca..75477b132ee452 100644
--- a/Modules/_zstd/_zstdmodule.c
+++ b/Modules/_zstd/_zstdmodule.c
@@ -502,7 +502,7 @@ Get Zstandard frame information from a frame header.

 static PyObject *
 _zstd_get_frame_info_impl(PyObject *module, Py_buffer *frame_buffer)
-/*[clinic end generated code: output=56e033cf48001929 input=94b240583ae22ca5]*/
+/*[clinic end generated code: output=56e033cf48001929 input=6b350490b0f58ede]*/
 {
     uint64_t decompressed_size;
     uint32_t dict_id;
diff --git a/Modules/_zstd/clinic/_zstdmodule.c.h b/Modules/_zstd/clinic/_zstdmodule.c.h
index 081ea728001757..feb0563b322838 100644
--- a/Modules/_zstd/clinic/_zstdmodule.c.h
+++ b/Modules/_zstd/clinic/_zstdmodule.c.h
@@ -426,4 +426,4 @@ _zstd_set_parameter_types(PyObject *module, PyObject *const *args, Py_ssize_t na
 exit:
     return return_value;
 }
-/*[clinic end generated code: output=437b084f149e68e5 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=a6ad0ab41c507911 input=a9049054013a1b77]*/
