Python-checkins
Threads by month
- ----- 2025 -----
- February
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2009 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2008 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2007 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2006 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2005 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2004 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2003 -----
- December
- November
- October
- September
- August
January 2025
- 1 participant
- 705 discussions
gh-129409: Fix Integer overflow - SEGV while writing data more than 2GB in CSV file (#129413)
by sobolevn Jan. 29, 2025
by sobolevn Jan. 29, 2025
Jan. 29, 2025
https://github.com/python/cpython/commit/97b0ef05d987ebef354512b516a246feb4…
commit: 97b0ef05d987ebef354512b516a246feb411e815
branch: main
author: Srinivas Reddy Thatiparthy (తాటిపర్తి శ్రీనివాస్ రెడ్డి) <thatiparthysreenivas(a)gmail.com>
committer: sobolevn <mail(a)sobolevn.me>
date: 2025-01-29T11:15:47Z
summary:
gh-129409: Fix Integer overflow - SEGV while writing data more than 2GB in CSV file (#129413)
files:
A Misc/NEWS.d/next/Library/2025-01-29-14-30-54.gh-issue-129409.JZbOE6.rst
M Modules/_csv.c
diff --git a/Misc/NEWS.d/next/Library/2025-01-29-14-30-54.gh-issue-129409.JZbOE6.rst b/Misc/NEWS.d/next/Library/2025-01-29-14-30-54.gh-issue-129409.JZbOE6.rst
new file mode 100644
index 00000000000000..7e00b44c0ef471
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-01-29-14-30-54.gh-issue-129409.JZbOE6.rst
@@ -0,0 +1,2 @@
+Fix an integer overflow in the :mod:`csv` module when writing a data field
+larger than 2GB.
diff --git a/Modules/_csv.c b/Modules/_csv.c
index 7ca30e39e00c0c..e5ae853590bf2c 100644
--- a/Modules/_csv.c
+++ b/Modules/_csv.c
@@ -1138,7 +1138,7 @@ join_append_data(WriterObj *self, int field_kind, const void *field_data,
int copy_phase)
{
DialectObj *dialect = self->dialect;
- int i;
+ Py_ssize_t i;
Py_ssize_t rec_len;
#define INCLEN \
1
0
Jan. 29, 2025
https://github.com/python/cpython/commit/4815131910cec72805ad2966e7af1e2eba…
commit: 4815131910cec72805ad2966e7af1e2eba49fe51
branch: main
author: Irit Katriel <1055913+iritkatriel(a)users.noreply.github.com>
committer: iritkatriel <1055913+iritkatriel(a)users.noreply.github.com>
date: 2025-01-29T09:28:21Z
summary:
gh-100239: specialize bitwise logical binary ops on ints (#128927)
files:
A Misc/NEWS.d/next/Core_and_Builtins/2025-01-16-22-54-12.gh-issue-100239.7_HpBU.rst
M Include/internal/pycore_opcode_metadata.h
M Include/internal/pycore_uop_metadata.h
M Lib/test/test_opcache.py
M Python/bytecodes.c
M Python/specialize.c
M Tools/c-analyzer/cpython/ignored.tsv
diff --git a/Include/internal/pycore_opcode_metadata.h b/Include/internal/pycore_opcode_metadata.h
index 98dfead35f7c31..fe791c090120c9 100644
--- a/Include/internal/pycore_opcode_metadata.h
+++ b/Include/internal/pycore_opcode_metadata.h
@@ -2013,7 +2013,7 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[266] = {
[BINARY_OP_ADD_FLOAT] = { true, INSTR_FMT_IXC0000, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
[BINARY_OP_ADD_INT] = { true, INSTR_FMT_IXC0000, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
[BINARY_OP_ADD_UNICODE] = { true, INSTR_FMT_IXC0000, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
- [BINARY_OP_EXTEND] = { true, INSTR_FMT_IXC0000, HAS_EXIT_FLAG | HAS_ESCAPES_FLAG },
+ [BINARY_OP_EXTEND] = { true, INSTR_FMT_IXC0000, HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG },
[BINARY_OP_INPLACE_ADD_UNICODE] = { true, INSTR_FMT_IXC0000, HAS_LOCAL_FLAG | HAS_DEOPT_FLAG | HAS_EXIT_FLAG | HAS_ERROR_FLAG },
[BINARY_OP_MULTIPLY_FLOAT] = { true, INSTR_FMT_IXC0000, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
[BINARY_OP_MULTIPLY_INT] = { true, INSTR_FMT_IXC0000, HAS_EXIT_FLAG | HAS_ERROR_FLAG },
diff --git a/Include/internal/pycore_uop_metadata.h b/Include/internal/pycore_uop_metadata.h
index 59740dbb57072e..80f89defb7e2eb 100644
--- a/Include/internal/pycore_uop_metadata.h
+++ b/Include/internal/pycore_uop_metadata.h
@@ -82,7 +82,7 @@ const uint16_t _PyUop_Flags[MAX_UOP_ID+1] = {
[_GUARD_BOTH_UNICODE] = HAS_EXIT_FLAG,
[_BINARY_OP_ADD_UNICODE] = HAS_ERROR_FLAG | HAS_PURE_FLAG,
[_BINARY_OP_INPLACE_ADD_UNICODE] = HAS_LOCAL_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG,
- [_GUARD_BINARY_OP_EXTEND] = HAS_EXIT_FLAG | HAS_ESCAPES_FLAG,
+ [_GUARD_BINARY_OP_EXTEND] = HAS_DEOPT_FLAG | HAS_ESCAPES_FLAG,
[_BINARY_OP_EXTEND] = HAS_ESCAPES_FLAG | HAS_PURE_FLAG,
[_BINARY_SUBSCR] = HAS_ERROR_FLAG | HAS_ESCAPES_FLAG,
[_BINARY_SLICE] = HAS_ERROR_FLAG | HAS_ESCAPES_FLAG,
diff --git a/Lib/test/test_opcache.py b/Lib/test/test_opcache.py
index cc58a4b8c3cd11..dc02d1d7babb23 100644
--- a/Lib/test/test_opcache.py
+++ b/Lib/test/test_opcache.py
@@ -1396,6 +1396,29 @@ def compactlong_rhs(arg):
binary_op_nan()
+ def binary_op_bitwise_extend():
+ for _ in range(100):
+ a, b = 2, 7
+ x = a | b
+ self.assertEqual(x, 7)
+ y = a & b
+ self.assertEqual(y, 2)
+ z = a ^ b
+ self.assertEqual(z, 5)
+ a, b = 3, 9
+ a |= b
+ self.assertEqual(a, 11)
+ a, b = 11, 9
+ a &= b
+ self.assertEqual(a, 9)
+ a, b = 3, 9
+ a ^= b
+ self.assertEqual(a, 10)
+
+ binary_op_bitwise_extend()
+ self.assert_specialized(binary_op_bitwise_extend, "BINARY_OP_EXTEND")
+ self.assert_no_opcode(binary_op_bitwise_extend, "BINARY_OP")
+
@cpython_only
@requires_specialization_ft
def test_load_super_attr(self):
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-01-16-22-54-12.gh-issue-100239.7_HpBU.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-01-16-22-54-12.gh-issue-100239.7_HpBU.rst
new file mode 100644
index 00000000000000..6f086b7ecc0036
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-01-16-22-54-12.gh-issue-100239.7_HpBU.rst
@@ -0,0 +1 @@
+Specialize ``BINARY_OP`` for bitwise logical operations on compact ints.
diff --git a/Python/bytecodes.c b/Python/bytecodes.c
index 0d7b9f2a781019..f659a5e5c920a7 100644
--- a/Python/bytecodes.c
+++ b/Python/bytecodes.c
@@ -759,7 +759,7 @@ dummy_func(
assert(INLINE_CACHE_ENTRIES_BINARY_OP == 5);
assert(d && d->guard);
int res = d->guard(left_o, right_o);
- EXIT_IF(!res);
+ DEOPT_IF(!res);
}
pure op(_BINARY_OP_EXTEND, (descr/4, left, right -- res)) {
diff --git a/Python/specialize.c b/Python/specialize.c
index fa022346bdea6a..abb130d73eeebd 100644
--- a/Python/specialize.c
+++ b/Python/specialize.c
@@ -581,6 +581,10 @@ _PyCode_Quicken(_Py_CODEUNIT *instructions, Py_ssize_t size, PyObject *consts,
#define SPEC_FAIL_BINARY_OP_TRUE_DIVIDE_FLOAT 26
#define SPEC_FAIL_BINARY_OP_TRUE_DIVIDE_OTHER 27
#define SPEC_FAIL_BINARY_OP_XOR 28
+#define SPEC_FAIL_BINARY_OP_OR_INT 29
+#define SPEC_FAIL_BINARY_OP_OR_DIFFERENT_TYPES 30
+#define SPEC_FAIL_BINARY_OP_XOR_INT 31
+#define SPEC_FAIL_BINARY_OP_XOR_DIFFERENT_TYPES 32
/* Calls */
@@ -2379,6 +2383,12 @@ binary_op_fail_kind(int oparg, PyObject *lhs, PyObject *rhs)
return SPEC_FAIL_BINARY_OP_MULTIPLY_OTHER;
case NB_OR:
case NB_INPLACE_OR:
+ if (!Py_IS_TYPE(lhs, Py_TYPE(rhs))) {
+ return SPEC_FAIL_BINARY_OP_OR_DIFFERENT_TYPES;
+ }
+ if (PyLong_CheckExact(lhs)) {
+ return SPEC_FAIL_BINARY_OP_OR_INT;
+ }
return SPEC_FAIL_BINARY_OP_OR;
case NB_POWER:
case NB_INPLACE_POWER:
@@ -2406,6 +2416,12 @@ binary_op_fail_kind(int oparg, PyObject *lhs, PyObject *rhs)
return SPEC_FAIL_BINARY_OP_TRUE_DIVIDE_OTHER;
case NB_XOR:
case NB_INPLACE_XOR:
+ if (!Py_IS_TYPE(lhs, Py_TYPE(rhs))) {
+ return SPEC_FAIL_BINARY_OP_XOR_DIFFERENT_TYPES;
+ }
+ if (PyLong_CheckExact(lhs)) {
+ return SPEC_FAIL_BINARY_OP_XOR_INT;
+ }
return SPEC_FAIL_BINARY_OP_XOR;
}
Py_UNREACHABLE();
@@ -2414,6 +2430,34 @@ binary_op_fail_kind(int oparg, PyObject *lhs, PyObject *rhs)
/** Binary Op Specialization Extensions */
+/* long-long */
+
+static inline int
+is_compactlong(PyObject *v)
+{
+ return PyLong_CheckExact(v) &&
+ _PyLong_IsCompact((PyLongObject *)v);
+}
+
+static int
+compactlongs_guard(PyObject *lhs, PyObject *rhs)
+{
+ return (is_compactlong(lhs) && is_compactlong(rhs));
+}
+
+#define BITWISE_LONGS_ACTION(NAME, OP) \
+ static PyObject * \
+ (NAME)(PyObject *lhs, PyObject *rhs) \
+ { \
+ Py_ssize_t rhs_val = _PyLong_CompactValue((PyLongObject *)rhs); \
+ Py_ssize_t lhs_val = _PyLong_CompactValue((PyLongObject *)lhs); \
+ return PyLong_FromSsize_t(lhs_val OP rhs_val); \
+ }
+BITWISE_LONGS_ACTION(compactlongs_or, |)
+BITWISE_LONGS_ACTION(compactlongs_and, &)
+BITWISE_LONGS_ACTION(compactlongs_xor, ^)
+#undef BITWISE_LONGS_ACTION
+
/* float-long */
static inline int
@@ -2484,6 +2528,15 @@ LONG_FLOAT_ACTION(compactlong_float_multiply, *)
LONG_FLOAT_ACTION(compactlong_float_true_div, /)
#undef LONG_FLOAT_ACTION
+static _PyBinaryOpSpecializationDescr compactlongs_specs[NB_OPARG_LAST+1] = {
+ [NB_OR] = {compactlongs_guard, compactlongs_or},
+ [NB_AND] = {compactlongs_guard, compactlongs_and},
+ [NB_XOR] = {compactlongs_guard, compactlongs_xor},
+ [NB_INPLACE_OR] = {compactlongs_guard, compactlongs_or},
+ [NB_INPLACE_AND] = {compactlongs_guard, compactlongs_and},
+ [NB_INPLACE_XOR] = {compactlongs_guard, compactlongs_xor},
+};
+
static _PyBinaryOpSpecializationDescr float_compactlong_specs[NB_OPARG_LAST+1] = {
[NB_ADD] = {float_compactlong_guard, float_compactlong_add},
[NB_SUBTRACT] = {float_compactlong_guard, float_compactlong_subtract},
@@ -2512,6 +2565,7 @@ binary_op_extended_specialization(PyObject *lhs, PyObject *rhs, int oparg,
LOOKUP_SPEC(compactlong_float_specs, oparg);
LOOKUP_SPEC(float_compactlong_specs, oparg);
+ LOOKUP_SPEC(compactlongs_specs, oparg);
#undef LOOKUP_SPEC
return 0;
}
diff --git a/Tools/c-analyzer/cpython/ignored.tsv b/Tools/c-analyzer/cpython/ignored.tsv
index 415d20e5b7fabb..be3ded9f07ef8a 100644
--- a/Tools/c-analyzer/cpython/ignored.tsv
+++ b/Tools/c-analyzer/cpython/ignored.tsv
@@ -381,6 +381,7 @@ Python/pylifecycle.c - INTERPRETER_TRAMPOLINE_CODEDEF -
Python/pystate.c - initial -
Python/specialize.c - adaptive_opcodes -
Python/specialize.c - cache_requirements -
+Python/specialize.c - compactlongs_specs -
Python/specialize.c - float_compactlong_specs -
Python/specialize.c - compactlong_float_specs -
Python/stdlib_module_names.h - _Py_stdlib_module_names -
1
0
https://github.com/python/cpython/commit/03d9cdb7298cdbf67535dfeb3af5c1a619…
commit: 03d9cdb7298cdbf67535dfeb3af5c1a619be7580
branch: main
author: 🇺🇦 Sviatoslav Sydorenko (Святослав Сидоренко) <webknjaz(a)redhat.com>
committer: hugovk <1324225+hugovk(a)users.noreply.github.com>
date: 2025-01-29T11:16:51+02:00
summary:
Merge TSAN test matrices in CI (#123278)
files:
A Tools/tsan/suppressions.txt
D Tools/tsan/supressions.txt
M .github/workflows/build.yml
M .github/workflows/reusable-tsan.yml
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 0c20b85acbd565..72c1618982b146 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -512,26 +512,20 @@ jobs:
run: xvfb-run make ci
build_tsan:
- name: 'Thread sanitizer'
- needs: check_source
- if: needs.check_source.outputs.run_tests == 'true'
- uses: ./.github/workflows/reusable-tsan.yml
- with:
- config_hash: ${{ needs.check_source.outputs.config_hash }}
- options: ./configure --config-cache --with-thread-sanitizer --with-pydebug
- suppressions_path: Tools/tsan/supressions.txt
- tsan_logs_artifact_name: tsan-logs-default
-
- build_tsan_free_threading:
- name: 'Thread sanitizer (free-threading)'
+ name: >-
+ Thread sanitizer
+ ${{ fromJSON(matrix.free-threading) && '(free-threading)' || '' }}
needs: check_source
if: needs.check_source.outputs.run_tests == 'true'
+ strategy:
+ matrix:
+ free-threading:
+ - false
+ - true
uses: ./.github/workflows/reusable-tsan.yml
with:
config_hash: ${{ needs.check_source.outputs.config_hash }}
- options: ./configure --config-cache --disable-gil --with-thread-sanitizer --with-pydebug
- suppressions_path: Tools/tsan/suppressions_free_threading.txt
- tsan_logs_artifact_name: tsan-logs-free-threading
+ free-threading: ${{ matrix.free-threading }}
# CIFuzz job based on https://google.github.io/oss-fuzz/getting-started/continuous-integration/
cifuzz:
@@ -591,7 +585,6 @@ jobs:
- test_hypothesis
- build_asan
- build_tsan
- - build_tsan_free_threading
- cifuzz
runs-on: ubuntu-latest
@@ -625,7 +618,6 @@ jobs:
build_windows,
build_asan,
build_tsan,
- build_tsan_free_threading,
'
|| ''
}}
diff --git a/.github/workflows/reusable-tsan.yml b/.github/workflows/reusable-tsan.yml
index 269f479849f21e..1d2548565d50ef 100644
--- a/.github/workflows/reusable-tsan.yml
+++ b/.github/workflows/reusable-tsan.yml
@@ -6,17 +6,11 @@ on:
config_hash:
required: true
type: string
- options:
- required: true
- type: string
- suppressions_path:
- description: 'A repo relative path to the suppressions file'
- required: true
- type: string
- tsan_logs_artifact_name:
- description: 'Name of the TSAN logs artifact. Must be unique for each job.'
- required: true
- type: string
+ free-threading:
+ description: Whether to use free-threaded mode
+ required: false
+ type: boolean
+ default: false
env:
FORCE_COLOR: 1
@@ -26,9 +20,6 @@ jobs:
name: 'Thread sanitizer'
runs-on: ubuntu-24.04
timeout-minutes: 60
- env:
- OPTIONS: ${{ inputs.options }}
- SUPPRESSIONS_PATH: ${{ inputs.suppressions_path }}
steps:
- uses: actions/checkout@v4
with:
@@ -55,7 +46,11 @@ jobs:
sudo sysctl -w vm.mmap_rnd_bits=28
- name: TSAN Option Setup
run: |
- echo "TSAN_OPTIONS=log_path=${GITHUB_WORKSPACE}/tsan_log suppressions=${GITHUB_WORKSPACE}/${SUPPRESSIONS_PATH} handle_segv=0" >> "$GITHUB_ENV"
+ echo "TSAN_OPTIONS=log_path=${GITHUB_WORKSPACE}/tsan_log suppressions=${GITHUB_WORKSPACE}/Tools/tsan/suppressions${{
+ fromJSON(inputs.free-threading)
+ && '_free_threading'
+ || ''
+ }}.txt handle_segv=0" >> "$GITHUB_ENV"
echo "CC=clang" >> "$GITHUB_ENV"
echo "CXX=clang++" >> "$GITHUB_ENV"
- name: Add ccache to PATH
@@ -67,7 +62,12 @@ jobs:
save: ${{ github.event_name == 'push' }}
max-size: "200M"
- name: Configure CPython
- run: "${OPTIONS}"
+ run: >-
+ ./configure
+ --config-cache
+ --with-thread-sanitizer
+ --with-pydebug
+ ${{ fromJSON(inputs.free-threading) && '--disable-gil' || '' }}
- name: Build CPython
run: make -j4
- name: Display build info
@@ -81,6 +81,11 @@ jobs:
if: always()
uses: actions/upload-artifact@v4
with:
- name: ${{ inputs.tsan_logs_artifact_name }}
+ name: >-
+ tsan-logs-${{
+ fromJSON(inputs.free-threading)
+ && 'free-threading'
+ || 'default'
+ }}
path: tsan_log.*
if-no-files-found: ignore
diff --git a/Tools/tsan/supressions.txt b/Tools/tsan/suppressions.txt
similarity index 100%
rename from Tools/tsan/supressions.txt
rename to Tools/tsan/suppressions.txt
1
0
Jan. 29, 2025
https://github.com/python/cpython/commit/1a80214f11f1a6ddcea19e2c40719c746a…
commit: 1a80214f11f1a6ddcea19e2c40719c746a163f02
branch: main
author: Pieter Eendebak <pieter.eendebak(a)gmail.com>
committer: markshannon <mark(a)hotpy.org>
date: 2025-01-29T09:15:24Z
summary:
gh-126703: Add freelists for list and tuple iterators (GH-128592)
files:
A Misc/NEWS.d/next/Core_and_Builtins/2025-01-07-19-26-40.gh-issue-126703.9i-S5t.rst
M Include/internal/pycore_freelist_state.h
M Objects/listobject.c
M Objects/object.c
M Objects/tupleobject.c
diff --git a/Include/internal/pycore_freelist_state.h b/Include/internal/pycore_freelist_state.h
index 2ccd1ac055b747..7c252f5b570c13 100644
--- a/Include/internal/pycore_freelist_state.h
+++ b/Include/internal/pycore_freelist_state.h
@@ -11,6 +11,8 @@ extern "C" {
# define PyTuple_MAXSAVESIZE 20 // Largest tuple to save on freelist
# define Py_tuple_MAXFREELIST 2000 // Maximum number of tuples of each size to save
# define Py_lists_MAXFREELIST 80
+# define Py_list_iters_MAXFREELIST 10
+# define Py_tuple_iters_MAXFREELIST 10
# define Py_dicts_MAXFREELIST 80
# define Py_dictkeys_MAXFREELIST 80
# define Py_floats_MAXFREELIST 100
@@ -40,6 +42,8 @@ struct _Py_freelists {
struct _Py_freelist ints;
struct _Py_freelist tuples[PyTuple_MAXSAVESIZE];
struct _Py_freelist lists;
+ struct _Py_freelist list_iters;
+ struct _Py_freelist tuple_iters;
struct _Py_freelist dicts;
struct _Py_freelist dictkeys;
struct _Py_freelist slices;
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-01-07-19-26-40.gh-issue-126703.9i-S5t.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-01-07-19-26-40.gh-issue-126703.9i-S5t.rst
new file mode 100644
index 00000000000000..dcd5f449c98ef3
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-01-07-19-26-40.gh-issue-126703.9i-S5t.rst
@@ -0,0 +1 @@
+Improve performance of iterating over lists and tuples by using a freelist for the iterator objects.
diff --git a/Objects/listobject.c b/Objects/listobject.c
index 099e65c0c25fed..f4a269e4d7b284 100644
--- a/Objects/listobject.c
+++ b/Objects/listobject.c
@@ -3903,15 +3903,17 @@ PyTypeObject PyListIter_Type = {
static PyObject *
list_iter(PyObject *seq)
{
- _PyListIterObject *it;
-
if (!PyList_Check(seq)) {
PyErr_BadInternalCall();
return NULL;
}
- it = PyObject_GC_New(_PyListIterObject, &PyListIter_Type);
- if (it == NULL)
- return NULL;
+ _PyListIterObject *it = _Py_FREELIST_POP(_PyListIterObject, list_iters);
+ if (it == NULL) {
+ it = PyObject_GC_New(_PyListIterObject, &PyListIter_Type);
+ if (it == NULL) {
+ return NULL;
+ }
+ }
it->it_index = 0;
it->it_seq = (PyListObject *)Py_NewRef(seq);
_PyObject_GC_TRACK(it);
@@ -3924,7 +3926,8 @@ listiter_dealloc(PyObject *self)
_PyListIterObject *it = (_PyListIterObject *)self;
_PyObject_GC_UNTRACK(it);
Py_XDECREF(it->it_seq);
- PyObject_GC_Del(it);
+ assert(Py_IS_TYPE(self, &PyListIter_Type));
+ _Py_FREELIST_FREE(list_iters, it, PyObject_GC_Del);
}
static int
diff --git a/Objects/object.c b/Objects/object.c
index cd48d2f75ba490..fdff16138201a0 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -923,6 +923,8 @@ _PyObject_ClearFreeLists(struct _Py_freelists *freelists, int is_finalization)
clear_freelist(&freelists->tuples[i], is_finalization, free_object);
}
clear_freelist(&freelists->lists, is_finalization, free_object);
+ clear_freelist(&freelists->list_iters, is_finalization, free_object);
+ clear_freelist(&freelists->tuple_iters, is_finalization, free_object);
clear_freelist(&freelists->dicts, is_finalization, free_object);
clear_freelist(&freelists->dictkeys, is_finalization, PyMem_Free);
clear_freelist(&freelists->slices, is_finalization, free_object);
diff --git a/Objects/tupleobject.c b/Objects/tupleobject.c
index 7fe8553030a02e..60af9e40e3fe83 100644
--- a/Objects/tupleobject.c
+++ b/Objects/tupleobject.c
@@ -993,7 +993,8 @@ tupleiter_dealloc(PyObject *self)
_PyTupleIterObject *it = _PyTupleIterObject_CAST(self);
_PyObject_GC_UNTRACK(it);
Py_XDECREF(it->it_seq);
- PyObject_GC_Del(it);
+ assert(Py_IS_TYPE(self, &PyTupleIter_Type));
+ _Py_FREELIST_FREE(tuple_iters, it, PyObject_GC_Del);
}
static int
@@ -1119,15 +1120,16 @@ PyTypeObject PyTupleIter_Type = {
static PyObject *
tuple_iter(PyObject *seq)
{
- _PyTupleIterObject *it;
-
if (!PyTuple_Check(seq)) {
PyErr_BadInternalCall();
return NULL;
}
- it = PyObject_GC_New(_PyTupleIterObject, &PyTupleIter_Type);
- if (it == NULL)
- return NULL;
+ _PyTupleIterObject *it = _Py_FREELIST_POP(_PyTupleIterObject, tuple_iters);
+ if (it == NULL) {
+ it = PyObject_GC_New(_PyTupleIterObject, &PyTupleIter_Type);
+ if (it == NULL)
+ return NULL;
+ }
it->it_index = 0;
it->it_seq = (PyTupleObject *)Py_NewRef(seq);
_PyObject_GC_TRACK(it);
1
0
Jan. 29, 2025
https://github.com/python/cpython/commit/41ad2bb2489c4cf9ba4f13401750b3fcdb…
commit: 41ad2bb2489c4cf9ba4f13401750b3fcdb33937b
branch: main
author: Brandt Bucher <brandtbucher(a)microsoft.com>
committer: brandtbucher <brandtbucher(a)gmail.com>
date: 2025-01-28T16:26:46-08:00
summary:
GH-128563: Don't leave frame->lltrace uninitialized (GH-129417)
files:
A Misc/NEWS.d/next/Core_and_Builtins/2025-01-28-11-13-41.gh-issue-128563.xElppE.rst
M Include/internal/pycore_frame.h
diff --git a/Include/internal/pycore_frame.h b/Include/internal/pycore_frame.h
index 155a6f3ce054a2..f6f2776804e85b 100644
--- a/Include/internal/pycore_frame.h
+++ b/Include/internal/pycore_frame.h
@@ -215,6 +215,9 @@ _PyFrame_Initialize(
frame->return_offset = 0;
frame->owner = FRAME_OWNED_BY_THREAD;
frame->visited = 0;
+#ifdef Py_DEBUG
+ frame->lltrace = 0;
+#endif
for (int i = null_locals_from; i < code->co_nlocalsplus; i++) {
frame->localsplus[i] = PyStackRef_NULL;
@@ -398,6 +401,9 @@ _PyFrame_PushTrampolineUnchecked(PyThreadState *tstate, PyCodeObject *code, int
#endif
frame->owner = FRAME_OWNED_BY_THREAD;
frame->visited = 0;
+#ifdef Py_DEBUG
+ frame->lltrace = 0;
+#endif
frame->return_offset = 0;
#ifdef Py_GIL_DISABLED
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-01-28-11-13-41.gh-issue-128563.xElppE.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-01-28-11-13-41.gh-issue-128563.xElppE.rst
new file mode 100644
index 00000000000000..dfd932e836bda6
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-01-28-11-13-41.gh-issue-128563.xElppE.rst
@@ -0,0 +1,2 @@
+Fix an issue where the "lltrace" debug feature could have been incorrectly
+enabled for some frames.
1
0
https://github.com/python/cpython/commit/828b27680f07f1ed8302616b8229f49c09…
commit: 828b27680f07f1ed8302616b8229f49c09032657
branch: main
author: Brandt Bucher <brandtbucher(a)microsoft.com>
committer: brandtbucher <brandtbucher(a)gmail.com>
date: 2025-01-28T16:10:51-08:00
summary:
GH-126599: Remove the PyOptimizer API (GH-129194)
files:
M Include/internal/pycore_interp.h
M Include/internal/pycore_opcode_metadata.h
M Include/internal/pycore_optimizer.h
M Include/opcode_ids.h
M InternalDocs/jit.md
M Lib/_opcode_metadata.py
M Lib/test/support/__init__.py
M Lib/test/test_capi/test_misc.py
M Lib/test/test_capi/test_opt.py
M Lib/test/test_dis.py
M Lib/test/test_opcache.py
M Lib/test/test_regrtest.py
M Modules/_testinternalcapi.c
M Objects/object.c
M Python/bytecodes.c
M Python/generated_cases.c.h
M Python/opcode_targets.h
M Python/optimizer.c
M Python/pylifecycle.c
M Python/pystate.c
M Python/sysmodule.c
M Tools/c-analyzer/cpython/ignored.tsv
M Tools/cases_generator/tier1_generator.py
diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h
index f745b09796753b..6f00eca8de05af 100644
--- a/Include/internal/pycore_interp.h
+++ b/Include/internal/pycore_interp.h
@@ -31,7 +31,7 @@ extern "C" {
#include "pycore_list.h" // struct _Py_list_state
#include "pycore_mimalloc.h" // struct _mimalloc_interp_state
#include "pycore_object_state.h" // struct _py_object_state
-#include "pycore_optimizer.h" // _PyOptimizerObject
+#include "pycore_optimizer.h" // _PyExecutorObject
#include "pycore_obmalloc.h" // struct _obmalloc_state
#include "pycore_qsbr.h" // struct _qsbr_state
#include "pycore_stackref.h" // Py_STACKREF_DEBUG
@@ -262,7 +262,7 @@ struct _is {
struct ast_state ast;
struct types_state types;
struct callable_cache callable_cache;
- _PyOptimizerObject *optimizer;
+ bool jit;
_PyExecutorObject *executor_list_head;
size_t trace_run_counter;
_rare_events rare_events;
diff --git a/Include/internal/pycore_opcode_metadata.h b/Include/internal/pycore_opcode_metadata.h
index bad5e515a99565..98dfead35f7c31 100644
--- a/Include/internal/pycore_opcode_metadata.h
+++ b/Include/internal/pycore_opcode_metadata.h
@@ -271,8 +271,12 @@ int _PyOpcode_num_popped(int opcode, int oparg) {
return 0;
case JUMP_BACKWARD:
return 0;
+ case JUMP_BACKWARD_JIT:
+ return 0;
case JUMP_BACKWARD_NO_INTERRUPT:
return 0;
+ case JUMP_BACKWARD_NO_JIT:
+ return 0;
case JUMP_FORWARD:
return 0;
case JUMP_IF_FALSE:
@@ -742,8 +746,12 @@ int _PyOpcode_num_pushed(int opcode, int oparg) {
return 0;
case JUMP_BACKWARD:
return 0;
+ case JUMP_BACKWARD_JIT:
+ return 0;
case JUMP_BACKWARD_NO_INTERRUPT:
return 0;
+ case JUMP_BACKWARD_NO_JIT:
+ return 0;
case JUMP_FORWARD:
return 0;
case JUMP_IF_FALSE:
@@ -1467,10 +1475,18 @@ int _PyOpcode_max_stack_effect(int opcode, int oparg, int *effect) {
*effect = 0;
return 0;
}
+ case JUMP_BACKWARD_JIT: {
+ *effect = 0;
+ return 0;
+ }
case JUMP_BACKWARD_NO_INTERRUPT: {
*effect = 0;
return 0;
}
+ case JUMP_BACKWARD_NO_JIT: {
+ *effect = 0;
+ return 0;
+ }
case JUMP_FORWARD: {
*effect = 0;
return 0;
@@ -2110,7 +2126,9 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[266] = {
[INTERPRETER_EXIT] = { true, INSTR_FMT_IX, 0 },
[IS_OP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
[JUMP_BACKWARD] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG },
+ [JUMP_BACKWARD_JIT] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG },
[JUMP_BACKWARD_NO_INTERRUPT] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG },
+ [JUMP_BACKWARD_NO_JIT] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG },
[JUMP_FORWARD] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG },
[LIST_APPEND] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG },
[LIST_EXTEND] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG },
@@ -2539,7 +2557,9 @@ const char *_PyOpcode_OpName[266] = {
[IS_OP] = "IS_OP",
[JUMP] = "JUMP",
[JUMP_BACKWARD] = "JUMP_BACKWARD",
+ [JUMP_BACKWARD_JIT] = "JUMP_BACKWARD_JIT",
[JUMP_BACKWARD_NO_INTERRUPT] = "JUMP_BACKWARD_NO_INTERRUPT",
+ [JUMP_BACKWARD_NO_JIT] = "JUMP_BACKWARD_NO_JIT",
[JUMP_FORWARD] = "JUMP_FORWARD",
[JUMP_IF_FALSE] = "JUMP_IF_FALSE",
[JUMP_IF_TRUE] = "JUMP_IF_TRUE",
@@ -2800,7 +2820,9 @@ const uint8_t _PyOpcode_Deopt[256] = {
[INTERPRETER_EXIT] = INTERPRETER_EXIT,
[IS_OP] = IS_OP,
[JUMP_BACKWARD] = JUMP_BACKWARD,
+ [JUMP_BACKWARD_JIT] = JUMP_BACKWARD,
[JUMP_BACKWARD_NO_INTERRUPT] = JUMP_BACKWARD_NO_INTERRUPT,
+ [JUMP_BACKWARD_NO_JIT] = JUMP_BACKWARD,
[JUMP_FORWARD] = JUMP_FORWARD,
[LIST_APPEND] = LIST_APPEND,
[LIST_EXTEND] = LIST_EXTEND,
@@ -2939,8 +2961,6 @@ const uint8_t _PyOpcode_Deopt[256] = {
case 146: \
case 147: \
case 148: \
- case 230: \
- case 231: \
case 232: \
case 233: \
case 234: \
diff --git a/Include/internal/pycore_optimizer.h b/Include/internal/pycore_optimizer.h
index 03ce4d4491acd7..e806e306d2d57f 100644
--- a/Include/internal/pycore_optimizer.h
+++ b/Include/internal/pycore_optimizer.h
@@ -83,23 +83,6 @@ typedef struct _PyExecutorObject {
_PyExitData exits[1];
} _PyExecutorObject;
-typedef struct _PyOptimizerObject _PyOptimizerObject;
-
-/* Should return > 0 if a new executor is created. O if no executor is produced and < 0 if an error occurred. */
-typedef int (*_Py_optimize_func)(
- _PyOptimizerObject* self, struct _PyInterpreterFrame *frame,
- _Py_CODEUNIT *instr, _PyExecutorObject **exec_ptr,
- int curr_stackentries, bool progress_needed);
-
-struct _PyOptimizerObject {
- PyObject_HEAD
- _Py_optimize_func optimize;
- /* Data needed by the optimizer goes here, but is opaque to the VM */
-};
-
-/** Test support **/
-_PyOptimizerObject *_Py_SetOptimizer(PyInterpreterState *interp, _PyOptimizerObject* optimizer);
-
// Export for '_opcode' shared extension (JIT compiler).
PyAPI_FUNC(_PyExecutorObject*) _Py_GetExecutor(PyCodeObject *code, int offset);
@@ -110,12 +93,6 @@ void _Py_BloomFilter_Init(_PyBloomFilter *);
void _Py_BloomFilter_Add(_PyBloomFilter *bloom, void *obj);
PyAPI_FUNC(void) _Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj);
-// For testing
-// Export for '_testinternalcapi' shared extension.
-PyAPI_FUNC(_PyOptimizerObject *) _Py_GetOptimizer(void);
-PyAPI_FUNC(int) _Py_SetTier2Optimizer(_PyOptimizerObject* optimizer);
-PyAPI_FUNC(PyObject *) _PyOptimizer_NewUOpOptimizer(void);
-
#define _Py_MAX_ALLOWED_BUILTINS_MODIFICATIONS 3
#define _Py_MAX_ALLOWED_GLOBALS_MODIFICATIONS 6
@@ -144,9 +121,7 @@ int _Py_uop_analyze_and_optimize(struct _PyInterpreterFrame *frame,
_PyUOpInstruction *trace, int trace_len, int curr_stackentries,
_PyBloomFilter *dependencies);
-extern PyTypeObject _PyDefaultOptimizer_Type;
extern PyTypeObject _PyUOpExecutor_Type;
-extern PyTypeObject _PyUOpOptimizer_Type;
#define UOP_FORMAT_TARGET 0
diff --git a/Include/opcode_ids.h b/Include/opcode_ids.h
index c3b58825bfc938..4a9fc15dcd2880 100644
--- a/Include/opcode_ids.h
+++ b/Include/opcode_ids.h
@@ -174,41 +174,43 @@ extern "C" {
#define FOR_ITER_LIST 192
#define FOR_ITER_RANGE 193
#define FOR_ITER_TUPLE 194
-#define LOAD_ATTR_CLASS 195
-#define LOAD_ATTR_CLASS_WITH_METACLASS_CHECK 196
-#define LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN 197
-#define LOAD_ATTR_INSTANCE_VALUE 198
-#define LOAD_ATTR_METHOD_LAZY_DICT 199
-#define LOAD_ATTR_METHOD_NO_DICT 200
-#define LOAD_ATTR_METHOD_WITH_VALUES 201
-#define LOAD_ATTR_MODULE 202
-#define LOAD_ATTR_NONDESCRIPTOR_NO_DICT 203
-#define LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES 204
-#define LOAD_ATTR_PROPERTY 205
-#define LOAD_ATTR_SLOT 206
-#define LOAD_ATTR_WITH_HINT 207
-#define LOAD_CONST_IMMORTAL 208
-#define LOAD_CONST_MORTAL 209
-#define LOAD_GLOBAL_BUILTIN 210
-#define LOAD_GLOBAL_MODULE 211
-#define LOAD_SUPER_ATTR_ATTR 212
-#define LOAD_SUPER_ATTR_METHOD 213
-#define RESUME_CHECK 214
-#define SEND_GEN 215
-#define STORE_ATTR_INSTANCE_VALUE 216
-#define STORE_ATTR_SLOT 217
-#define STORE_ATTR_WITH_HINT 218
-#define STORE_SUBSCR_DICT 219
-#define STORE_SUBSCR_LIST_INT 220
-#define TO_BOOL_ALWAYS_TRUE 221
-#define TO_BOOL_BOOL 222
-#define TO_BOOL_INT 223
-#define TO_BOOL_LIST 224
-#define TO_BOOL_NONE 225
-#define TO_BOOL_STR 226
-#define UNPACK_SEQUENCE_LIST 227
-#define UNPACK_SEQUENCE_TUPLE 228
-#define UNPACK_SEQUENCE_TWO_TUPLE 229
+#define JUMP_BACKWARD_JIT 195
+#define JUMP_BACKWARD_NO_JIT 196
+#define LOAD_ATTR_CLASS 197
+#define LOAD_ATTR_CLASS_WITH_METACLASS_CHECK 198
+#define LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN 199
+#define LOAD_ATTR_INSTANCE_VALUE 200
+#define LOAD_ATTR_METHOD_LAZY_DICT 201
+#define LOAD_ATTR_METHOD_NO_DICT 202
+#define LOAD_ATTR_METHOD_WITH_VALUES 203
+#define LOAD_ATTR_MODULE 204
+#define LOAD_ATTR_NONDESCRIPTOR_NO_DICT 205
+#define LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES 206
+#define LOAD_ATTR_PROPERTY 207
+#define LOAD_ATTR_SLOT 208
+#define LOAD_ATTR_WITH_HINT 209
+#define LOAD_CONST_IMMORTAL 210
+#define LOAD_CONST_MORTAL 211
+#define LOAD_GLOBAL_BUILTIN 212
+#define LOAD_GLOBAL_MODULE 213
+#define LOAD_SUPER_ATTR_ATTR 214
+#define LOAD_SUPER_ATTR_METHOD 215
+#define RESUME_CHECK 216
+#define SEND_GEN 217
+#define STORE_ATTR_INSTANCE_VALUE 218
+#define STORE_ATTR_SLOT 219
+#define STORE_ATTR_WITH_HINT 220
+#define STORE_SUBSCR_DICT 221
+#define STORE_SUBSCR_LIST_INT 222
+#define TO_BOOL_ALWAYS_TRUE 223
+#define TO_BOOL_BOOL 224
+#define TO_BOOL_INT 225
+#define TO_BOOL_LIST 226
+#define TO_BOOL_NONE 227
+#define TO_BOOL_STR 228
+#define UNPACK_SEQUENCE_LIST 229
+#define UNPACK_SEQUENCE_TUPLE 230
+#define UNPACK_SEQUENCE_TWO_TUPLE 231
#define INSTRUMENTED_END_FOR 235
#define INSTRUMENTED_POP_ITER 236
#define INSTRUMENTED_END_SEND 237
diff --git a/InternalDocs/jit.md b/InternalDocs/jit.md
index 1e9f385d5f87fa..2c204f39792d6a 100644
--- a/InternalDocs/jit.md
+++ b/InternalDocs/jit.md
@@ -38,12 +38,8 @@ executor in `co_executors`.
## The micro-op optimizer
-The optimizer that `_PyOptimizer_Optimize()` runs is configurable via the
-`_Py_SetTier2Optimizer()` function (this is used in test via
-`_testinternalcapi.set_optimizer()`.)
-
The micro-op (abbreviated `uop` to approximate `μop`) optimizer is defined in
-[`Python/optimizer.c`](../Python/optimizer.c) as the type `_PyUOpOptimizer_Type`.
+[`Python/optimizer.c`](../Python/optimizer.c) as `_PyOptimizer_Optimize`.
It translates an instruction trace into a sequence of micro-ops by replacing
each bytecode by an equivalent sequence of micro-ops (see
`_PyOpcode_macro_expansion` in
diff --git a/Lib/_opcode_metadata.py b/Lib/_opcode_metadata.py
index 459f7411296bcd..12c41374592185 100644
--- a/Lib/_opcode_metadata.py
+++ b/Lib/_opcode_metadata.py
@@ -85,6 +85,10 @@
"CONTAINS_OP_SET",
"CONTAINS_OP_DICT",
],
+ "JUMP_BACKWARD": [
+ "JUMP_BACKWARD_NO_JIT",
+ "JUMP_BACKWARD_JIT",
+ ],
"FOR_ITER": [
"FOR_ITER_LIST",
"FOR_ITER_TUPLE",
@@ -167,41 +171,43 @@
'FOR_ITER_LIST': 192,
'FOR_ITER_RANGE': 193,
'FOR_ITER_TUPLE': 194,
- 'LOAD_ATTR_CLASS': 195,
- 'LOAD_ATTR_CLASS_WITH_METACLASS_CHECK': 196,
- 'LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN': 197,
- 'LOAD_ATTR_INSTANCE_VALUE': 198,
- 'LOAD_ATTR_METHOD_LAZY_DICT': 199,
- 'LOAD_ATTR_METHOD_NO_DICT': 200,
- 'LOAD_ATTR_METHOD_WITH_VALUES': 201,
- 'LOAD_ATTR_MODULE': 202,
- 'LOAD_ATTR_NONDESCRIPTOR_NO_DICT': 203,
- 'LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES': 204,
- 'LOAD_ATTR_PROPERTY': 205,
- 'LOAD_ATTR_SLOT': 206,
- 'LOAD_ATTR_WITH_HINT': 207,
- 'LOAD_CONST_IMMORTAL': 208,
- 'LOAD_CONST_MORTAL': 209,
- 'LOAD_GLOBAL_BUILTIN': 210,
- 'LOAD_GLOBAL_MODULE': 211,
- 'LOAD_SUPER_ATTR_ATTR': 212,
- 'LOAD_SUPER_ATTR_METHOD': 213,
- 'RESUME_CHECK': 214,
- 'SEND_GEN': 215,
- 'STORE_ATTR_INSTANCE_VALUE': 216,
- 'STORE_ATTR_SLOT': 217,
- 'STORE_ATTR_WITH_HINT': 218,
- 'STORE_SUBSCR_DICT': 219,
- 'STORE_SUBSCR_LIST_INT': 220,
- 'TO_BOOL_ALWAYS_TRUE': 221,
- 'TO_BOOL_BOOL': 222,
- 'TO_BOOL_INT': 223,
- 'TO_BOOL_LIST': 224,
- 'TO_BOOL_NONE': 225,
- 'TO_BOOL_STR': 226,
- 'UNPACK_SEQUENCE_LIST': 227,
- 'UNPACK_SEQUENCE_TUPLE': 228,
- 'UNPACK_SEQUENCE_TWO_TUPLE': 229,
+ 'JUMP_BACKWARD_JIT': 195,
+ 'JUMP_BACKWARD_NO_JIT': 196,
+ 'LOAD_ATTR_CLASS': 197,
+ 'LOAD_ATTR_CLASS_WITH_METACLASS_CHECK': 198,
+ 'LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN': 199,
+ 'LOAD_ATTR_INSTANCE_VALUE': 200,
+ 'LOAD_ATTR_METHOD_LAZY_DICT': 201,
+ 'LOAD_ATTR_METHOD_NO_DICT': 202,
+ 'LOAD_ATTR_METHOD_WITH_VALUES': 203,
+ 'LOAD_ATTR_MODULE': 204,
+ 'LOAD_ATTR_NONDESCRIPTOR_NO_DICT': 205,
+ 'LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES': 206,
+ 'LOAD_ATTR_PROPERTY': 207,
+ 'LOAD_ATTR_SLOT': 208,
+ 'LOAD_ATTR_WITH_HINT': 209,
+ 'LOAD_CONST_IMMORTAL': 210,
+ 'LOAD_CONST_MORTAL': 211,
+ 'LOAD_GLOBAL_BUILTIN': 212,
+ 'LOAD_GLOBAL_MODULE': 213,
+ 'LOAD_SUPER_ATTR_ATTR': 214,
+ 'LOAD_SUPER_ATTR_METHOD': 215,
+ 'RESUME_CHECK': 216,
+ 'SEND_GEN': 217,
+ 'STORE_ATTR_INSTANCE_VALUE': 218,
+ 'STORE_ATTR_SLOT': 219,
+ 'STORE_ATTR_WITH_HINT': 220,
+ 'STORE_SUBSCR_DICT': 221,
+ 'STORE_SUBSCR_LIST_INT': 222,
+ 'TO_BOOL_ALWAYS_TRUE': 223,
+ 'TO_BOOL_BOOL': 224,
+ 'TO_BOOL_INT': 225,
+ 'TO_BOOL_LIST': 226,
+ 'TO_BOOL_NONE': 227,
+ 'TO_BOOL_STR': 228,
+ 'UNPACK_SEQUENCE_LIST': 229,
+ 'UNPACK_SEQUENCE_TUPLE': 230,
+ 'UNPACK_SEQUENCE_TWO_TUPLE': 231,
}
opmap = {
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index 89f2a6b916bfc2..6436753f998a16 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -58,7 +58,8 @@
"LOOPBACK_TIMEOUT", "INTERNET_TIMEOUT", "SHORT_TIMEOUT", "LONG_TIMEOUT",
"Py_DEBUG", "exceeds_recursion_limit", "get_c_recursion_limit",
"skip_on_s390x",
- "without_optimizer",
+ "requires_jit_enabled",
+ "requires_jit_disabled",
"force_not_colorized",
"force_not_colorized_test_class",
"make_clean_env",
@@ -2620,21 +2621,13 @@ def exceeds_recursion_limit():
Py_TRACE_REFS = hasattr(sys, 'getobjects')
-# Decorator to disable optimizer while a function run
-def without_optimizer(func):
- try:
- from _testinternalcapi import get_optimizer, set_optimizer
- except ImportError:
- return func
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- save_opt = get_optimizer()
- try:
- set_optimizer(None)
- return func(*args, **kwargs)
- finally:
- set_optimizer(save_opt)
- return wrapper
+try:
+ from _testinternalcapi import jit_enabled
+except ImportError:
+ requires_jit_enabled = requires_jit_disabled = unittest.skip("requires _testinternalcapi")
+else:
+ requires_jit_enabled = unittest.skipUnless(jit_enabled(), "requires JIT enabled")
+ requires_jit_disabled = unittest.skipIf(jit_enabled(), "requires JIT disabled")
_BASE_COPY_SRC_DIR_IGNORED_NAMES = frozenset({
diff --git a/Lib/test/test_capi/test_misc.py b/Lib/test/test_capi/test_misc.py
index 114e7cdfd0cd9c..1087b38c225085 100644
--- a/Lib/test/test_capi/test_misc.py
+++ b/Lib/test/test_capi/test_misc.py
@@ -306,7 +306,7 @@ def test_getitem_with_error(self):
CURRENT_THREAD_REGEX +
r' File .*, line 6 in <module>\n'
r'\n'
- r'Extension modules: _testcapi \(total: 1\)\n')
+ r'Extension modules: _testcapi, _testinternalcapi \(total: 2\)\n')
else:
# Python built with NDEBUG macro defined:
# test _Py_CheckFunctionResult() instead.
diff --git a/Lib/test/test_capi/test_opt.py b/Lib/test/test_capi/test_opt.py
index 6a2f7726222f9b..d3aea37e094e61 100644
--- a/Lib/test/test_capi/test_opt.py
+++ b/Lib/test/test_capi/test_opt.py
@@ -9,21 +9,12 @@
import _opcode
from test.support import (script_helper, requires_specialization,
- import_helper, Py_GIL_DISABLED)
+ import_helper, Py_GIL_DISABLED, requires_jit_enabled)
_testinternalcapi = import_helper.import_module("_testinternalcapi")
from _testinternalcapi import TIER2_THRESHOLD
-@contextlib.contextmanager
-def temporary_optimizer(opt):
- old_opt = _testinternalcapi.get_optimizer()
- _testinternalcapi.set_optimizer(opt)
- try:
- yield
- finally:
- _testinternalcapi.set_optimizer(old_opt)
-
@contextlib.contextmanager
def clear_executors(func):
@@ -57,8 +48,7 @@ def get_opnames(ex):
@requires_specialization
@unittest.skipIf(Py_GIL_DISABLED, "optimizer not yet supported in free-threaded builds")
-@unittest.skipUnless(hasattr(_testinternalcapi, "get_optimizer"),
- "Requires optimizer infrastructure")
+@requires_jit_enabled
class TestExecutorInvalidation(unittest.TestCase):
def test_invalidate_object(self):
@@ -75,10 +65,8 @@ def f{n}():
funcs = [ ns[f'f{n}'] for n in range(5)]
objects = [object() for _ in range(5)]
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- for f in funcs:
- f()
+ for f in funcs:
+ f()
executors = [get_first_executor(f) for f in funcs]
# Set things up so each executor depends on the objects
# with an equal or lower index.
@@ -106,9 +94,7 @@ def f():
pass
"""), ns, ns)
f = ns['f']
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- f()
+ f()
exe = get_first_executor(f)
self.assertIsNotNone(exe)
self.assertTrue(exe.is_valid())
@@ -119,9 +105,7 @@ def test_sys__clear_internal_caches(self):
def f():
for _ in range(TIER2_THRESHOLD):
pass
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- f()
+ f()
exe = get_first_executor(f)
self.assertIsNotNone(exe)
self.assertTrue(exe.is_valid())
@@ -133,8 +117,7 @@ def f():
@requires_specialization
@unittest.skipIf(Py_GIL_DISABLED, "optimizer not yet supported in free-threaded builds")
-@unittest.skipUnless(hasattr(_testinternalcapi, "get_optimizer"),
- "Requires optimizer infrastructure")
+@requires_jit_enabled
@unittest.skipIf(os.getenv("PYTHON_UOPS_OPTIMIZE") == "0", "Needs uop optimizer to run.")
class TestUops(unittest.TestCase):
@@ -144,9 +127,7 @@ def testfunc(x):
while i < x:
i += 1
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- testfunc(TIER2_THRESHOLD)
+ testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -192,11 +173,9 @@ def many_vars():
"""), ns, ns)
many_vars = ns["many_vars"]
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- ex = get_first_executor(many_vars)
- self.assertIsNone(ex)
- many_vars()
+ ex = get_first_executor(many_vars)
+ self.assertIsNone(ex)
+ many_vars()
ex = get_first_executor(many_vars)
self.assertIsNotNone(ex)
@@ -215,10 +194,7 @@ def testfunc(x):
while i < x:
i += 1
- opt = _testinternalcapi.new_uop_optimizer()
-
- with temporary_optimizer(opt):
- testfunc(TIER2_THRESHOLD)
+ testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -231,9 +207,7 @@ def testfunc(n):
while i < n:
i += 1
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- testfunc(TIER2_THRESHOLD)
+ testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -246,9 +220,7 @@ def testfunc(a):
if x is None:
x = 0
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- testfunc(range(TIER2_THRESHOLD))
+ testfunc(range(TIER2_THRESHOLD))
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -263,9 +235,7 @@ def testfunc(a):
if x is not None:
x = 0
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- testfunc(range(TIER2_THRESHOLD))
+ testfunc(range(TIER2_THRESHOLD))
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -279,9 +249,7 @@ def testfunc(n):
while not i >= n:
i += 1
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- testfunc(TIER2_THRESHOLD)
+ testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -294,9 +262,7 @@ def testfunc(n):
while i < n:
i += 1
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- testfunc(TIER2_THRESHOLD)
+ testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -314,9 +280,7 @@ def testfunc(n):
a += 1
return a
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- testfunc(TIER2_THRESHOLD)
+ testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -332,10 +296,8 @@ def testfunc(n):
total += i
return total
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- total = testfunc(TIER2_THRESHOLD)
- self.assertEqual(total, sum(range(TIER2_THRESHOLD)))
+ total = testfunc(TIER2_THRESHOLD)
+ self.assertEqual(total, sum(range(TIER2_THRESHOLD)))
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -353,11 +315,9 @@ def testfunc(a):
total += i
return total
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- a = list(range(TIER2_THRESHOLD))
- total = testfunc(a)
- self.assertEqual(total, sum(a))
+ a = list(range(TIER2_THRESHOLD))
+ total = testfunc(a)
+ self.assertEqual(total, sum(a))
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -375,11 +335,9 @@ def testfunc(a):
total += i
return total
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- a = tuple(range(TIER2_THRESHOLD))
- total = testfunc(a)
- self.assertEqual(total, sum(a))
+ a = tuple(range(TIER2_THRESHOLD))
+ total = testfunc(a)
+ self.assertEqual(total, sum(a))
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -395,14 +353,12 @@ def testfunc(it):
for x in it:
pass
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- a = [1, 2, 3]
- it = iter(a)
- testfunc(it)
- a.append(4)
- with self.assertRaises(StopIteration):
- next(it)
+ a = [1, 2, 3]
+ it = iter(a)
+ testfunc(it)
+ a.append(4)
+ with self.assertRaises(StopIteration):
+ next(it)
def test_call_py_exact_args(self):
def testfunc(n):
@@ -411,9 +367,7 @@ def dummy(x):
for i in range(n):
dummy(i)
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- testfunc(TIER2_THRESHOLD)
+ testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -429,9 +383,7 @@ def testfunc(n):
else:
i = 1
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- testfunc(TIER2_THRESHOLD)
+ testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -457,9 +409,7 @@ def testfunc(n, m):
x += 1000*i + j
return x
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- x = testfunc(TIER2_THRESHOLD, TIER2_THRESHOLD)
+ x = testfunc(TIER2_THRESHOLD, TIER2_THRESHOLD)
self.assertEqual(x, sum(range(TIER2_THRESHOLD)) * TIER2_THRESHOLD * 1001)
@@ -484,9 +434,7 @@ def testfunc(n):
bits += 1
return bits
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- x = testfunc(TIER2_THRESHOLD * 2)
+ x = testfunc(TIER2_THRESHOLD * 2)
self.assertEqual(x, TIER2_THRESHOLD * 5)
ex = get_first_executor(testfunc)
@@ -499,16 +447,12 @@ def testfunc(n):
@requires_specialization
@unittest.skipIf(Py_GIL_DISABLED, "optimizer not yet supported in free-threaded builds")
-@unittest.skipUnless(hasattr(_testinternalcapi, "get_optimizer"),
- "Requires optimizer infrastructure")
+@requires_jit_enabled
@unittest.skipIf(os.getenv("PYTHON_UOPS_OPTIMIZE") == "0", "Needs uop optimizer to run.")
class TestUopsOptimization(unittest.TestCase):
def _run_with_optimizer(self, testfunc, arg):
- res = None
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- res = testfunc(arg)
+ res = testfunc(arg)
ex = get_first_executor(testfunc)
return res, ex
@@ -542,10 +486,7 @@ def testfunc(loops):
num += 1
return a
- opt = _testinternalcapi.new_uop_optimizer()
- res = None
- with temporary_optimizer(opt):
- res = testfunc(TIER2_THRESHOLD)
+ res = testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -566,10 +507,7 @@ def testfunc(loops):
num += 1
return x
- opt = _testinternalcapi.new_uop_optimizer()
- res = None
- with temporary_optimizer(opt):
- res = testfunc(TIER2_THRESHOLD)
+ res = testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -661,16 +599,14 @@ def testfunc(n):
for i in range(n):
dummy(i)
- opt = _testinternalcapi.new_uop_optimizer()
# Trigger specialization
testfunc(8)
- with temporary_optimizer(opt):
- del dummy
- gc.collect()
+ del dummy
+ gc.collect()
- def dummy(x):
- return x + 2
- testfunc(32)
+ def dummy(x):
+ return x + 2
+ testfunc(32)
ex = get_first_executor(testfunc)
# Honestly as long as it doesn't crash it's fine.
@@ -703,8 +639,6 @@ def testfunc(n):
x = range(i)
return x
- opt = _testinternalcapi.new_uop_optimizer()
- _testinternalcapi.set_optimizer(opt)
testfunc(_testinternalcapi.TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
@@ -712,7 +646,7 @@ def testfunc(n):
uops = get_opnames(ex)
assert "_LOAD_GLOBAL_BUILTINS" not in uops
assert "_LOAD_CONST_INLINE_BORROW" in uops
- """))
+ """), PYTHON_JIT="1")
self.assertEqual(result[0].rc, 0, result)
def test_float_add_constant_propagation(self):
@@ -1399,9 +1333,7 @@ def testfunc(n):
# Only works on functions promoted to constants
global_identity(i)
- opt = _testinternalcapi.new_uop_optimizer()
- with temporary_optimizer(opt):
- testfunc(TIER2_THRESHOLD)
+ testfunc(TIER2_THRESHOLD)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -1488,12 +1420,12 @@ def test_decref_escapes(self):
class Convert9999ToNone:
def __del__(self):
ns = sys._getframe(1).f_locals
- if ns["i"] == 9999:
+ if ns["i"] == _testinternalcapi.TIER2_THRESHOLD:
ns["i"] = None
def crash_addition():
try:
- for i in range(10000):
+ for i in range(_testinternalcapi.TIER2_THRESHOLD + 1):
n = Convert9999ToNone()
i + i # Remove guards for i.
n = None # Change i.
diff --git a/Lib/test/test_dis.py b/Lib/test/test_dis.py
index df4bdf4a3dd312..2e149b32e5c1ec 100644
--- a/Lib/test/test_dis.py
+++ b/Lib/test/test_dis.py
@@ -15,7 +15,7 @@
import unittest
from test.support import (captured_stdout, requires_debug_ranges,
requires_specialization, cpython_only,
- os_helper)
+ os_helper, import_helper)
from test.support.bytecode_helper import BytecodeTestCase
@@ -904,7 +904,7 @@ def loop_test():
LOAD_FAST 0 (i)
CALL_PY_GENERAL 1
POP_TOP
- JUMP_BACKWARD 16 (to L1)
+ JUMP_BACKWARD_{: <6} 16 (to L1)
%3d L2: END_FOR
POP_ITER
@@ -1308,7 +1308,8 @@ def test_loop_quicken(self):
# Loop can trigger a quicken where the loop is located
self.code_quicken(loop_test, 4)
got = self.get_disassembly(loop_test, adaptive=True)
- expected = dis_loop_test_quickened_code
+ jit = import_helper.import_module("_testinternalcapi").jit_enabled()
+ expected = dis_loop_test_quickened_code.format("JIT" if jit else "NO_JIT")
self.do_disassembly_compare(got, expected)
@cpython_only
diff --git a/Lib/test/test_opcache.py b/Lib/test/test_opcache.py
index e8ea21f8179978..cc58a4b8c3cd11 100644
--- a/Lib/test/test_opcache.py
+++ b/Lib/test/test_opcache.py
@@ -6,7 +6,7 @@
import unittest
from test.support import (threading_helper, check_impl_detail,
requires_specialization, requires_specialization_ft,
- cpython_only)
+ cpython_only, requires_jit_disabled)
from test.support.import_helper import import_module
# Skip this module on other interpreters, it is cpython specific:
@@ -16,20 +16,6 @@
_testinternalcapi = import_module("_testinternalcapi")
-def disabling_optimizer(func):
- def wrapper(*args, **kwargs):
- if not hasattr(_testinternalcapi, "get_optimizer"):
- return func(*args, **kwargs)
- old_opt = _testinternalcapi.get_optimizer()
- _testinternalcapi.set_optimizer(None)
- try:
- return func(*args, **kwargs)
- finally:
- _testinternalcapi.set_optimizer(old_opt)
-
- return wrapper
-
-
class TestBase(unittest.TestCase):
def assert_specialized(self, f, opname):
instructions = dis.get_instructions(f, adaptive=True)
@@ -526,7 +512,7 @@ def f(x, y):
f(None)
f()
- @disabling_optimizer
+ @requires_jit_disabled
@requires_specialization_ft
def test_assign_init_code(self):
class MyClass:
@@ -549,7 +535,7 @@ def count_args(self, *args):
MyClass.__init__.__code__ = count_args.__code__
instantiate()
- @disabling_optimizer
+ @requires_jit_disabled
@requires_specialization_ft
def test_push_init_frame_fails(self):
def instantiate():
@@ -583,7 +569,7 @@ class TestRacesDoNotCrash(TestBase):
WARMUPS = 2
WRITERS = 2
- @disabling_optimizer
+ @requires_jit_disabled
def assert_races_do_not_crash(
self, opname, get_items, read, write, *, check_items=False
):
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index e9ef830c848aad..969f483814d08d 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -1185,7 +1185,7 @@ def test_run(self):
stats=TestStats(4, 1),
forever=True)
- @support.without_optimizer
+ @support.requires_jit_disabled
def check_leak(self, code, what, *, run_workers=False):
test = self.create_test('huntrleaks', code=code)
diff --git a/Modules/_testinternalcapi.c b/Modules/_testinternalcapi.c
index f3d234a7f9595e..9f38a9d7a9a5c2 100644
--- a/Modules/_testinternalcapi.c
+++ b/Modules/_testinternalcapi.c
@@ -950,38 +950,13 @@ get_co_framesize(PyObject *self, PyObject *arg)
return PyLong_FromLong(code->co_framesize);
}
-#ifdef _Py_TIER2
-
-static PyObject *
-new_uop_optimizer(PyObject *self, PyObject *arg)
-{
- return _PyOptimizer_NewUOpOptimizer();
-}
-
static PyObject *
-set_optimizer(PyObject *self, PyObject *opt)
+jit_enabled(PyObject *self, PyObject *arg)
{
- if (opt == Py_None) {
- opt = NULL;
- }
- if (_Py_SetTier2Optimizer((_PyOptimizerObject*)opt) < 0) {
- return NULL;
- }
- Py_RETURN_NONE;
+ return PyBool_FromLong(_PyInterpreterState_GET()->jit);
}
-static PyObject *
-get_optimizer(PyObject *self, PyObject *Py_UNUSED(ignored))
-{
- PyObject *opt = NULL;
#ifdef _Py_TIER2
- opt = (PyObject *)_Py_GetOptimizer();
-#endif
- if (opt == NULL) {
- Py_RETURN_NONE;
- }
- return opt;
-}
static PyObject *
add_executor_dependency(PyObject *self, PyObject *args)
@@ -2047,10 +2022,8 @@ static PyMethodDef module_functions[] = {
{"iframe_getline", iframe_getline, METH_O, NULL},
{"iframe_getlasti", iframe_getlasti, METH_O, NULL},
{"get_co_framesize", get_co_framesize, METH_O, NULL},
+ {"jit_enabled", jit_enabled, METH_NOARGS, NULL},
#ifdef _Py_TIER2
- {"get_optimizer", get_optimizer, METH_NOARGS, NULL},
- {"set_optimizer", set_optimizer, METH_O, NULL},
- {"new_uop_optimizer", new_uop_optimizer, METH_NOARGS, NULL},
{"add_executor_dependency", add_executor_dependency, METH_VARARGS, NULL},
{"invalidate_executors", invalidate_executors, METH_O, NULL},
#endif
diff --git a/Objects/object.c b/Objects/object.c
index a70a2c3fc2f3dd..cd48d2f75ba490 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -19,7 +19,7 @@
#include "pycore_object.h" // PyAPI_DATA() _Py_SwappedOp definition
#include "pycore_object_state.h" // struct _reftracer_runtime_state
#include "pycore_long.h" // _PyLong_GetZero()
-#include "pycore_optimizer.h" // _PyUOpExecutor_Type, _PyUOpOptimizer_Type, ...
+#include "pycore_optimizer.h" // _PyUOpExecutor_Type, ...
#include "pycore_pyerrors.h" // _PyErr_Occurred()
#include "pycore_pymem.h" // _PyMem_IsPtrFreed()
#include "pycore_pystate.h" // _PyThreadState_GET()
@@ -2379,9 +2379,6 @@ static PyTypeObject* static_types[] = {
&_PyBufferWrapper_Type,
&_PyContextTokenMissing_Type,
&_PyCoroWrapper_Type,
-#ifdef _Py_TIER2
- &_PyDefaultOptimizer_Type,
-#endif
&_Py_GenericAliasIterType,
&_PyHamtItems_Type,
&_PyHamtKeys_Type,
@@ -2404,7 +2401,6 @@ static PyTypeObject* static_types[] = {
&_PyUnion_Type,
#ifdef _Py_TIER2
&_PyUOpExecutor_Type,
- &_PyUOpOptimizer_Type,
#endif
&_PyWeakref_CallableProxyType,
&_PyWeakref_ProxyType,
diff --git a/Python/bytecodes.c b/Python/bytecodes.c
index 7d463511aee41d..0d7b9f2a781019 100644
--- a/Python/bytecodes.c
+++ b/Python/bytecodes.c
@@ -2782,13 +2782,26 @@ dummy_func(
JUMPBY(oparg);
}
- tier1 op(_JUMP_BACKWARD, (the_counter/1 --)) {
- assert(oparg <= INSTR_OFFSET());
- JUMPBY(-oparg);
- #ifdef _Py_TIER2
- #if ENABLE_SPECIALIZATION
+ family(JUMP_BACKWARD, 1) = {
+ JUMP_BACKWARD_NO_JIT,
+ JUMP_BACKWARD_JIT,
+ };
+
+ tier1 op(_SPECIALIZE_JUMP_BACKWARD, (--)) {
+ #if ENABLE_SPECIALIZATION
+ if (this_instr->op.code == JUMP_BACKWARD) {
+ this_instr->op.code = tstate->interp->jit ? JUMP_BACKWARD_JIT : JUMP_BACKWARD_NO_JIT;
+ // Need to re-dispatch so the warmup counter isn't off by one:
+ next_instr = this_instr;
+ DISPATCH_SAME_OPARG();
+ }
+ #endif
+ }
+
+ tier1 op(_JIT, (--)) {
+ #ifdef _Py_TIER2
_Py_BackoffCounter counter = this_instr[1].counter;
- if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD) {
+ if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD_JIT) {
_Py_CODEUNIT *start = this_instr;
/* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */
while (oparg > 255) {
@@ -2811,13 +2824,25 @@ dummy_func(
else {
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
}
- #endif /* ENABLE_SPECIALIZATION */
- #endif /* _Py_TIER2 */
+ #endif
}
macro(JUMP_BACKWARD) =
+ unused/1 +
+ _SPECIALIZE_JUMP_BACKWARD +
_CHECK_PERIODIC +
- _JUMP_BACKWARD;
+ JUMP_BACKWARD_NO_INTERRUPT;
+
+ macro(JUMP_BACKWARD_NO_JIT) =
+ unused/1 +
+ _CHECK_PERIODIC +
+ JUMP_BACKWARD_NO_INTERRUPT;
+
+ macro(JUMP_BACKWARD_JIT) =
+ unused/1 +
+ _CHECK_PERIODIC +
+ JUMP_BACKWARD_NO_INTERRUPT +
+ _JIT;
pseudo(JUMP, (--)) = {
JUMP_FORWARD,
@@ -2906,6 +2931,7 @@ dummy_func(
* generator or coroutine, so we deliberately do not check it here.
* (see bpo-30039).
*/
+ assert(oparg <= INSTR_OFFSET());
JUMPBY(-oparg);
}
diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h
index 5dd2f37d811109..ffdad70815caef 100644
--- a/Python/generated_cases.c.h
+++ b/Python/generated_cases.c.h
@@ -180,6 +180,7 @@
TARGET(BINARY_OP_EXTEND) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 6;
INSTRUCTION_STATS(BINARY_OP_EXTEND);
static_assert(INLINE_CACHE_ENTRIES_BINARY_OP == 5, "incorrect cache size");
@@ -1087,6 +1088,7 @@
TARGET(CALL_ALLOC_AND_ENTER_INIT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_ALLOC_AND_ENTER_INIT);
static_assert(INLINE_CACHE_ENTRIES_CALL == 3, "incorrect cache size");
@@ -1185,6 +1187,7 @@
TARGET(CALL_BOUND_METHOD_EXACT_ARGS) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_BOUND_METHOD_EXACT_ARGS);
static_assert(INLINE_CACHE_ENTRIES_CALL == 3, "incorrect cache size");
@@ -1288,6 +1291,7 @@
TARGET(CALL_BOUND_METHOD_GENERAL) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_BOUND_METHOD_GENERAL);
static_assert(INLINE_CACHE_ENTRIES_CALL == 3, "incorrect cache size");
@@ -2113,6 +2117,7 @@
TARGET(CALL_KW_BOUND_METHOD) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_KW_BOUND_METHOD);
static_assert(INLINE_CACHE_ENTRIES_CALL_KW == 3, "incorrect cache size");
@@ -2313,6 +2318,7 @@
TARGET(CALL_KW_PY) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_KW_PY);
static_assert(INLINE_CACHE_ENTRIES_CALL_KW == 3, "incorrect cache size");
@@ -2890,6 +2896,7 @@
TARGET(CALL_PY_EXACT_ARGS) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_PY_EXACT_ARGS);
static_assert(INLINE_CACHE_ENTRIES_CALL == 3, "incorrect cache size");
@@ -2971,6 +2978,7 @@
TARGET(CALL_PY_GENERAL) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(CALL_PY_GENERAL);
static_assert(INLINE_CACHE_ENTRIES_CALL == 3, "incorrect cache size");
@@ -5160,10 +5168,55 @@
}
TARGET(JUMP_BACKWARD) {
+ frame->instr_ptr = next_instr;
+ next_instr += 2;
+ INSTRUCTION_STATS(JUMP_BACKWARD);
+ PREDICTED_JUMP_BACKWARD:;
+ _Py_CODEUNIT* const this_instr = next_instr - 2;
+ (void)this_instr;
+ /* Skip 1 cache entry */
+ // _SPECIALIZE_JUMP_BACKWARD
+ {
+ #if ENABLE_SPECIALIZATION
+ if (this_instr->op.code == JUMP_BACKWARD) {
+ this_instr->op.code = tstate->interp->jit ? JUMP_BACKWARD_JIT : JUMP_BACKWARD_NO_JIT;
+ // Need to re-dispatch so the warmup counter isn't off by one:
+ next_instr = this_instr;
+ DISPATCH_SAME_OPARG();
+ }
+ #endif
+ }
+ // _CHECK_PERIODIC
+ {
+ _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY();
+ QSBR_QUIESCENT_STATE(tstate);
+ if (_Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) & _PY_EVAL_EVENTS_MASK) {
+ _PyFrame_SetStackPointer(frame, stack_pointer);
+ int err = _Py_HandlePending(tstate);
+ stack_pointer = _PyFrame_GetStackPointer(frame);
+ if (err != 0) goto error;
+ }
+ }
+ // _JUMP_BACKWARD_NO_INTERRUPT
+ {
+ /* This bytecode is used in the `yield from` or `await` loop.
+ * If there is an interrupt, we want it handled in the innermost
+ * generator or coroutine, so we deliberately do not check it here.
+ * (see bpo-30039).
+ */
+ assert(oparg <= INSTR_OFFSET());
+ JUMPBY(-oparg);
+ }
+ DISPATCH();
+ }
+
+ TARGET(JUMP_BACKWARD_JIT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
(void)this_instr;
next_instr += 2;
- INSTRUCTION_STATS(JUMP_BACKWARD);
+ INSTRUCTION_STATS(JUMP_BACKWARD_JIT);
+ static_assert(1 == 1, "incorrect cache size");
+ /* Skip 1 cache entry */
// _CHECK_PERIODIC
{
_Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY();
@@ -5175,16 +5228,21 @@
if (err != 0) goto error;
}
}
- // _JUMP_BACKWARD
+ // _JUMP_BACKWARD_NO_INTERRUPT
{
- uint16_t the_counter = read_u16(&this_instr[1].cache);
- (void)the_counter;
+ /* This bytecode is used in the `yield from` or `await` loop.
+ * If there is an interrupt, we want it handled in the innermost
+ * generator or coroutine, so we deliberately do not check it here.
+ * (see bpo-30039).
+ */
assert(oparg <= INSTR_OFFSET());
JUMPBY(-oparg);
+ }
+ // _JIT
+ {
#ifdef _Py_TIER2
- #if ENABLE_SPECIALIZATION
_Py_BackoffCounter counter = this_instr[1].counter;
- if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD) {
+ if (backoff_counter_triggers(counter) && this_instr->op.code == JUMP_BACKWARD_JIT) {
_Py_CODEUNIT *start = this_instr;
/* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */
while (oparg > 255) {
@@ -5211,8 +5269,7 @@
else {
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
}
- #endif /* ENABLE_SPECIALIZATION */
- #endif /* _Py_TIER2 */
+ #endif
}
DISPATCH();
}
@@ -5226,10 +5283,41 @@
* generator or coroutine, so we deliberately do not check it here.
* (see bpo-30039).
*/
+ assert(oparg <= INSTR_OFFSET());
JUMPBY(-oparg);
DISPATCH();
}
+ TARGET(JUMP_BACKWARD_NO_JIT) {
+ frame->instr_ptr = next_instr;
+ next_instr += 2;
+ INSTRUCTION_STATS(JUMP_BACKWARD_NO_JIT);
+ static_assert(1 == 1, "incorrect cache size");
+ /* Skip 1 cache entry */
+ // _CHECK_PERIODIC
+ {
+ _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY();
+ QSBR_QUIESCENT_STATE(tstate);
+ if (_Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker) & _PY_EVAL_EVENTS_MASK) {
+ _PyFrame_SetStackPointer(frame, stack_pointer);
+ int err = _Py_HandlePending(tstate);
+ stack_pointer = _PyFrame_GetStackPointer(frame);
+ if (err != 0) goto error;
+ }
+ }
+ // _JUMP_BACKWARD_NO_INTERRUPT
+ {
+ /* This bytecode is used in the `yield from` or `await` loop.
+ * If there is an interrupt, we want it handled in the innermost
+ * generator or coroutine, so we deliberately do not check it here.
+ * (see bpo-30039).
+ */
+ assert(oparg <= INSTR_OFFSET());
+ JUMPBY(-oparg);
+ }
+ DISPATCH();
+ }
+
TARGET(JUMP_FORWARD) {
frame->instr_ptr = next_instr;
next_instr += 1;
@@ -5369,6 +5457,7 @@
TARGET(LOAD_ATTR_CLASS) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_CLASS);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5407,6 +5496,7 @@
TARGET(LOAD_ATTR_CLASS_WITH_METACLASS_CHECK) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_CLASS_WITH_METACLASS_CHECK);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5451,6 +5541,7 @@
TARGET(LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5487,6 +5578,7 @@
TARGET(LOAD_ATTR_INSTANCE_VALUE) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_INSTANCE_VALUE);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5540,6 +5632,7 @@
TARGET(LOAD_ATTR_METHOD_LAZY_DICT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_METHOD_LAZY_DICT);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5583,6 +5676,7 @@
TARGET(LOAD_ATTR_METHOD_NO_DICT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_METHOD_NO_DICT);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5619,6 +5713,7 @@
TARGET(LOAD_ATTR_METHOD_WITH_VALUES) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_METHOD_WITH_VALUES);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5669,6 +5764,7 @@
TARGET(LOAD_ATTR_MODULE) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_MODULE);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5724,6 +5820,7 @@
TARGET(LOAD_ATTR_NONDESCRIPTOR_NO_DICT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_NONDESCRIPTOR_NO_DICT);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5755,6 +5852,7 @@
TARGET(LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5799,6 +5897,7 @@
TARGET(LOAD_ATTR_PROPERTY) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_PROPERTY);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5864,6 +5963,7 @@
TARGET(LOAD_ATTR_SLOT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_SLOT);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -5909,6 +6009,7 @@
TARGET(LOAD_ATTR_WITH_HINT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 10;
INSTRUCTION_STATS(LOAD_ATTR_WITH_HINT);
static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size");
@@ -6326,6 +6427,7 @@
TARGET(LOAD_GLOBAL_BUILTIN) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 5;
INSTRUCTION_STATS(LOAD_GLOBAL_BUILTIN);
static_assert(INLINE_CACHE_ENTRIES_LOAD_GLOBAL == 4, "incorrect cache size");
@@ -6380,6 +6482,7 @@
TARGET(LOAD_GLOBAL_MODULE) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 5;
INSTRUCTION_STATS(LOAD_GLOBAL_MODULE);
static_assert(INLINE_CACHE_ENTRIES_LOAD_GLOBAL == 4, "incorrect cache size");
@@ -7557,6 +7660,7 @@
TARGET(STORE_ATTR_INSTANCE_VALUE) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 5;
INSTRUCTION_STATS(STORE_ATTR_INSTANCE_VALUE);
static_assert(INLINE_CACHE_ENTRIES_STORE_ATTR == 4, "incorrect cache size");
@@ -7615,6 +7719,7 @@
TARGET(STORE_ATTR_SLOT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 5;
INSTRUCTION_STATS(STORE_ATTR_SLOT);
static_assert(INLINE_CACHE_ENTRIES_STORE_ATTR == 4, "incorrect cache size");
@@ -7652,6 +7757,7 @@
TARGET(STORE_ATTR_WITH_HINT) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 5;
INSTRUCTION_STATS(STORE_ATTR_WITH_HINT);
static_assert(INLINE_CACHE_ENTRIES_STORE_ATTR == 4, "incorrect cache size");
@@ -8046,6 +8152,7 @@
TARGET(TO_BOOL_ALWAYS_TRUE) {
_Py_CODEUNIT* const this_instr = frame->instr_ptr = next_instr;
+ (void)this_instr;
next_instr += 4;
INSTRUCTION_STATS(TO_BOOL_ALWAYS_TRUE);
static_assert(INLINE_CACHE_ENTRIES_TO_BOOL == 3, "incorrect cache size");
diff --git a/Python/opcode_targets.h b/Python/opcode_targets.h
index 873378b43374c5..09a834bb38fa67 100644
--- a/Python/opcode_targets.h
+++ b/Python/opcode_targets.h
@@ -194,6 +194,8 @@ static void *opcode_targets[256] = {
&&TARGET_FOR_ITER_LIST,
&&TARGET_FOR_ITER_RANGE,
&&TARGET_FOR_ITER_TUPLE,
+ &&TARGET_JUMP_BACKWARD_JIT,
+ &&TARGET_JUMP_BACKWARD_NO_JIT,
&&TARGET_LOAD_ATTR_CLASS,
&&TARGET_LOAD_ATTR_CLASS_WITH_METACLASS_CHECK,
&&TARGET_LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN,
@@ -232,8 +234,6 @@ static void *opcode_targets[256] = {
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
- &&_unknown_opcode,
- &&_unknown_opcode,
&&TARGET_INSTRUMENTED_END_FOR,
&&TARGET_INSTRUMENTED_POP_ITER,
&&TARGET_INSTRUMENTED_END_SEND,
diff --git a/Python/optimizer.c b/Python/optimizer.c
index e3950843964f11..b16695a3c3d33e 100644
--- a/Python/optimizer.c
+++ b/Python/optimizer.c
@@ -91,70 +91,13 @@ insert_executor(PyCodeObject *code, _Py_CODEUNIT *instr, int index, _PyExecutorO
instr->op.arg = index;
}
-
-static int
-never_optimize(
- _PyOptimizerObject* self,
- _PyInterpreterFrame *frame,
- _Py_CODEUNIT *instr,
- _PyExecutorObject **exec,
- int Py_UNUSED(stack_entries),
- bool Py_UNUSED(progress_needed))
-{
- // This may be called if the optimizer is reset
- return 0;
-}
-
-PyTypeObject _PyDefaultOptimizer_Type = {
- PyVarObject_HEAD_INIT(&PyType_Type, 0)
- .tp_name = "noop_optimizer",
- .tp_basicsize = sizeof(_PyOptimizerObject),
- .tp_itemsize = 0,
- .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
-};
-
-static _PyOptimizerObject _PyOptimizer_Default = {
- PyObject_HEAD_INIT(&_PyDefaultOptimizer_Type)
- .optimize = never_optimize,
-};
-
-_PyOptimizerObject *
-_Py_GetOptimizer(void)
-{
- PyInterpreterState *interp = _PyInterpreterState_GET();
- if (interp->optimizer == &_PyOptimizer_Default) {
- return NULL;
- }
- Py_INCREF(interp->optimizer);
- return interp->optimizer;
-}
-
static _PyExecutorObject *
make_executor_from_uops(_PyUOpInstruction *buffer, int length, const _PyBloomFilter *dependencies);
-_PyOptimizerObject *
-_Py_SetOptimizer(PyInterpreterState *interp, _PyOptimizerObject *optimizer)
-{
- if (optimizer == NULL) {
- optimizer = &_PyOptimizer_Default;
- }
- _PyOptimizerObject *old = interp->optimizer;
- if (old == NULL) {
- old = &_PyOptimizer_Default;
- }
- Py_INCREF(optimizer);
- interp->optimizer = optimizer;
- return old;
-}
-
-int
-_Py_SetTier2Optimizer(_PyOptimizerObject *optimizer)
-{
- PyInterpreterState *interp = _PyInterpreterState_GET();
- _PyOptimizerObject *old = _Py_SetOptimizer(interp, optimizer);
- Py_XDECREF(old);
- return old == NULL ? -1 : 0;
-}
+static int
+uop_optimize(_PyInterpreterFrame *frame, _Py_CODEUNIT *instr,
+ _PyExecutorObject **exec_ptr, int curr_stackentries,
+ bool progress_needed);
/* Returns 1 if optimized, 0 if not optimized, and -1 for an error.
* If optimized, *executor_ptr contains a new reference to the executor
@@ -164,6 +107,7 @@ _PyOptimizer_Optimize(
_PyInterpreterFrame *frame, _Py_CODEUNIT *start,
_PyStackRef *stack_pointer, _PyExecutorObject **executor_ptr, int chain_depth)
{
+ assert(_PyInterpreterState_GET()->jit);
// The first executor in a chain and the MAX_CHAIN_DEPTH'th executor *must*
// make progress in order to avoid infinite loops or excessively-long
// side-exit chains. We can only insert the executor into the bytecode if
@@ -172,12 +116,10 @@ _PyOptimizer_Optimize(
bool progress_needed = chain_depth == 0;
PyCodeObject *code = _PyFrame_GetCode(frame);
assert(PyCode_Check(code));
- PyInterpreterState *interp = _PyInterpreterState_GET();
if (progress_needed && !has_space_for_executor(code, start)) {
return 0;
}
- _PyOptimizerObject *opt = interp->optimizer;
- int err = opt->optimize(opt, frame, start, executor_ptr, (int)(stack_pointer - _PyFrame_Stackbase(frame)), progress_needed);
+ int err = uop_optimize(frame, start, executor_ptr, (int)(stack_pointer - _PyFrame_Stackbase(frame)), progress_needed);
if (err <= 0) {
return err;
}
@@ -684,6 +626,7 @@ translate_bytecode_to_trace(
}
case JUMP_BACKWARD:
+ case JUMP_BACKWARD_JIT:
ADD_TO_TRACE(_CHECK_PERIODIC, 0, 0, target);
_Py_FALLTHROUGH;
case JUMP_BACKWARD_NO_INTERRUPT:
@@ -1241,7 +1184,6 @@ int effective_trace_length(_PyUOpInstruction *buffer, int length)
static int
uop_optimize(
- _PyOptimizerObject *self,
_PyInterpreterFrame *frame,
_Py_CODEUNIT *instr,
_PyExecutorObject **exec_ptr,
@@ -1299,31 +1241,6 @@ uop_optimize(
return 1;
}
-static void
-uop_opt_dealloc(PyObject *self) {
- PyObject_Free(self);
-}
-
-PyTypeObject _PyUOpOptimizer_Type = {
- PyVarObject_HEAD_INIT(&PyType_Type, 0)
- .tp_name = "uop_optimizer",
- .tp_basicsize = sizeof(_PyOptimizerObject),
- .tp_itemsize = 0,
- .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
- .tp_dealloc = uop_opt_dealloc,
-};
-
-PyObject *
-_PyOptimizer_NewUOpOptimizer(void)
-{
- _PyOptimizerObject *opt = PyObject_New(_PyOptimizerObject, &_PyUOpOptimizer_Type);
- if (opt == NULL) {
- return NULL;
- }
- opt->optimize = uop_optimize;
- return (PyObject *)opt;
-}
-
/*****************************************
* Executor management
diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c
index f357ddfbcfb033..00a98af998cfce 100644
--- a/Python/pylifecycle.c
+++ b/Python/pylifecycle.c
@@ -1306,14 +1306,7 @@ init_interp_main(PyThreadState *tstate)
} else
#endif
{
- PyObject *opt = _PyOptimizer_NewUOpOptimizer();
- if (opt == NULL) {
- return _PyStatus_ERR("can't initialize optimizer");
- }
- if (_Py_SetTier2Optimizer((_PyOptimizerObject *)opt)) {
- return _PyStatus_ERR("can't install optimizer");
- }
- Py_DECREF(opt);
+ interp->jit = true;
}
}
}
@@ -1665,11 +1658,10 @@ finalize_modules(PyThreadState *tstate)
{
PyInterpreterState *interp = tstate->interp;
+ // Invalidate all executors and turn off JIT:
+ interp->jit = false;
#ifdef _Py_TIER2
- // Invalidate all executors and turn off tier 2 optimizer
_Py_Executors_InvalidateAll(interp, 0);
- _PyOptimizerObject *old = _Py_SetOptimizer(interp, NULL);
- Py_XDECREF(old);
#endif
// Stop watching __builtin__ modifications
diff --git a/Python/pystate.c b/Python/pystate.c
index 26047edb459480..e6770ef40df740 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -655,11 +655,9 @@ init_interpreter(PyInterpreterState *interp,
}
interp->sys_profile_initialized = false;
interp->sys_trace_initialized = false;
-#ifdef _Py_TIER2
- (void)_Py_SetOptimizer(interp, NULL);
+ interp->jit = false;
interp->executor_list_head = NULL;
interp->trace_run_counter = JIT_CLEANUP_THRESHOLD;
-#endif
if (interp != &runtime->_main_interpreter) {
/* Fix the self-referential, statically initialized fields. */
interp->dtoa = (struct _dtoa_state)_dtoa_state_INIT(interp);
@@ -829,12 +827,6 @@ interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
tstate->_status.cleared = 0;
}
-#ifdef _Py_TIER2
- _PyOptimizerObject *old = _Py_SetOptimizer(interp, NULL);
- assert(old != NULL);
- Py_DECREF(old);
-#endif
-
/* It is possible that any of the objects below have a finalizer
that runs Python code or otherwise relies on a thread state
or even the interpreter state. For now we trust that isn't
diff --git a/Python/sysmodule.c b/Python/sysmodule.c
index 887591a681b25c..7e4cb45af05672 100644
--- a/Python/sysmodule.c
+++ b/Python/sysmodule.c
@@ -2265,9 +2265,7 @@ sys_activate_stack_trampoline_impl(PyObject *module, const char *backend)
{
#ifdef PY_HAVE_PERF_TRAMPOLINE
#ifdef _Py_JIT
- _PyOptimizerObject* optimizer = _Py_GetOptimizer();
- if (optimizer != NULL) {
- Py_DECREF(optimizer);
+ if (_PyInterpreterState_GET()->jit) {
PyErr_SetString(PyExc_ValueError, "Cannot activate the perf trampoline if the JIT is active");
return NULL;
}
diff --git a/Tools/c-analyzer/cpython/ignored.tsv b/Tools/c-analyzer/cpython/ignored.tsv
index fbb84fc7950fae..415d20e5b7fabb 100644
--- a/Tools/c-analyzer/cpython/ignored.tsv
+++ b/Tools/c-analyzer/cpython/ignored.tsv
@@ -388,9 +388,7 @@ Python/sysmodule.c - perf_map_state -
Python/sysmodule.c - _PySys_ImplCacheTag -
Python/sysmodule.c - _PySys_ImplName -
Python/sysmodule.c - whatstrings -
-Python/optimizer.c - _PyDefaultOptimizer_Type -
Python/optimizer.c - _PyUOpExecutor_Type -
-Python/optimizer.c - _PyUOpOptimizer_Type -
Python/optimizer.c - _PyOptimizer_Default -
Python/optimizer.c - _ColdExit_Type -
Python/optimizer.c - Py_FatalErrorExecutor -
diff --git a/Tools/cases_generator/tier1_generator.py b/Tools/cases_generator/tier1_generator.py
index 59ce5c95852d28..13430524b26dcd 100644
--- a/Tools/cases_generator/tier1_generator.py
+++ b/Tools/cases_generator/tier1_generator.py
@@ -202,7 +202,7 @@ def generate_tier1_cases(
needs_this = uses_this(inst)
out.emit("\n")
out.emit(f"TARGET({name}) {{\n")
- unused_guard = "(void)this_instr;\n" if inst.family is None else ""
+ unused_guard = "(void)this_instr;\n"
if inst.properties.needs_prev:
out.emit(f"_Py_CODEUNIT* const prev_instr = frame->instr_ptr;\n")
if needs_this and not inst.is_target:
1
0
Jan. 29, 2025
https://github.com/python/cpython/commit/5c930a26fb78c40929f1b894efee1b07c6…
commit: 5c930a26fb78c40929f1b894efee1b07c6d828fd
branch: main
author: T. Wouters <thomas(a)python.org>
committer: Yhg1s <thomas(a)python.org>
date: 2025-01-29T01:07:56+01:00
summary:
gh-115999: Enable free-threaded specialization of LOAD_CONST (#129365)
Enable free-threaded specialization of LOAD_CONST.
files:
M Lib/test/test_opcache.py
M Python/bytecodes.c
M Python/generated_cases.c.h
M Tools/cases_generator/analyzer.py
diff --git a/Lib/test/test_opcache.py b/Lib/test/test_opcache.py
index 4d7304b1c9abb6..e8ea21f8179978 100644
--- a/Lib/test/test_opcache.py
+++ b/Lib/test/test_opcache.py
@@ -1773,6 +1773,20 @@ def compare_op_str():
self.assert_specialized(compare_op_str, "COMPARE_OP_STR")
self.assert_no_opcode(compare_op_str, "COMPARE_OP")
+ @cpython_only
+ @requires_specialization_ft
+ def test_load_const(self):
+ def load_const():
+ def unused(): pass
+ # Currently, the empty tuple is immortal, and the otherwise
+ # unused nested function's code object is mortal. This test will
+ # have to use different values if either of that changes.
+ return ()
+
+ load_const()
+ self.assert_specialized(load_const, "LOAD_CONST_IMMORTAL")
+ self.assert_specialized(load_const, "LOAD_CONST_MORTAL")
+ self.assert_no_opcode(load_const, "LOAD_CONST")
if __name__ == "__main__":
unittest.main()
diff --git a/Python/bytecodes.c b/Python/bytecodes.c
index 5f0be8d3feefd4..7d463511aee41d 100644
--- a/Python/bytecodes.c
+++ b/Python/bytecodes.c
@@ -294,10 +294,20 @@ dummy_func(
* marshalling can intern strings and make them immortal. */
PyObject *obj = GETITEM(FRAME_CO_CONSTS, oparg);
value = PyStackRef_FromPyObjectNew(obj);
-#if ENABLE_SPECIALIZATION
+#if ENABLE_SPECIALIZATION_FT
+#ifdef Py_GIL_DISABLED
+ uint8_t expected = LOAD_CONST;
+ if (!_Py_atomic_compare_exchange_uint8(
+ &this_instr->op.code, &expected,
+ _Py_IsImmortal(obj) ? LOAD_CONST_IMMORTAL : LOAD_CONST_MORTAL)) {
+ // We might lose a race with instrumentation, which we don't care about.
+ assert(expected >= MIN_INSTRUMENTED_OPCODE);
+ }
+#else
if (this_instr->op.code == LOAD_CONST) {
this_instr->op.code = _Py_IsImmortal(obj) ? LOAD_CONST_IMMORTAL : LOAD_CONST_MORTAL;
}
+#endif
#endif
}
@@ -2558,7 +2568,7 @@ dummy_func(
}
OPCODE_DEFERRED_INC(COMPARE_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
- #endif /* ENABLE_SPECIALIZATION */
+ #endif /* ENABLE_SPECIALIZATION_FT */
}
op(_COMPARE_OP, (left, right -- res)) {
diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h
index ad044e62a38b1c..5dd2f37d811109 100644
--- a/Python/generated_cases.c.h
+++ b/Python/generated_cases.c.h
@@ -3318,7 +3318,7 @@
}
OPCODE_DEFERRED_INC(COMPARE_OP);
ADVANCE_ADAPTIVE_COUNTER(this_instr[1].counter);
- #endif /* ENABLE_SPECIALIZATION */
+ #endif /* ENABLE_SPECIALIZATION_FT */
}
// _COMPARE_OP
{
@@ -6035,11 +6035,21 @@
* marshalling can intern strings and make them immortal. */
PyObject *obj = GETITEM(FRAME_CO_CONSTS, oparg);
value = PyStackRef_FromPyObjectNew(obj);
- #if ENABLE_SPECIALIZATION
+ #if ENABLE_SPECIALIZATION_FT
+ #ifdef Py_GIL_DISABLED
+ uint8_t expected = LOAD_CONST;
+ if (!_Py_atomic_compare_exchange_uint8(
+ &this_instr->op.code, &expected,
+ _Py_IsImmortal(obj) ? LOAD_CONST_IMMORTAL : LOAD_CONST_MORTAL)) {
+ // We might lose a race with instrumentation, which we don't care about.
+ assert(expected >= MIN_INSTRUMENTED_OPCODE);
+ }
+ #else
if (this_instr->op.code == LOAD_CONST) {
this_instr->op.code = _Py_IsImmortal(obj) ? LOAD_CONST_IMMORTAL : LOAD_CONST_MORTAL;
}
#endif
+ #endif
stack_pointer[0] = value;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
diff --git a/Tools/cases_generator/analyzer.py b/Tools/cases_generator/analyzer.py
index bc9c42e045a610..b9293ff4b19951 100644
--- a/Tools/cases_generator/analyzer.py
+++ b/Tools/cases_generator/analyzer.py
@@ -634,6 +634,7 @@ def has_error_without_pop(op: parser.InstDef) -> bool:
"_Py_STR",
"_Py_TryIncrefCompare",
"_Py_TryIncrefCompareStackRef",
+ "_Py_atomic_compare_exchange_uint8",
"_Py_atomic_load_ptr_acquire",
"_Py_atomic_load_uintptr_relaxed",
"_Py_set_eval_breaker_bit",
1
0
Jan. 28, 2025
https://github.com/python/cpython/commit/789390872b16e1c8cc681605bf100d2d7b…
commit: 789390872b16e1c8cc681605bf100d2d7b3e9111
branch: main
author: Hood Chatham <roberthoodchatham(a)gmail.com>
committer: freakboy3742 <russell(a)keith-magee.com>
date: 2025-01-29T07:30:42+08:00
summary:
gh-127146: Skip test_readinto_non_blocking on Emscripten (#129421)
Skips an additional test due to non_blocking not working reliably on Emscripten.
files:
M Lib/test/test_os.py
diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py
index 72208d1c582568..6e40cb4f58bfee 100644
--- a/Lib/test/test_os.py
+++ b/Lib/test/test_os.py
@@ -264,6 +264,7 @@ def test_readinto(self):
@unittest.skipUnless(hasattr(os, 'get_blocking'),
'needs os.get_blocking() and os.set_blocking()')
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
+ @unittest.skipIf(support.is_emscripten, "set_blocking does not work correctly")
def test_readinto_non_blocking(self):
# Verify behavior of a readinto which would block on a non-blocking fd.
r, w = os.pipe()
1
0
https://github.com/python/cpython/commit/64c417dee5594c882beac03e7d2942ca05…
commit: 64c417dee5594c882beac03e7d2942ca05b5c204
branch: main
author: Pieter Eendebak <pieter.eendebak(a)gmail.com>
committer: colesbury <colesbury(a)gmail.com>
date: 2025-01-28T21:55:45Z
summary:
gh-112075: Remove critical section in dict.get (gh-129336)
The `dict.get` implementation uses `_Py_dict_lookup_threadsafe`, which is
thread-safe, so we remove the critical section from the argument clinic.
Add a test for concurrent dict get and set operations.
files:
M Lib/test/test_free_threading/test_dict.py
M Objects/clinic/dictobject.c.h
M Objects/dictobject.c
diff --git a/Lib/test/test_free_threading/test_dict.py b/Lib/test/test_free_threading/test_dict.py
index 13717cb39fa35d..4f605e0c51f0d5 100644
--- a/Lib/test/test_free_threading/test_dict.py
+++ b/Lib/test/test_free_threading/test_dict.py
@@ -5,7 +5,7 @@
from ast import Or
from functools import partial
-from threading import Thread
+from threading import Barrier, Thread
from unittest import TestCase
try:
@@ -142,6 +142,27 @@ def writer_func(l):
for ref in thread_list:
self.assertIsNone(ref())
+ def test_racing_get_set_dict(self):
+ """Races getting and setting a dict should be thread safe"""
+ THREAD_COUNT = 10
+ barrier = Barrier(THREAD_COUNT)
+ def work(d):
+ barrier.wait()
+ for _ in range(1000):
+ d[10] = 0
+ d.get(10, None)
+ _ = d[10]
+
+ d = {}
+ worker_threads = []
+ for ii in range(THREAD_COUNT):
+ worker_threads.append(Thread(target=work, args=[d]))
+ for t in worker_threads:
+ t.start()
+ for t in worker_threads:
+ t.join()
+
+
def test_racing_set_object_dict(self):
"""Races assigning to __dict__ should be thread safe"""
class C: pass
diff --git a/Objects/clinic/dictobject.c.h b/Objects/clinic/dictobject.c.h
index cdf39ce147203b..c66916bb33aa37 100644
--- a/Objects/clinic/dictobject.c.h
+++ b/Objects/clinic/dictobject.c.h
@@ -94,9 +94,7 @@ dict_get(PyObject *self, PyObject *const *args, Py_ssize_t nargs)
}
default_value = args[1];
skip_optional:
- Py_BEGIN_CRITICAL_SECTION(self);
return_value = dict_get_impl((PyDictObject *)self, key, default_value);
- Py_END_CRITICAL_SECTION();
exit:
return return_value;
@@ -312,4 +310,4 @@ dict_values(PyObject *self, PyObject *Py_UNUSED(ignored))
{
return dict_values_impl((PyDictObject *)self);
}
-/*[clinic end generated code: output=4956c5b276ea652f input=a9049054013a1b77]*/
+/*[clinic end generated code: output=0f04bf0e7e6b130f input=a9049054013a1b77]*/
diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index 8fe71123252a75..733a10a2e80b18 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -4248,7 +4248,6 @@ dict___contains__(PyDictObject *self, PyObject *key)
}
/*[clinic input]
-@critical_section
dict.get
key: object
@@ -4260,7 +4259,7 @@ Return the value for key if key is in the dictionary, else default.
static PyObject *
dict_get_impl(PyDictObject *self, PyObject *key, PyObject *default_value)
-/*[clinic end generated code: output=bba707729dee05bf input=a631d3f18f584c60]*/
+/*[clinic end generated code: output=bba707729dee05bf input=279ddb5790b6b107]*/
{
PyObject *val = NULL;
Py_hash_t hash;
1
0
Jan. 28, 2025
https://github.com/python/cpython/commit/a4459c34ea4270311f0b7e22f1203af5cc…
commit: a4459c34ea4270311f0b7e22f1203af5ccc39875
branch: main
author: Barney Gale <barney.gale(a)gmail.com>
committer: barneygale <barney.gale(a)gmail.com>
date: 2025-01-28T20:22:55Z
summary:
GH-127381: pathlib ABCs: remove `JoinablePath.match()` (#129147)
Unlike `ReadablePath.[r]glob()` and `JoinablePath.full_match()`, the
`JoinablePath.match()` method doesn't support the recursive wildcard `**`,
and matches from the right when a fully relative pattern is given. These
quirks mean it's probably unsuitable for inclusion in the pathlib ABCs,
especially given `full_match()` handles the same use case.
files:
M Lib/pathlib/_abc.py
M Lib/pathlib/_local.py
M Lib/test/test_pathlib/test_pathlib.py
M Lib/test/test_pathlib/test_pathlib_abc.py
diff --git a/Lib/pathlib/_abc.py b/Lib/pathlib/_abc.py
index d55cc6f243cf2b..e498dc78e83b5e 100644
--- a/Lib/pathlib/_abc.py
+++ b/Lib/pathlib/_abc.py
@@ -358,33 +358,6 @@ def parents(self):
parent = split(path)[0]
return tuple(parents)
- def match(self, path_pattern, *, case_sensitive=None):
- """
- Return True if this path matches the given pattern. If the pattern is
- relative, matching is done from the right; otherwise, the entire path
- is matched. The recursive wildcard '**' is *not* supported by this
- method.
- """
- if not isinstance(path_pattern, JoinablePath):
- path_pattern = self.with_segments(path_pattern)
- if case_sensitive is None:
- case_sensitive = _is_case_sensitive(self.parser)
- sep = path_pattern.parser.sep
- path_parts = self.parts[::-1]
- pattern_parts = path_pattern.parts[::-1]
- if not pattern_parts:
- raise ValueError("empty pattern")
- if len(path_parts) < len(pattern_parts):
- return False
- if len(path_parts) > len(pattern_parts) and path_pattern.anchor:
- return False
- globber = PathGlobber(sep, case_sensitive)
- for path_part, pattern_part in zip(path_parts, pattern_parts):
- match = globber.compile(pattern_part)
- if match(path_part) is None:
- return False
- return True
-
def full_match(self, pattern, *, case_sensitive=None):
"""
Return True if this path matches the given glob-style pattern. The
diff --git a/Lib/pathlib/_local.py b/Lib/pathlib/_local.py
index 2b42f3c22254b8..b3ec934f7510de 100644
--- a/Lib/pathlib/_local.py
+++ b/Lib/pathlib/_local.py
@@ -668,6 +668,32 @@ def full_match(self, pattern, *, case_sensitive=None):
globber = _StringGlobber(self.parser.sep, case_sensitive, recursive=True)
return globber.compile(pattern)(path) is not None
+ def match(self, path_pattern, *, case_sensitive=None):
+ """
+ Return True if this path matches the given pattern. If the pattern is
+ relative, matching is done from the right; otherwise, the entire path
+ is matched. The recursive wildcard '**' is *not* supported by this
+ method.
+ """
+ if not isinstance(path_pattern, PurePath):
+ path_pattern = self.with_segments(path_pattern)
+ if case_sensitive is None:
+ case_sensitive = self.parser is posixpath
+ path_parts = self.parts[::-1]
+ pattern_parts = path_pattern.parts[::-1]
+ if not pattern_parts:
+ raise ValueError("empty pattern")
+ if len(path_parts) < len(pattern_parts):
+ return False
+ if len(path_parts) > len(pattern_parts) and path_pattern.anchor:
+ return False
+ globber = _StringGlobber(self.parser.sep, case_sensitive)
+ for path_part, pattern_part in zip(path_parts, pattern_parts):
+ match = globber.compile(pattern_part)
+ if match(path_part) is None:
+ return False
+ return True
+
# Subclassing os.PathLike makes isinstance() checks slower,
# which in turn makes Path construction slower. Register instead!
os.PathLike.register(PurePath)
diff --git a/Lib/test/test_pathlib/test_pathlib.py b/Lib/test/test_pathlib/test_pathlib.py
index 866a2d07dd692a..d64092b710a4d6 100644
--- a/Lib/test/test_pathlib/test_pathlib.py
+++ b/Lib/test/test_pathlib/test_pathlib.py
@@ -438,6 +438,84 @@ def test_match_empty(self):
self.assertRaises(ValueError, P('a').match, '')
self.assertRaises(ValueError, P('a').match, '.')
+ def test_match_common(self):
+ P = self.cls
+ # Simple relative pattern.
+ self.assertTrue(P('b.py').match('b.py'))
+ self.assertTrue(P('a/b.py').match('b.py'))
+ self.assertTrue(P('/a/b.py').match('b.py'))
+ self.assertFalse(P('a.py').match('b.py'))
+ self.assertFalse(P('b/py').match('b.py'))
+ self.assertFalse(P('/a.py').match('b.py'))
+ self.assertFalse(P('b.py/c').match('b.py'))
+ # Wildcard relative pattern.
+ self.assertTrue(P('b.py').match('*.py'))
+ self.assertTrue(P('a/b.py').match('*.py'))
+ self.assertTrue(P('/a/b.py').match('*.py'))
+ self.assertFalse(P('b.pyc').match('*.py'))
+ self.assertFalse(P('b./py').match('*.py'))
+ self.assertFalse(P('b.py/c').match('*.py'))
+ # Multi-part relative pattern.
+ self.assertTrue(P('ab/c.py').match('a*/*.py'))
+ self.assertTrue(P('/d/ab/c.py').match('a*/*.py'))
+ self.assertFalse(P('a.py').match('a*/*.py'))
+ self.assertFalse(P('/dab/c.py').match('a*/*.py'))
+ self.assertFalse(P('ab/c.py/d').match('a*/*.py'))
+ # Absolute pattern.
+ self.assertTrue(P('/b.py').match('/*.py'))
+ self.assertFalse(P('b.py').match('/*.py'))
+ self.assertFalse(P('a/b.py').match('/*.py'))
+ self.assertFalse(P('/a/b.py').match('/*.py'))
+ # Multi-part absolute pattern.
+ self.assertTrue(P('/a/b.py').match('/a/*.py'))
+ self.assertFalse(P('/ab.py').match('/a/*.py'))
+ self.assertFalse(P('/a/b/c.py').match('/a/*.py'))
+ # Multi-part glob-style pattern.
+ self.assertFalse(P('/a/b/c.py').match('/**/*.py'))
+ self.assertTrue(P('/a/b/c.py').match('/a/**/*.py'))
+ # Case-sensitive flag
+ self.assertFalse(P('A.py').match('a.PY', case_sensitive=True))
+ self.assertTrue(P('A.py').match('a.PY', case_sensitive=False))
+ self.assertFalse(P('c:/a/B.Py').match('C:/A/*.pY', case_sensitive=True))
+ self.assertTrue(P('/a/b/c.py').match('/A/*/*.Py', case_sensitive=False))
+ # Matching against empty path
+ self.assertFalse(P('').match('*'))
+ self.assertFalse(P('').match('**'))
+ self.assertFalse(P('').match('**/*'))
+
+ @needs_posix
+ def test_match_posix(self):
+ P = self.cls
+ self.assertFalse(P('A.py').match('a.PY'))
+
+ @needs_windows
+ def test_match_windows(self):
+ P = self.cls
+ # Absolute patterns.
+ self.assertTrue(P('c:/b.py').match('*:/*.py'))
+ self.assertTrue(P('c:/b.py').match('c:/*.py'))
+ self.assertFalse(P('d:/b.py').match('c:/*.py')) # wrong drive
+ self.assertFalse(P('b.py').match('/*.py'))
+ self.assertFalse(P('b.py').match('c:*.py'))
+ self.assertFalse(P('b.py').match('c:/*.py'))
+ self.assertFalse(P('c:b.py').match('/*.py'))
+ self.assertFalse(P('c:b.py').match('c:/*.py'))
+ self.assertFalse(P('/b.py').match('c:*.py'))
+ self.assertFalse(P('/b.py').match('c:/*.py'))
+ # UNC patterns.
+ self.assertTrue(P('//some/share/a.py').match('//*/*/*.py'))
+ self.assertTrue(P('//some/share/a.py').match('//some/share/*.py'))
+ self.assertFalse(P('//other/share/a.py').match('//some/share/*.py'))
+ self.assertFalse(P('//some/share/a/b.py').match('//some/share/*.py'))
+ # Case-insensitivity.
+ self.assertTrue(P('B.py').match('b.PY'))
+ self.assertTrue(P('c:/a/B.Py').match('C:/A/*.pY'))
+ self.assertTrue(P('//Some/Share/B.Py').match('//somE/sharE/*.pY'))
+ # Path anchor doesn't match pattern anchor
+ self.assertFalse(P('c:/b.py').match('/*.py')) # 'c:/' vs '/'
+ self.assertFalse(P('c:/b.py').match('c:*.py')) # 'c:/' vs 'c:'
+ self.assertFalse(P('//some/share/a.py').match('/*.py')) # '//some/share/' vs '/'
+
@needs_posix
def test_parse_path_posix(self):
check = self._check_parse_path
diff --git a/Lib/test/test_pathlib/test_pathlib_abc.py b/Lib/test/test_pathlib/test_pathlib_abc.py
index d60bb147b72971..e67bead4297829 100644
--- a/Lib/test/test_pathlib/test_pathlib_abc.py
+++ b/Lib/test/test_pathlib/test_pathlib_abc.py
@@ -296,88 +296,6 @@ def test_str_windows(self):
p = self.cls('//a/b/c/d')
self.assertEqual(str(p), '\\\\a\\b\\c\\d')
- def test_match_empty(self):
- P = self.cls
- self.assertRaises(ValueError, P('a').match, '')
-
- def test_match_common(self):
- P = self.cls
- # Simple relative pattern.
- self.assertTrue(P('b.py').match('b.py'))
- self.assertTrue(P('a/b.py').match('b.py'))
- self.assertTrue(P('/a/b.py').match('b.py'))
- self.assertFalse(P('a.py').match('b.py'))
- self.assertFalse(P('b/py').match('b.py'))
- self.assertFalse(P('/a.py').match('b.py'))
- self.assertFalse(P('b.py/c').match('b.py'))
- # Wildcard relative pattern.
- self.assertTrue(P('b.py').match('*.py'))
- self.assertTrue(P('a/b.py').match('*.py'))
- self.assertTrue(P('/a/b.py').match('*.py'))
- self.assertFalse(P('b.pyc').match('*.py'))
- self.assertFalse(P('b./py').match('*.py'))
- self.assertFalse(P('b.py/c').match('*.py'))
- # Multi-part relative pattern.
- self.assertTrue(P('ab/c.py').match('a*/*.py'))
- self.assertTrue(P('/d/ab/c.py').match('a*/*.py'))
- self.assertFalse(P('a.py').match('a*/*.py'))
- self.assertFalse(P('/dab/c.py').match('a*/*.py'))
- self.assertFalse(P('ab/c.py/d').match('a*/*.py'))
- # Absolute pattern.
- self.assertTrue(P('/b.py').match('/*.py'))
- self.assertFalse(P('b.py').match('/*.py'))
- self.assertFalse(P('a/b.py').match('/*.py'))
- self.assertFalse(P('/a/b.py').match('/*.py'))
- # Multi-part absolute pattern.
- self.assertTrue(P('/a/b.py').match('/a/*.py'))
- self.assertFalse(P('/ab.py').match('/a/*.py'))
- self.assertFalse(P('/a/b/c.py').match('/a/*.py'))
- # Multi-part glob-style pattern.
- self.assertFalse(P('/a/b/c.py').match('/**/*.py'))
- self.assertTrue(P('/a/b/c.py').match('/a/**/*.py'))
- # Case-sensitive flag
- self.assertFalse(P('A.py').match('a.PY', case_sensitive=True))
- self.assertTrue(P('A.py').match('a.PY', case_sensitive=False))
- self.assertFalse(P('c:/a/B.Py').match('C:/A/*.pY', case_sensitive=True))
- self.assertTrue(P('/a/b/c.py').match('/A/*/*.Py', case_sensitive=False))
- # Matching against empty path
- self.assertFalse(P('').match('*'))
- self.assertFalse(P('').match('**'))
- self.assertFalse(P('').match('**/*'))
-
- @needs_posix
- def test_match_posix(self):
- P = self.cls
- self.assertFalse(P('A.py').match('a.PY'))
-
- @needs_windows
- def test_match_windows(self):
- P = self.cls
- # Absolute patterns.
- self.assertTrue(P('c:/b.py').match('*:/*.py'))
- self.assertTrue(P('c:/b.py').match('c:/*.py'))
- self.assertFalse(P('d:/b.py').match('c:/*.py')) # wrong drive
- self.assertFalse(P('b.py').match('/*.py'))
- self.assertFalse(P('b.py').match('c:*.py'))
- self.assertFalse(P('b.py').match('c:/*.py'))
- self.assertFalse(P('c:b.py').match('/*.py'))
- self.assertFalse(P('c:b.py').match('c:/*.py'))
- self.assertFalse(P('/b.py').match('c:*.py'))
- self.assertFalse(P('/b.py').match('c:/*.py'))
- # UNC patterns.
- self.assertTrue(P('//some/share/a.py').match('//*/*/*.py'))
- self.assertTrue(P('//some/share/a.py').match('//some/share/*.py'))
- self.assertFalse(P('//other/share/a.py').match('//some/share/*.py'))
- self.assertFalse(P('//some/share/a/b.py').match('//some/share/*.py'))
- # Case-insensitivity.
- self.assertTrue(P('B.py').match('b.PY'))
- self.assertTrue(P('c:/a/B.Py').match('C:/A/*.pY'))
- self.assertTrue(P('//Some/Share/B.Py').match('//somE/sharE/*.pY'))
- # Path anchor doesn't match pattern anchor
- self.assertFalse(P('c:/b.py').match('/*.py')) # 'c:/' vs '/'
- self.assertFalse(P('c:/b.py').match('c:*.py')) # 'c:/' vs 'c:'
- self.assertFalse(P('//some/share/a.py').match('/*.py')) # '//some/share/' vs '/'
-
def test_full_match_common(self):
P = self.cls
# Simple relative pattern.
1
0