Python-checkins
https://github.com/python/cpython/commit/d97aef8ebfbbb275384b17f06945e583fb…
commit: d97aef8ebfbbb275384b17f06945e583fb3189ea
branch: main
author: Brett Cannon <brett@python.org>
committer: brettcannon <brett@python.org>
date: 2023-03-31T14:23:55-07:00
summary:
Add missing variables to `bytecodes.c` (GH-103153)
The code works without this change, but adding the declarations makes C tooling complain less about undeclared variables.
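Background for the change: Python/bytecodes.c is the input to the interpreter's cases generator rather than a file that is compiled as-is, so instruction bodies refer to variables and labels that only the generated output declares, and plain C tooling parsing bytecodes.c directly reports them as undeclared. The sketch below is a minimal, compilable illustration of that pattern; dummy_func, pop_1_error, and the locals are stand-ins for this note, not the real interpreter code:

    /* A compilable sketch of the "dummy declarations" pattern.
     * The real bytecodes.c is never built directly; a generator
     * expands each case and emits the actual declarations, so
     * declarations like these exist only to keep C parsers and
     * linters quiet. */
    static int
    dummy_func(int opcode, int oparg)
    {
        /* Dummy local: individual cases use it without declaring it;
         * the generator declares it per-case in the generated file. */
        int res;

        switch (opcode) {
        case 1:
            res = oparg + 1;               /* parses cleanly now */
            if (res < 0) goto pop_1_error; /* error paths use the label */
            return res;
        }
        return 0;

    /* Dummy label: the real file declares it (and the locals) at the
     * top of dummy_func, as the first hunk below shows. */
    pop_1_error:
        return -1;
    }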
files:
M Python/bytecodes.c
M Python/generated_cases.c.h
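The second file, Python/generated_cases.c.h, is machine-generated from bytecodes.c, and almost every hunk in it is pure renumbering: the change adds 46 lines to bytecodes.c (the first hunk grows from 10 to 56 lines), so each `#line` directive that points back into bytecodes.c shifts by 46 (e.g. 89 becomes 135). For readers unfamiliar with the directive, this standalone snippet (not CPython code) shows what `#line` does: it remaps the file and line the compiler reports for subsequent lines, so diagnostics in generated code point at the hand-written source:

    #include <stdio.h>

    int main(void) {
        /* Remap the reported location: the next source line is
         * treated as line 135 of Python/bytecodes.c, mirroring the
         * directives throughout the diff below. */
    #line 135 "Python/bytecodes.c"
        printf("%s:%d\n", __FILE__, __LINE__);  /* Python/bytecodes.c:135 */
        return 0;
    }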
diff --git a/Python/bytecodes.c b/Python/bytecodes.c
index 484f6e6b1a1c..825fa705a4cd 100644
--- a/Python/bytecodes.c
+++ b/Python/bytecodes.c
@@ -74,10 +74,56 @@ dummy_func(
PyObject **stack_pointer,
PyObject *kwnames,
int throwflag,
- binaryfunc binary_ops[]
+ binaryfunc binary_ops[],
+ PyObject *args[]
)
{
+ // Dummy labels.
+ pop_1_error:
+ // Dummy locals.
+ PyObject *annotations;
+ PyObject *attrs;
+ PyObject *bottom;
+ PyObject *callable;
+ PyObject *callargs;
+ PyObject *closure;
+ PyObject *codeobj;
+ PyObject *cond;
+ PyObject *defaults;
+ PyObject *descr;
_PyInterpreterFrame entry_frame;
+ PyObject *exc;
+ PyObject *exit;
+ PyObject *fget;
+ PyObject *fmt_spec;
+ PyObject *func;
+ uint32_t func_version;
+ PyObject *getattribute;
+ PyObject *kwargs;
+ PyObject *kwdefaults;
+ PyObject *len_o;
+ PyObject *match;
+ PyObject *match_type;
+ PyObject *method;
+ PyObject *mgr;
+ Py_ssize_t min_args;
+ PyObject *names;
+ PyObject *new_exc;
+ PyObject *next;
+ PyObject *none;
+ PyObject *null;
+ PyObject *prev_exc;
+ PyObject *receiver;
+ PyObject *rest;
+ int result;
+ PyObject *self;
+ PyObject *seq;
+ PyObject *slice;
+ PyObject *step;
+ PyObject *subject;
+ PyObject *top;
+ PyObject *type;
+ int values_or_none;
switch (opcode) {
diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h
index d9c66343430f..6bb37d69cc07 100644
--- a/Python/generated_cases.c.h
+++ b/Python/generated_cases.c.h
@@ -8,7 +8,7 @@
}
TARGET(RESUME) {
- #line 89 "Python/bytecodes.c"
+ #line 135 "Python/bytecodes.c"
assert(tstate->cframe == &cframe);
assert(frame == cframe.current_frame);
if (_Py_atomic_load_relaxed_int32(eval_breaker) && oparg < 2) {
@@ -20,7 +20,7 @@
TARGET(LOAD_CLOSURE) {
PyObject *value;
- #line 97 "Python/bytecodes.c"
+ #line 143 "Python/bytecodes.c"
/* We keep LOAD_CLOSURE so that the bytecode stays more readable. */
value = GETLOCAL(oparg);
if (value == NULL) goto unbound_local_error;
@@ -33,7 +33,7 @@
TARGET(LOAD_FAST_CHECK) {
PyObject *value;
- #line 104 "Python/bytecodes.c"
+ #line 150 "Python/bytecodes.c"
value = GETLOCAL(oparg);
if (value == NULL) goto unbound_local_error;
Py_INCREF(value);
@@ -45,7 +45,7 @@
TARGET(LOAD_FAST) {
PyObject *value;
- #line 110 "Python/bytecodes.c"
+ #line 156 "Python/bytecodes.c"
value = GETLOCAL(oparg);
assert(value != NULL);
Py_INCREF(value);
@@ -58,7 +58,7 @@
TARGET(LOAD_CONST) {
PREDICTED(LOAD_CONST);
PyObject *value;
- #line 116 "Python/bytecodes.c"
+ #line 162 "Python/bytecodes.c"
value = GETITEM(frame->f_code->co_consts, oparg);
Py_INCREF(value);
#line 65 "Python/generated_cases.c.h"
@@ -69,7 +69,7 @@
TARGET(STORE_FAST) {
PyObject *value = stack_pointer[-1];
- #line 121 "Python/bytecodes.c"
+ #line 167 "Python/bytecodes.c"
SETLOCAL(oparg, value);
#line 75 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -81,7 +81,7 @@
PyObject *_tmp_2;
{
PyObject *value;
- #line 110 "Python/bytecodes.c"
+ #line 156 "Python/bytecodes.c"
value = GETLOCAL(oparg);
assert(value != NULL);
Py_INCREF(value);
@@ -91,7 +91,7 @@
oparg = (next_instr++)->op.arg;
{
PyObject *value;
- #line 110 "Python/bytecodes.c"
+ #line 156 "Python/bytecodes.c"
value = GETLOCAL(oparg);
assert(value != NULL);
Py_INCREF(value);
@@ -109,7 +109,7 @@
PyObject *_tmp_2;
{
PyObject *value;
- #line 110 "Python/bytecodes.c"
+ #line 156 "Python/bytecodes.c"
value = GETLOCAL(oparg);
assert(value != NULL);
Py_INCREF(value);
@@ -119,7 +119,7 @@
oparg = (next_instr++)->op.arg;
{
PyObject *value;
- #line 116 "Python/bytecodes.c"
+ #line 162 "Python/bytecodes.c"
value = GETITEM(frame->f_code->co_consts, oparg);
Py_INCREF(value);
#line 126 "Python/generated_cases.c.h"
@@ -135,14 +135,14 @@
PyObject *_tmp_1 = stack_pointer[-1];
{
PyObject *value = _tmp_1;
- #line 121 "Python/bytecodes.c"
+ #line 167 "Python/bytecodes.c"
SETLOCAL(oparg, value);
#line 141 "Python/generated_cases.c.h"
}
oparg = (next_instr++)->op.arg;
{
PyObject *value;
- #line 110 "Python/bytecodes.c"
+ #line 156 "Python/bytecodes.c"
value = GETLOCAL(oparg);
assert(value != NULL);
Py_INCREF(value);
@@ -158,14 +158,14 @@
PyObject *_tmp_2 = stack_pointer[-2];
{
PyObject *value = _tmp_1;
- #line 121 "Python/bytecodes.c"
+ #line 167 "Python/bytecodes.c"
SETLOCAL(oparg, value);
#line 164 "Python/generated_cases.c.h"
}
oparg = (next_instr++)->op.arg;
{
PyObject *value = _tmp_2;
- #line 121 "Python/bytecodes.c"
+ #line 167 "Python/bytecodes.c"
SETLOCAL(oparg, value);
#line 171 "Python/generated_cases.c.h"
}
@@ -178,7 +178,7 @@
PyObject *_tmp_2;
{
PyObject *value;
- #line 116 "Python/bytecodes.c"
+ #line 162 "Python/bytecodes.c"
value = GETITEM(frame->f_code->co_consts, oparg);
Py_INCREF(value);
#line 185 "Python/generated_cases.c.h"
@@ -187,7 +187,7 @@
oparg = (next_instr++)->op.arg;
{
PyObject *value;
- #line 110 "Python/bytecodes.c"
+ #line 156 "Python/bytecodes.c"
value = GETLOCAL(oparg);
assert(value != NULL);
Py_INCREF(value);
@@ -202,7 +202,7 @@
TARGET(POP_TOP) {
PyObject *value = stack_pointer[-1];
- #line 131 "Python/bytecodes.c"
+ #line 177 "Python/bytecodes.c"
#line 207 "Python/generated_cases.c.h"
Py_DECREF(value);
STACK_SHRINK(1);
@@ -211,7 +211,7 @@
TARGET(PUSH_NULL) {
PyObject *res;
- #line 135 "Python/bytecodes.c"
+ #line 181 "Python/bytecodes.c"
res = NULL;
#line 217 "Python/generated_cases.c.h"
STACK_GROW(1);
@@ -224,13 +224,13 @@
PyObject *_tmp_2 = stack_pointer[-2];
{
PyObject *value = _tmp_1;
- #line 131 "Python/bytecodes.c"
+ #line 177 "Python/bytecodes.c"
#line 229 "Python/generated_cases.c.h"
Py_DECREF(value);
}
{
PyObject *value = _tmp_2;
- #line 131 "Python/bytecodes.c"
+ #line 177 "Python/bytecodes.c"
#line 235 "Python/generated_cases.c.h"
Py_DECREF(value);
}
@@ -241,11 +241,11 @@
TARGET(UNARY_NEGATIVE) {
PyObject *value = stack_pointer[-1];
PyObject *res;
- #line 141 "Python/bytecodes.c"
+ #line 187 "Python/bytecodes.c"
res = PyNumber_Negative(value);
#line 247 "Python/generated_cases.c.h"
Py_DECREF(value);
- #line 143 "Python/bytecodes.c"
+ #line 189 "Python/bytecodes.c"
if (res == NULL) goto pop_1_error;
#line 251 "Python/generated_cases.c.h"
stack_pointer[-1] = res;
@@ -255,11 +255,11 @@
TARGET(UNARY_NOT) {
PyObject *value = stack_pointer[-1];
PyObject *res;
- #line 147 "Python/bytecodes.c"
+ #line 193 "Python/bytecodes.c"
int err = PyObject_IsTrue(value);
#line 261 "Python/generated_cases.c.h"
Py_DECREF(value);
- #line 149 "Python/bytecodes.c"
+ #line 195 "Python/bytecodes.c"
if (err < 0) goto pop_1_error;
if (err == 0) {
res = Py_True;
@@ -276,11 +276,11 @@
TARGET(UNARY_INVERT) {
PyObject *value = stack_pointer[-1];
PyObject *res;
- #line 160 "Python/bytecodes.c"
+ #line 206 "Python/bytecodes.c"
res = PyNumber_Invert(value);
#line 282 "Python/generated_cases.c.h"
Py_DECREF(value);
- #line 162 "Python/bytecodes.c"
+ #line 208 "Python/bytecodes.c"
if (res == NULL) goto pop_1_error;
#line 286 "Python/generated_cases.c.h"
stack_pointer[-1] = res;
@@ -291,7 +291,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *prod;
- #line 179 "Python/bytecodes.c"
+ #line 225 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyLong_CheckExact(left), BINARY_OP);
DEOPT_IF(!PyLong_CheckExact(right), BINARY_OP);
@@ -311,7 +311,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *prod;
- #line 190 "Python/bytecodes.c"
+ #line 236 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyFloat_CheckExact(left), BINARY_OP);
DEOPT_IF(!PyFloat_CheckExact(right), BINARY_OP);
@@ -330,7 +330,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *sub;
- #line 200 "Python/bytecodes.c"
+ #line 246 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyLong_CheckExact(left), BINARY_OP);
DEOPT_IF(!PyLong_CheckExact(right), BINARY_OP);
@@ -350,7 +350,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *sub;
- #line 211 "Python/bytecodes.c"
+ #line 257 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyFloat_CheckExact(left), BINARY_OP);
DEOPT_IF(!PyFloat_CheckExact(right), BINARY_OP);
@@ -368,7 +368,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *res;
- #line 220 "Python/bytecodes.c"
+ #line 266 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyUnicode_CheckExact(left), BINARY_OP);
DEOPT_IF(Py_TYPE(right) != Py_TYPE(left), BINARY_OP);
@@ -387,7 +387,7 @@
TARGET(BINARY_OP_INPLACE_ADD_UNICODE) {
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
- #line 237 "Python/bytecodes.c"
+ #line 283 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyUnicode_CheckExact(left), BINARY_OP);
DEOPT_IF(Py_TYPE(right) != Py_TYPE(left), BINARY_OP);
@@ -424,7 +424,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *sum;
- #line 267 "Python/bytecodes.c"
+ #line 313 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyFloat_CheckExact(left), BINARY_OP);
DEOPT_IF(Py_TYPE(right) != Py_TYPE(left), BINARY_OP);
@@ -443,7 +443,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *sum;
- #line 277 "Python/bytecodes.c"
+ #line 323 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyLong_CheckExact(left), BINARY_OP);
DEOPT_IF(Py_TYPE(right) != Py_TYPE(left), BINARY_OP);
@@ -465,7 +465,7 @@
PyObject *sub = stack_pointer[-1];
PyObject *container = stack_pointer[-2];
PyObject *res;
- #line 296 "Python/bytecodes.c"
+ #line 342 "Python/bytecodes.c"
#if ENABLE_SPECIALIZATION
_PyBinarySubscrCache *cache = (_PyBinarySubscrCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
@@ -481,7 +481,7 @@
#line 482 "Python/generated_cases.c.h"
Py_DECREF(container);
Py_DECREF(sub);
- #line 309 "Python/bytecodes.c"
+ #line 355 "Python/bytecodes.c"
if (res == NULL) goto pop_2_error;
#line 487 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -495,7 +495,7 @@
PyObject *start = stack_pointer[-2];
PyObject *container = stack_pointer[-3];
PyObject *res;
- #line 313 "Python/bytecodes.c"
+ #line 359 "Python/bytecodes.c"
PyObject *slice = _PyBuildSlice_ConsumeRefs(start, stop);
// Can't use ERROR_IF() here, because we haven't
// DECREF'ed container yet, and we still own slice.
@@ -519,7 +519,7 @@
PyObject *start = stack_pointer[-2];
PyObject *container = stack_pointer[-3];
PyObject *v = stack_pointer[-4];
- #line 328 "Python/bytecodes.c"
+ #line 374 "Python/bytecodes.c"
PyObject *slice = _PyBuildSlice_ConsumeRefs(start, stop);
int err;
if (slice == NULL) {
@@ -541,7 +541,7 @@
PyObject *sub = stack_pointer[-1];
PyObject *list = stack_pointer[-2];
PyObject *res;
- #line 343 "Python/bytecodes.c"
+ #line 389 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyLong_CheckExact(sub), BINARY_SUBSCR);
DEOPT_IF(!PyList_CheckExact(list), BINARY_SUBSCR);
@@ -567,7 +567,7 @@
PyObject *sub = stack_pointer[-1];
PyObject *tuple = stack_pointer[-2];
PyObject *res;
- #line 360 "Python/bytecodes.c"
+ #line 406 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyLong_CheckExact(sub), BINARY_SUBSCR);
DEOPT_IF(!PyTuple_CheckExact(tuple), BINARY_SUBSCR);
@@ -593,7 +593,7 @@
PyObject *sub = stack_pointer[-1];
PyObject *dict = stack_pointer[-2];
PyObject *res;
- #line 377 "Python/bytecodes.c"
+ #line 423 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyDict_CheckExact(dict), BINARY_SUBSCR);
STAT_INC(BINARY_SUBSCR, hit);
@@ -605,7 +605,7 @@
#line 606 "Python/generated_cases.c.h"
Py_DECREF(dict);
Py_DECREF(sub);
- #line 386 "Python/bytecodes.c"
+ #line 432 "Python/bytecodes.c"
if (true) goto pop_2_error;
}
Py_INCREF(res); // Do this before DECREF'ing dict, sub
@@ -621,7 +621,7 @@
TARGET(BINARY_SUBSCR_GETITEM) {
PyObject *sub = stack_pointer[-1];
PyObject *container = stack_pointer[-2];
- #line 393 "Python/bytecodes.c"
+ #line 439 "Python/bytecodes.c"
PyTypeObject *tp = Py_TYPE(container);
DEOPT_IF(!PyType_HasFeature(tp, Py_TPFLAGS_HEAPTYPE), BINARY_SUBSCR);
PyHeapTypeObject *ht = (PyHeapTypeObject *)tp;
@@ -648,7 +648,7 @@
TARGET(LIST_APPEND) {
PyObject *v = stack_pointer[-1];
PyObject *list = stack_pointer[-(2 + (oparg-1))];
- #line 416 "Python/bytecodes.c"
+ #line 462 "Python/bytecodes.c"
if (_PyList_AppendTakeRef((PyListObject *)list, v) < 0) goto pop_1_error;
#line 654 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -659,11 +659,11 @@
TARGET(SET_ADD) {
PyObject *v = stack_pointer[-1];
PyObject *set = stack_pointer[-(2 + (oparg-1))];
- #line 421 "Python/bytecodes.c"
+ #line 467 "Python/bytecodes.c"
int err = PySet_Add(set, v);
#line 665 "Python/generated_cases.c.h"
Py_DECREF(v);
- #line 423 "Python/bytecodes.c"
+ #line 469 "Python/bytecodes.c"
if (err) goto pop_1_error;
#line 669 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -678,7 +678,7 @@
PyObject *container = stack_pointer[-2];
PyObject *v = stack_pointer[-3];
uint16_t counter = read_u16(&next_instr[0].cache);
- #line 434 "Python/bytecodes.c"
+ #line 480 "Python/bytecodes.c"
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
assert(cframe.use_tracing == 0);
@@ -698,7 +698,7 @@
Py_DECREF(v);
Py_DECREF(container);
Py_DECREF(sub);
- #line 450 "Python/bytecodes.c"
+ #line 496 "Python/bytecodes.c"
if (err) goto pop_3_error;
#line 704 "Python/generated_cases.c.h"
STACK_SHRINK(3);
@@ -710,7 +710,7 @@
PyObject *sub = stack_pointer[-1];
PyObject *list = stack_pointer[-2];
PyObject *value = stack_pointer[-3];
- #line 454 "Python/bytecodes.c"
+ #line 500 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyLong_CheckExact(sub), STORE_SUBSCR);
DEOPT_IF(!PyList_CheckExact(list), STORE_SUBSCR);
@@ -738,7 +738,7 @@
PyObject *sub = stack_pointer[-1];
PyObject *dict = stack_pointer[-2];
PyObject *value = stack_pointer[-3];
- #line 474 "Python/bytecodes.c"
+ #line 520 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyDict_CheckExact(dict), STORE_SUBSCR);
STAT_INC(STORE_SUBSCR, hit);
@@ -754,13 +754,13 @@
TARGET(DELETE_SUBSCR) {
PyObject *sub = stack_pointer[-1];
PyObject *container = stack_pointer[-2];
- #line 483 "Python/bytecodes.c"
+ #line 529 "Python/bytecodes.c"
/* del container[sub] */
int err = PyObject_DelItem(container, sub);
#line 761 "Python/generated_cases.c.h"
Py_DECREF(container);
Py_DECREF(sub);
- #line 486 "Python/bytecodes.c"
+ #line 532 "Python/bytecodes.c"
if (err) goto pop_2_error;
#line 766 "Python/generated_cases.c.h"
STACK_SHRINK(2);
@@ -770,12 +770,12 @@
TARGET(CALL_INTRINSIC_1) {
PyObject *value = stack_pointer[-1];
PyObject *res;
- #line 490 "Python/bytecodes.c"
+ #line 536 "Python/bytecodes.c"
assert(oparg <= MAX_INTRINSIC_1);
res = _PyIntrinsics_UnaryFunctions[oparg](tstate, value);
#line 777 "Python/generated_cases.c.h"
Py_DECREF(value);
- #line 493 "Python/bytecodes.c"
+ #line 539 "Python/bytecodes.c"
if (res == NULL) goto pop_1_error;
#line 781 "Python/generated_cases.c.h"
stack_pointer[-1] = res;
@@ -786,13 +786,13 @@
PyObject *value1 = stack_pointer[-1];
PyObject *value2 = stack_pointer[-2];
PyObject *res;
- #line 497 "Python/bytecodes.c"
+ #line 543 "Python/bytecodes.c"
assert(oparg <= MAX_INTRINSIC_2);
res = _PyIntrinsics_BinaryFunctions[oparg](tstate, value2, value1);
#line 793 "Python/generated_cases.c.h"
Py_DECREF(value2);
Py_DECREF(value1);
- #line 500 "Python/bytecodes.c"
+ #line 546 "Python/bytecodes.c"
if (res == NULL) goto pop_2_error;
#line 798 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -802,7 +802,7 @@
TARGET(RAISE_VARARGS) {
PyObject **args = (stack_pointer - oparg);
- #line 504 "Python/bytecodes.c"
+ #line 550 "Python/bytecodes.c"
PyObject *cause = NULL, *exc = NULL;
switch (oparg) {
case 2:
@@ -825,7 +825,7 @@
TARGET(INTERPRETER_EXIT) {
PyObject *retval = stack_pointer[-1];
- #line 524 "Python/bytecodes.c"
+ #line 570 "Python/bytecodes.c"
assert(frame == &entry_frame);
assert(_PyFrame_IsIncomplete(frame));
STACK_SHRINK(1); // Since we're not going to DISPATCH()
@@ -842,7 +842,7 @@
TARGET(RETURN_VALUE) {
PyObject *retval = stack_pointer[-1];
- #line 538 "Python/bytecodes.c"
+ #line 584 "Python/bytecodes.c"
STACK_SHRINK(1);
assert(EMPTY());
_PyFrame_SetStackPointer(frame, stack_pointer);
@@ -860,7 +860,7 @@
}
TARGET(RETURN_CONST) {
- #line 554 "Python/bytecodes.c"
+ #line 600 "Python/bytecodes.c"
PyObject *retval = GETITEM(frame->f_code->co_consts, oparg);
Py_INCREF(retval);
assert(EMPTY());
@@ -881,7 +881,7 @@
TARGET(GET_AITER) {
PyObject *obj = stack_pointer[-1];
PyObject *iter;
- #line 571 "Python/bytecodes.c"
+ #line 617 "Python/bytecodes.c"
unaryfunc getter = NULL;
PyTypeObject *type = Py_TYPE(obj);
@@ -896,14 +896,14 @@
type->tp_name);
#line 898 "Python/generated_cases.c.h"
Py_DECREF(obj);
- #line 584 "Python/bytecodes.c"
+ #line 630 "Python/bytecodes.c"
if (true) goto pop_1_error;
}
iter = (*getter)(obj);
#line 905 "Python/generated_cases.c.h"
Py_DECREF(obj);
- #line 589 "Python/bytecodes.c"
+ #line 635 "Python/bytecodes.c"
if (iter == NULL) goto pop_1_error;
if (Py_TYPE(iter)->tp_as_async == NULL ||
@@ -924,7 +924,7 @@
TARGET(GET_ANEXT) {
PyObject *aiter = stack_pointer[-1];
PyObject *awaitable;
- #line 604 "Python/bytecodes.c"
+ #line 650 "Python/bytecodes.c"
unaryfunc getter = NULL;
PyObject *next_iter = NULL;
PyTypeObject *type = Py_TYPE(aiter);
@@ -979,7 +979,7 @@
PREDICTED(GET_AWAITABLE);
PyObject *iterable = stack_pointer[-1];
PyObject *iter;
- #line 651 "Python/bytecodes.c"
+ #line 697 "Python/bytecodes.c"
iter = _PyCoro_GetAwaitableIter(iterable);
if (iter == NULL) {
@@ -988,7 +988,7 @@
#line 990 "Python/generated_cases.c.h"
Py_DECREF(iterable);
- #line 658 "Python/bytecodes.c"
+ #line 704 "Python/bytecodes.c"
if (iter != NULL && PyCoro_CheckExact(iter)) {
PyObject *yf = _PyGen_yf((PyGenObject*)iter);
@@ -1017,7 +1017,7 @@
PyObject *v = stack_pointer[-1];
PyObject *receiver = stack_pointer[-2];
PyObject *retval;
- #line 684 "Python/bytecodes.c"
+ #line 730 "Python/bytecodes.c"
#if ENABLE_SPECIALIZATION
_PySendCache *cache = (_PySendCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
@@ -1062,7 +1062,7 @@
TARGET(SEND_GEN) {
PyObject *v = stack_pointer[-1];
PyObject *receiver = stack_pointer[-2];
- #line 722 "Python/bytecodes.c"
+ #line 768 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
PyGenObject *gen = (PyGenObject *)receiver;
DEOPT_IF(Py_TYPE(gen) != &PyGen_Type &&
@@ -1083,7 +1083,7 @@
TARGET(YIELD_VALUE) {
PyObject *retval = stack_pointer[-1];
- #line 740 "Python/bytecodes.c"
+ #line 786 "Python/bytecodes.c"
// NOTE: It's important that YIELD_VALUE never raises an exception!
// The compiler treats any exception raised here as a failed close()
// or throw() call.
@@ -1107,7 +1107,7 @@
TARGET(POP_EXCEPT) {
PyObject *exc_value = stack_pointer[-1];
- #line 761 "Python/bytecodes.c"
+ #line 807 "Python/bytecodes.c"
_PyErr_StackItem *exc_info = tstate->exc_info;
Py_XSETREF(exc_info->exc_value, exc_value);
#line 1114 "Python/generated_cases.c.h"
@@ -1118,7 +1118,7 @@
TARGET(RERAISE) {
PyObject *exc = stack_pointer[-1];
PyObject **values = (stack_pointer - (1 + oparg));
- #line 766 "Python/bytecodes.c"
+ #line 812 "Python/bytecodes.c"
assert(oparg >= 0 && oparg <= 2);
if (oparg) {
PyObject *lasti = values[0];
@@ -1142,13 +1142,13 @@
TARGET(END_ASYNC_FOR) {
PyObject *exc = stack_pointer[-1];
PyObject *awaitable = stack_pointer[-2];
- #line 786 "Python/bytecodes.c"
+ #line 832 "Python/bytecodes.c"
assert(exc && PyExceptionInstance_Check(exc));
if (PyErr_GivenExceptionMatches(exc, PyExc_StopAsyncIteration)) {
#line 1149 "Python/generated_cases.c.h"
Py_DECREF(awaitable);
Py_DECREF(exc);
- #line 789 "Python/bytecodes.c"
+ #line 835 "Python/bytecodes.c"
}
else {
Py_INCREF(exc);
@@ -1166,7 +1166,7 @@
PyObject *sub_iter = stack_pointer[-3];
PyObject *none;
PyObject *value;
- #line 798 "Python/bytecodes.c"
+ #line 844 "Python/bytecodes.c"
assert(throwflag);
assert(exc_value && PyExceptionInstance_Check(exc_value));
if (PyErr_GivenExceptionMatches(exc_value, PyExc_StopIteration)) {
@@ -1175,7 +1175,7 @@
Py_DECREF(sub_iter);
Py_DECREF(last_sent_val);
Py_DECREF(exc_value);
- #line 803 "Python/bytecodes.c"
+ #line 849 "Python/bytecodes.c"
none = Py_NewRef(Py_None);
}
else {
@@ -1191,7 +1191,7 @@
TARGET(LOAD_ASSERTION_ERROR) {
PyObject *value;
- #line 812 "Python/bytecodes.c"
+ #line 858 "Python/bytecodes.c"
value = Py_NewRef(PyExc_AssertionError);
#line 1197 "Python/generated_cases.c.h"
STACK_GROW(1);
@@ -1201,7 +1201,7 @@
TARGET(LOAD_BUILD_CLASS) {
PyObject *bc;
- #line 816 "Python/bytecodes.c"
+ #line 862 "Python/bytecodes.c"
if (PyDict_CheckExact(BUILTINS())) {
bc = _PyDict_GetItemWithError(BUILTINS(),
&_Py_ID(__build_class__));
@@ -1231,7 +1231,7 @@
TARGET(STORE_NAME) {
PyObject *v = stack_pointer[-1];
- #line 840 "Python/bytecodes.c"
+ #line 886 "Python/bytecodes.c"
PyObject *name = GETITEM(frame->f_code->co_names, oparg);
PyObject *ns = LOCALS();
int err;
@@ -1240,7 +1240,7 @@
"no locals found when storing %R", name);
#line 1242 "Python/generated_cases.c.h"
Py_DECREF(v);
- #line 847 "Python/bytecodes.c"
+ #line 893 "Python/bytecodes.c"
if (true) goto pop_1_error;
}
if (PyDict_CheckExact(ns))
@@ -1249,7 +1249,7 @@
err = PyObject_SetItem(ns, name, v);
#line 1251 "Python/generated_cases.c.h"
Py_DECREF(v);
- #line 854 "Python/bytecodes.c"
+ #line 900 "Python/bytecodes.c"
if (err) goto pop_1_error;
#line 1255 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -1257,7 +1257,7 @@
}
TARGET(DELETE_NAME) {
- #line 858 "Python/bytecodes.c"
+ #line 904 "Python/bytecodes.c"
PyObject *name = GETITEM(frame->f_code->co_names, oparg);
PyObject *ns = LOCALS();
int err;
@@ -1282,7 +1282,7 @@
PREDICTED(UNPACK_SEQUENCE);
static_assert(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE == 1, "incorrect cache size");
PyObject *seq = stack_pointer[-1];
- #line 884 "Python/bytecodes.c"
+ #line 930 "Python/bytecodes.c"
#if ENABLE_SPECIALIZATION
_PyUnpackSequenceCache *cache = (_PyUnpackSequenceCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
@@ -1298,7 +1298,7 @@
int res = unpack_iterable(tstate, seq, oparg, -1, top);
#line 1300 "Python/generated_cases.c.h"
Py_DECREF(seq);
- #line 898 "Python/bytecodes.c"
+ #line 944 "Python/bytecodes.c"
if (res == 0) goto pop_1_error;
#line 1304 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -1310,7 +1310,7 @@
TARGET(UNPACK_SEQUENCE_TWO_TUPLE) {
PyObject *seq = stack_pointer[-1];
PyObject **values = stack_pointer - (1);
- #line 902 "Python/bytecodes.c"
+ #line 948 "Python/bytecodes.c"
DEOPT_IF(!PyTuple_CheckExact(seq), UNPACK_SEQUENCE);
DEOPT_IF(PyTuple_GET_SIZE(seq) != 2, UNPACK_SEQUENCE);
assert(oparg == 2);
@@ -1328,7 +1328,7 @@
TARGET(UNPACK_SEQUENCE_TUPLE) {
PyObject *seq = stack_pointer[-1];
PyObject **values = stack_pointer - (1);
- #line 912 "Python/bytecodes.c"
+ #line 958 "Python/bytecodes.c"
DEOPT_IF(!PyTuple_CheckExact(seq), UNPACK_SEQUENCE);
DEOPT_IF(PyTuple_GET_SIZE(seq) != oparg, UNPACK_SEQUENCE);
STAT_INC(UNPACK_SEQUENCE, hit);
@@ -1347,7 +1347,7 @@
TARGET(UNPACK_SEQUENCE_LIST) {
PyObject *seq = stack_pointer[-1];
PyObject **values = stack_pointer - (1);
- #line 923 "Python/bytecodes.c"
+ #line 969 "Python/bytecodes.c"
DEOPT_IF(!PyList_CheckExact(seq), UNPACK_SEQUENCE);
DEOPT_IF(PyList_GET_SIZE(seq) != oparg, UNPACK_SEQUENCE);
STAT_INC(UNPACK_SEQUENCE, hit);
@@ -1365,13 +1365,13 @@
TARGET(UNPACK_EX) {
PyObject *seq = stack_pointer[-1];
- #line 934 "Python/bytecodes.c"
+ #line 980 "Python/bytecodes.c"
int totalargs = 1 + (oparg & 0xFF) + (oparg >> 8);
PyObject **top = stack_pointer + totalargs - 1;
int res = unpack_iterable(tstate, seq, oparg & 0xFF, oparg >> 8, top);
#line 1373 "Python/generated_cases.c.h"
Py_DECREF(seq);
- #line 938 "Python/bytecodes.c"
+ #line 984 "Python/bytecodes.c"
if (res == 0) goto pop_1_error;
#line 1377 "Python/generated_cases.c.h"
STACK_GROW((oparg & 0xFF) + (oparg >> 8));
@@ -1384,7 +1384,7 @@
PyObject *owner = stack_pointer[-1];
PyObject *v = stack_pointer[-2];
uint16_t counter = read_u16(&next_instr[0].cache);
- #line 949 "Python/bytecodes.c"
+ #line 995 "Python/bytecodes.c"
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
assert(cframe.use_tracing == 0);
@@ -1404,7 +1404,7 @@
#line 1405 "Python/generated_cases.c.h"
Py_DECREF(v);
Py_DECREF(owner);
- #line 966 "Python/bytecodes.c"
+ #line 1012 "Python/bytecodes.c"
if (err) goto pop_2_error;
#line 1410 "Python/generated_cases.c.h"
STACK_SHRINK(2);
@@ -1414,12 +1414,12 @@
TARGET(DELETE_ATTR) {
PyObject *owner = stack_pointer[-1];
- #line 970 "Python/bytecodes.c"
+ #line 1016 "Python/bytecodes.c"
PyObject *name = GETITEM(frame->f_code->co_names, oparg);
int err = PyObject_SetAttr(owner, name, (PyObject *)NULL);
#line 1421 "Python/generated_cases.c.h"
Py_DECREF(owner);
- #line 973 "Python/bytecodes.c"
+ #line 1019 "Python/bytecodes.c"
if (err) goto pop_1_error;
#line 1425 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -1428,12 +1428,12 @@
TARGET(STORE_GLOBAL) {
PyObject *v = stack_pointer[-1];
- #line 977 "Python/bytecodes.c"
+ #line 1023 "Python/bytecodes.c"
PyObject *name = GETITEM(frame->f_code->co_names, oparg);
int err = PyDict_SetItem(GLOBALS(), name, v);
#line 1435 "Python/generated_cases.c.h"
Py_DECREF(v);
- #line 980 "Python/bytecodes.c"
+ #line 1026 "Python/bytecodes.c"
if (err) goto pop_1_error;
#line 1439 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -1441,7 +1441,7 @@
}
TARGET(DELETE_GLOBAL) {
- #line 984 "Python/bytecodes.c"
+ #line 1030 "Python/bytecodes.c"
PyObject *name = GETITEM(frame->f_code->co_names, oparg);
int err;
err = PyDict_DelItem(GLOBALS(), name);
@@ -1459,7 +1459,7 @@
TARGET(LOAD_NAME) {
PyObject *v;
- #line 998 "Python/bytecodes.c"
+ #line 1044 "Python/bytecodes.c"
PyObject *name = GETITEM(frame->f_code->co_names, oparg);
PyObject *locals = LOCALS();
if (locals == NULL) {
@@ -1529,7 +1529,7 @@
static_assert(INLINE_CACHE_ENTRIES_LOAD_GLOBAL == 4, "incorrect cache size");
PyObject *null = NULL;
PyObject *v;
- #line 1065 "Python/bytecodes.c"
+ #line 1111 "Python/bytecodes.c"
#if ENABLE_SPECIALIZATION
_PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
@@ -1596,7 +1596,7 @@
PyObject *res;
uint16_t index = read_u16(&next_instr[1].cache);
uint16_t version = read_u16(&next_instr[2].cache);
- #line 1120 "Python/bytecodes.c"
+ #line 1166 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyDict_CheckExact(GLOBALS()), LOAD_GLOBAL);
PyDictObject *dict = (PyDictObject *)GLOBALS();
@@ -1623,7 +1623,7 @@
uint16_t index = read_u16(&next_instr[1].cache);
uint16_t mod_version = read_u16(&next_instr[2].cache);
uint16_t bltn_version = read_u16(&next_instr[3].cache);
- #line 1134 "Python/bytecodes.c"
+ #line 1180 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyDict_CheckExact(GLOBALS()), LOAD_GLOBAL);
DEOPT_IF(!PyDict_CheckExact(BUILTINS()), LOAD_GLOBAL);
@@ -1648,7 +1648,7 @@
}
TARGET(DELETE_FAST) {
- #line 1151 "Python/bytecodes.c"
+ #line 1197 "Python/bytecodes.c"
PyObject *v = GETLOCAL(oparg);
if (v == NULL) goto unbound_local_error;
SETLOCAL(oparg, NULL);
@@ -1657,7 +1657,7 @@
}
TARGET(MAKE_CELL) {
- #line 1157 "Python/bytecodes.c"
+ #line 1203 "Python/bytecodes.c"
// "initial" is probably NULL but not if it's an arg (or set
// via PyFrame_LocalsToFast() before MAKE_CELL has run).
PyObject *initial = GETLOCAL(oparg);
@@ -1671,7 +1671,7 @@
}
TARGET(DELETE_DEREF) {
- #line 1168 "Python/bytecodes.c"
+ #line 1214 "Python/bytecodes.c"
PyObject *cell = GETLOCAL(oparg);
PyObject *oldobj = PyCell_GET(cell);
// Can't use ERROR_IF here.
@@ -1688,7 +1688,7 @@
TARGET(LOAD_CLASSDEREF) {
PyObject *value;
- #line 1181 "Python/bytecodes.c"
+ #line 1227 "Python/bytecodes.c"
PyObject *name, *locals = LOCALS();
assert(locals);
assert(oparg >= 0 && oparg < frame->f_code->co_nlocalsplus);
@@ -1728,7 +1728,7 @@
TARGET(LOAD_DEREF) {
PyObject *value;
- #line 1215 "Python/bytecodes.c"
+ #line 1261 "Python/bytecodes.c"
PyObject *cell = GETLOCAL(oparg);
value = PyCell_GET(cell);
if (value == NULL) {
@@ -1744,7 +1744,7 @@
TARGET(STORE_DEREF) {
PyObject *v = stack_pointer[-1];
- #line 1225 "Python/bytecodes.c"
+ #line 1271 "Python/bytecodes.c"
PyObject *cell = GETLOCAL(oparg);
PyObject *oldobj = PyCell_GET(cell);
PyCell_SET(cell, v);
@@ -1755,7 +1755,7 @@
}
TARGET(COPY_FREE_VARS) {
- #line 1232 "Python/bytecodes.c"
+ #line 1278 "Python/bytecodes.c"
/* Copy closure variables to free variables */
PyCodeObject *co = frame->f_code;
assert(PyFunction_Check(frame->f_funcobj));
@@ -1773,13 +1773,13 @@
TARGET(BUILD_STRING) {
PyObject **pieces = (stack_pointer - oparg);
PyObject *str;
- #line 1245 "Python/bytecodes.c"
+ #line 1291 "Python/bytecodes.c"
str = _PyUnicode_JoinArray(&_Py_STR(empty), pieces, oparg);
#line 1779 "Python/generated_cases.c.h"
for (int _i = oparg; --_i >= 0;) {
Py_DECREF(pieces[_i]);
}
- #line 1247 "Python/bytecodes.c"
+ #line 1293 "Python/bytecodes.c"
if (str == NULL) { STACK_SHRINK(oparg); goto error; }
#line 1785 "Python/generated_cases.c.h"
STACK_SHRINK(oparg);
@@ -1791,7 +1791,7 @@
TARGET(BUILD_TUPLE) {
PyObject **values = (stack_pointer - oparg);
PyObject *tup;
- #line 1251 "Python/bytecodes.c"
+ #line 1297 "Python/bytecodes.c"
tup = _PyTuple_FromArraySteal(values, oparg);
if (tup == NULL) { STACK_SHRINK(oparg); goto error; }
#line 1798 "Python/generated_cases.c.h"
@@ -1804,7 +1804,7 @@
TARGET(BUILD_LIST) {
PyObject **values = (stack_pointer - oparg);
PyObject *list;
- #line 1256 "Python/bytecodes.c"
+ #line 1302 "Python/bytecodes.c"
list = _PyList_FromArraySteal(values, oparg);
if (list == NULL) { STACK_SHRINK(oparg); goto error; }
#line 1811 "Python/generated_cases.c.h"
@@ -1817,7 +1817,7 @@
TARGET(LIST_EXTEND) {
PyObject *iterable = stack_pointer[-1];
PyObject *list = stack_pointer[-(2 + (oparg-1))];
- #line 1261 "Python/bytecodes.c"
+ #line 1307 "Python/bytecodes.c"
PyObject *none_val = _PyList_Extend((PyListObject *)list, iterable);
if (none_val == NULL) {
if (_PyErr_ExceptionMatches(tstate, PyExc_TypeError) &&
@@ -1830,7 +1830,7 @@
}
#line 1832 "Python/generated_cases.c.h"
Py_DECREF(iterable);
- #line 1272 "Python/bytecodes.c"
+ #line 1318 "Python/bytecodes.c"
if (true) goto pop_1_error;
}
Py_DECREF(none_val);
@@ -1843,11 +1843,11 @@
TARGET(SET_UPDATE) {
PyObject *iterable = stack_pointer[-1];
PyObject *set = stack_pointer[-(2 + (oparg-1))];
- #line 1279 "Python/bytecodes.c"
+ #line 1325 "Python/bytecodes.c"
int err = _PySet_Update(set, iterable);
#line 1849 "Python/generated_cases.c.h"
Py_DECREF(iterable);
- #line 1281 "Python/bytecodes.c"
+ #line 1327 "Python/bytecodes.c"
if (err < 0) goto pop_1_error;
#line 1853 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -1857,7 +1857,7 @@
TARGET(BUILD_SET) {
PyObject **values = (stack_pointer - oparg);
PyObject *set;
- #line 1285 "Python/bytecodes.c"
+ #line 1331 "Python/bytecodes.c"
set = PySet_New(NULL);
if (set == NULL)
goto error;
@@ -1882,7 +1882,7 @@
TARGET(BUILD_MAP) {
PyObject **values = (stack_pointer - oparg*2);
PyObject *map;
- #line 1302 "Python/bytecodes.c"
+ #line 1348 "Python/bytecodes.c"
map = _PyDict_FromItems(
values, 2,
values+1, 2,
@@ -1894,7 +1894,7 @@
for (int _i = oparg*2; --_i >= 0;) {
Py_DECREF(values[_i]);
}
- #line 1310 "Python/bytecodes.c"
+ #line 1356 "Python/bytecodes.c"
if (map == NULL) { STACK_SHRINK(oparg*2); goto error; }
#line 1900 "Python/generated_cases.c.h"
STACK_SHRINK(oparg*2);
@@ -1904,7 +1904,7 @@
}
TARGET(SETUP_ANNOTATIONS) {
- #line 1314 "Python/bytecodes.c"
+ #line 1360 "Python/bytecodes.c"
int err;
PyObject *ann_dict;
if (LOCALS() == NULL) {
@@ -1952,7 +1952,7 @@
PyObject *keys = stack_pointer[-1];
PyObject **values = (stack_pointer - (1 + oparg));
PyObject *map;
- #line 1356 "Python/bytecodes.c"
+ #line 1402 "Python/bytecodes.c"
if (!PyTuple_CheckExact(keys) ||
PyTuple_GET_SIZE(keys) != (Py_ssize_t)oparg) {
_PyErr_SetString(tstate, PyExc_SystemError,
@@ -1967,7 +1967,7 @@
Py_DECREF(values[_i]);
}
Py_DECREF(keys);
- #line 1366 "Python/bytecodes.c"
+ #line 1412 "Python/bytecodes.c"
if (map == NULL) { STACK_SHRINK(oparg); goto pop_1_error; }
#line 1973 "Python/generated_cases.c.h"
STACK_SHRINK(oparg);
@@ -1977,7 +1977,7 @@
TARGET(DICT_UPDATE) {
PyObject *update = stack_pointer[-1];
- #line 1370 "Python/bytecodes.c"
+ #line 1416 "Python/bytecodes.c"
PyObject *dict = PEEK(oparg + 1); // update is still on the stack
if (PyDict_Update(dict, update) < 0) {
if (_PyErr_ExceptionMatches(tstate, PyExc_AttributeError)) {
@@ -1987,7 +1987,7 @@
}
#line 1989 "Python/generated_cases.c.h"
Py_DECREF(update);
- #line 1378 "Python/bytecodes.c"
+ #line 1424 "Python/bytecodes.c"
if (true) goto pop_1_error;
}
#line 1994 "Python/generated_cases.c.h"
@@ -1998,14 +1998,14 @@
TARGET(DICT_MERGE) {
PyObject *update = stack_pointer[-1];
- #line 1384 "Python/bytecodes.c"
+ #line 1430 "Python/bytecodes.c"
PyObject *dict = PEEK(oparg + 1); // update is still on the stack
if (_PyDict_MergeEx(dict, update, 2) < 0) {
format_kwargs_error(tstate, PEEK(3 + oparg), update);
#line 2007 "Python/generated_cases.c.h"
Py_DECREF(update);
- #line 1389 "Python/bytecodes.c"
+ #line 1435 "Python/bytecodes.c"
if (true) goto pop_1_error;
}
#line 2012 "Python/generated_cases.c.h"
@@ -2018,7 +2018,7 @@
TARGET(MAP_ADD) {
PyObject *value = stack_pointer[-1];
PyObject *key = stack_pointer[-2];
- #line 1396 "Python/bytecodes.c"
+ #line 1442 "Python/bytecodes.c"
PyObject *dict = PEEK(oparg + 2); // key, value are still on the stack
assert(PyDict_CheckExact(dict));
/* dict[key] = value */
@@ -2036,7 +2036,7 @@
PyObject *owner = stack_pointer[-1];
PyObject *res2 = NULL;
PyObject *res;
- #line 1419 "Python/bytecodes.c"
+ #line 1465 "Python/bytecodes.c"
#if ENABLE_SPECIALIZATION
_PyAttrCache *cache = (_PyAttrCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
@@ -2073,7 +2073,7 @@
*/
#line 2075 "Python/generated_cases.c.h"
Py_DECREF(owner);
- #line 1454 "Python/bytecodes.c"
+ #line 1500 "Python/bytecodes.c"
if (meth == NULL) goto pop_1_error;
res2 = NULL;
res = meth;
@@ -2084,7 +2084,7 @@
res = PyObject_GetAttr(owner, name);
#line 2086 "Python/generated_cases.c.h"
Py_DECREF(owner);
- #line 1463 "Python/bytecodes.c"
+ #line 1509 "Python/bytecodes.c"
if (res == NULL) goto pop_1_error;
}
#line 2091 "Python/generated_cases.c.h"
@@ -2101,7 +2101,7 @@
PyObject *res;
uint32_t type_version = read_u32(&next_instr[1].cache);
uint16_t index = read_u16(&next_instr[3].cache);
- #line 1468 "Python/bytecodes.c"
+ #line 1514 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
PyTypeObject *tp = Py_TYPE(owner);
assert(type_version != 0);
@@ -2130,7 +2130,7 @@
PyObject *res;
uint32_t type_version = read_u32(&next_instr[1].cache);
uint16_t index = read_u16(&next_instr[3].cache);
- #line 1485 "Python/bytecodes.c"
+ #line 1531 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyModule_CheckExact(owner), LOAD_ATTR);
PyDictObject *dict = (PyDictObject *)((PyModuleObject *)owner)->md_dict;
@@ -2159,7 +2159,7 @@
PyObject *res;
uint32_t type_version = read_u32(&next_instr[1].cache);
uint16_t index = read_u16(&next_instr[3].cache);
- #line 1502 "Python/bytecodes.c"
+ #line 1548 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
PyTypeObject *tp = Py_TYPE(owner);
assert(type_version != 0);
@@ -2202,7 +2202,7 @@
PyObject *res;
uint32_t type_version = read_u32(&next_instr[1].cache);
uint16_t index = read_u16(&next_instr[3].cache);
- #line 1533 "Python/bytecodes.c"
+ #line 1579 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
PyTypeObject *tp = Py_TYPE(owner);
assert(type_version != 0);
@@ -2228,7 +2228,7 @@
PyObject *res;
uint32_t type_version = read_u32(&next_instr[1].cache);
PyObject *descr = read_obj(&next_instr[5].cache);
- #line 1547 "Python/bytecodes.c"
+ #line 1593 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyType_Check(cls), LOAD_ATTR);
@@ -2255,7 +2255,7 @@
uint32_t type_version = read_u32(&next_instr[1].cache);
uint32_t func_version = read_u32(&next_instr[3].cache);
PyObject *fget = read_obj(&next_instr[5].cache);
- #line 1563 "Python/bytecodes.c"
+ #line 1609 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(tstate->interp->eval_frame, LOAD_ATTR);
@@ -2287,7 +2287,7 @@
uint32_t type_version = read_u32(&next_instr[1].cache);
uint32_t func_version = read_u32(&next_instr[3].cache);
PyObject *getattribute = read_obj(&next_instr[5].cache);
- #line 1589 "Python/bytecodes.c"
+ #line 1635 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(tstate->interp->eval_frame, LOAD_ATTR);
PyTypeObject *cls = Py_TYPE(owner);
@@ -2321,7 +2321,7 @@
PyObject *value = stack_pointer[-2];
uint32_t type_version = read_u32(&next_instr[1].cache);
uint16_t index = read_u16(&next_instr[3].cache);
- #line 1617 "Python/bytecodes.c"
+ #line 1663 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
PyTypeObject *tp = Py_TYPE(owner);
assert(type_version != 0);
@@ -2351,7 +2351,7 @@
PyObject *value = stack_pointer[-2];
uint32_t type_version = read_u32(&next_instr[1].cache);
uint16_t hint = read_u16(&next_instr[3].cache);
- #line 1638 "Python/bytecodes.c"
+ #line 1684 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
PyTypeObject *tp = Py_TYPE(owner);
assert(type_version != 0);
@@ -2402,7 +2402,7 @@
PyObject *value = stack_pointer[-2];
uint32_t type_version = read_u32(&next_instr[1].cache);
uint16_t index = read_u16(&next_instr[3].cache);
- #line 1680 "Python/bytecodes.c"
+ #line 1726 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
PyTypeObject *tp = Py_TYPE(owner);
assert(type_version != 0);
@@ -2425,7 +2425,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *res;
- #line 1700 "Python/bytecodes.c"
+ #line 1746 "Python/bytecodes.c"
#if ENABLE_SPECIALIZATION
_PyCompareOpCache *cache = (_PyCompareOpCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
@@ -2442,7 +2442,7 @@
#line 2443 "Python/generated_cases.c.h"
Py_DECREF(left);
Py_DECREF(right);
- #line 1714 "Python/bytecodes.c"
+ #line 1760 "Python/bytecodes.c"
if (res == NULL) goto pop_2_error;
#line 2448 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -2455,7 +2455,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *res;
- #line 1718 "Python/bytecodes.c"
+ #line 1764 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyFloat_CheckExact(left), COMPARE_OP);
DEOPT_IF(!PyFloat_CheckExact(right), COMPARE_OP);
@@ -2479,7 +2479,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *res;
- #line 1734 "Python/bytecodes.c"
+ #line 1780 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyLong_CheckExact(left), COMPARE_OP);
DEOPT_IF(!PyLong_CheckExact(right), COMPARE_OP);
@@ -2507,7 +2507,7 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *res;
- #line 1754 "Python/bytecodes.c"
+ #line 1800 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(!PyUnicode_CheckExact(left), COMPARE_OP);
DEOPT_IF(!PyUnicode_CheckExact(right), COMPARE_OP);
@@ -2532,12 +2532,12 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *b;
- #line 1770 "Python/bytecodes.c"
+ #line 1816 "Python/bytecodes.c"
int res = Py_Is(left, right) ^ oparg;
#line 2538 "Python/generated_cases.c.h"
Py_DECREF(left);
Py_DECREF(right);
- #line 1772 "Python/bytecodes.c"
+ #line 1818 "Python/bytecodes.c"
b = Py_NewRef(res ? Py_True : Py_False);
#line 2543 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -2549,12 +2549,12 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *b;
- #line 1776 "Python/bytecodes.c"
+ #line 1822 "Python/bytecodes.c"
int res = PySequence_Contains(right, left);
#line 2555 "Python/generated_cases.c.h"
Py_DECREF(left);
Py_DECREF(right);
- #line 1778 "Python/bytecodes.c"
+ #line 1824 "Python/bytecodes.c"
if (res < 0) goto pop_2_error;
b = Py_NewRef((res^oparg) ? Py_True : Py_False);
#line 2561 "Python/generated_cases.c.h"
@@ -2568,12 +2568,12 @@
PyObject *exc_value = stack_pointer[-2];
PyObject *rest;
PyObject *match;
- #line 1783 "Python/bytecodes.c"
+ #line 1829 "Python/bytecodes.c"
if (check_except_star_type_valid(tstate, match_type) < 0) {
#line 2574 "Python/generated_cases.c.h"
Py_DECREF(exc_value);
Py_DECREF(match_type);
- #line 1785 "Python/bytecodes.c"
+ #line 1831 "Python/bytecodes.c"
if (true) goto pop_2_error;
}
@@ -2584,7 +2584,7 @@
#line 2585 "Python/generated_cases.c.h"
Py_DECREF(exc_value);
Py_DECREF(match_type);
- #line 1793 "Python/bytecodes.c"
+ #line 1839 "Python/bytecodes.c"
if (res < 0) goto pop_2_error;
assert((match == NULL) == (rest == NULL));
@@ -2603,19 +2603,19 @@
PyObject *right = stack_pointer[-1];
PyObject *left = stack_pointer[-2];
PyObject *b;
- #line 1804 "Python/bytecodes.c"
+ #line 1850 "Python/bytecodes.c"
assert(PyExceptionInstance_Check(left));
if (check_except_type_valid(tstate, right) < 0) {
#line 2610 "Python/generated_cases.c.h"
Py_DECREF(right);
- #line 1807 "Python/bytecodes.c"
+ #line 1853 "Python/bytecodes.c"
if (true) goto pop_1_error;
}
int res = PyErr_GivenExceptionMatches(left, right);
#line 2617 "Python/generated_cases.c.h"
Py_DECREF(right);
- #line 1812 "Python/bytecodes.c"
+ #line 1858 "Python/bytecodes.c"
b = Py_NewRef(res ? Py_True : Py_False);
#line 2621 "Python/generated_cases.c.h"
stack_pointer[-1] = b;
@@ -2626,13 +2626,13 @@
PyObject *fromlist = stack_pointer[-1];
PyObject *level = stack_pointer[-2];
PyObject *res;
- #line 1816 "Python/bytecodes.c"
+ #line 1862 "Python/bytecodes.c"
PyObject *name = GETITEM(frame->f_code->co_names, oparg);
res = import_name(tstate, frame, name, fromlist, level);
#line 2633 "Python/generated_cases.c.h"
Py_DECREF(level);
Py_DECREF(fromlist);
- #line 1819 "Python/bytecodes.c"
+ #line 1865 "Python/bytecodes.c"
if (res == NULL) goto pop_2_error;
#line 2638 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -2643,7 +2643,7 @@
TARGET(IMPORT_FROM) {
PyObject *from = stack_pointer[-1];
PyObject *res;
- #line 1823 "Python/bytecodes.c"
+ #line 1869 "Python/bytecodes.c"
PyObject *name = GETITEM(frame->f_code->co_names, oparg);
res = import_from(tstate, from, name);
if (res == NULL) goto error;
@@ -2654,7 +2654,7 @@
}
TARGET(JUMP_FORWARD) {
- #line 1829 "Python/bytecodes.c"
+ #line 1875 "Python/bytecodes.c"
JUMPBY(oparg);
#line 2660 "Python/generated_cases.c.h"
DISPATCH();
@@ -2662,7 +2662,7 @@
TARGET(JUMP_BACKWARD) {
PREDICTED(JUMP_BACKWARD);
- #line 1833 "Python/bytecodes.c"
+ #line 1879 "Python/bytecodes.c"
assert(oparg < INSTR_OFFSET());
JUMPBY(-oparg);
#line 2669 "Python/generated_cases.c.h"
@@ -2673,7 +2673,7 @@
TARGET(POP_JUMP_IF_FALSE) {
PREDICTED(POP_JUMP_IF_FALSE);
PyObject *cond = stack_pointer[-1];
- #line 1839 "Python/bytecodes.c"
+ #line 1885 "Python/bytecodes.c"
if (Py_IsTrue(cond)) {
_Py_DECREF_NO_DEALLOC(cond);
}
@@ -2685,7 +2685,7 @@
int err = PyObject_IsTrue(cond);
#line 2687 "Python/generated_cases.c.h"
Py_DECREF(cond);
- #line 1849 "Python/bytecodes.c"
+ #line 1895 "Python/bytecodes.c"
if (err == 0) {
JUMPBY(oparg);
}
@@ -2700,7 +2700,7 @@
TARGET(POP_JUMP_IF_TRUE) {
PyObject *cond = stack_pointer[-1];
- #line 1859 "Python/bytecodes.c"
+ #line 1905 "Python/bytecodes.c"
if (Py_IsFalse(cond)) {
_Py_DECREF_NO_DEALLOC(cond);
}
@@ -2712,7 +2712,7 @@
int err = PyObject_IsTrue(cond);
#line 2714 "Python/generated_cases.c.h"
Py_DECREF(cond);
- #line 1869 "Python/bytecodes.c"
+ #line 1915 "Python/bytecodes.c"
if (err > 0) {
JUMPBY(oparg);
}
@@ -2727,11 +2727,11 @@
TARGET(POP_JUMP_IF_NOT_NONE) {
PyObject *value = stack_pointer[-1];
- #line 1879 "Python/bytecodes.c"
+ #line 1925 "Python/bytecodes.c"
if (!Py_IsNone(value)) {
#line 2733 "Python/generated_cases.c.h"
Py_DECREF(value);
- #line 1881 "Python/bytecodes.c"
+ #line 1927 "Python/bytecodes.c"
JUMPBY(oparg);
}
else {
@@ -2744,7 +2744,7 @@
TARGET(POP_JUMP_IF_NONE) {
PyObject *value = stack_pointer[-1];
- #line 1889 "Python/bytecodes.c"
+ #line 1935 "Python/bytecodes.c"
if (Py_IsNone(value)) {
_Py_DECREF_NO_DEALLOC(value);
JUMPBY(oparg);
@@ -2752,7 +2752,7 @@
else {
#line 2754 "Python/generated_cases.c.h"
Py_DECREF(value);
- #line 1895 "Python/bytecodes.c"
+ #line 1941 "Python/bytecodes.c"
}
#line 2758 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -2760,7 +2760,7 @@
}
TARGET(JUMP_BACKWARD_NO_INTERRUPT) {
- #line 1899 "Python/bytecodes.c"
+ #line 1945 "Python/bytecodes.c"
/* This bytecode is used in the `yield from` or `await` loop.
* If there is an interrupt, we want it handled in the innermost
* generator or coroutine, so we deliberately do not check it here.
@@ -2774,7 +2774,7 @@
TARGET(GET_LEN) {
PyObject *obj = stack_pointer[-1];
PyObject *len_o;
- #line 1908 "Python/bytecodes.c"
+ #line 1954 "Python/bytecodes.c"
// PUSH(len(TOS))
Py_ssize_t len_i = PyObject_Length(obj);
if (len_i < 0) goto error;
@@ -2791,7 +2791,7 @@
PyObject *type = stack_pointer[-2];
PyObject *subject = stack_pointer[-3];
PyObject *attrs;
- #line 1916 "Python/bytecodes.c"
+ #line 1962 "Python/bytecodes.c"
// Pop TOS and TOS1. Set TOS to a tuple of attributes on success, or
// None on failure.
assert(PyTuple_CheckExact(names));
@@ -2800,7 +2800,7 @@
Py_DECREF(subject);
Py_DECREF(type);
Py_DECREF(names);
- #line 1921 "Python/bytecodes.c"
+ #line 1967 "Python/bytecodes.c"
if (attrs) {
assert(PyTuple_CheckExact(attrs)); // Success!
}
@@ -2817,7 +2817,7 @@
TARGET(MATCH_MAPPING) {
PyObject *subject = stack_pointer[-1];
PyObject *res;
- #line 1931 "Python/bytecodes.c"
+ #line 1977 "Python/bytecodes.c"
int match = Py_TYPE(subject)->tp_flags & Py_TPFLAGS_MAPPING;
res = Py_NewRef(match ? Py_True : Py_False);
#line 2824 "Python/generated_cases.c.h"
@@ -2830,7 +2830,7 @@
TARGET(MATCH_SEQUENCE) {
PyObject *subject = stack_pointer[-1];
PyObject *res;
- #line 1937 "Python/bytecodes.c"
+ #line 1983 "Python/bytecodes.c"
int match = Py_TYPE(subject)->tp_flags & Py_TPFLAGS_SEQUENCE;
res = Py_NewRef(match ? Py_True : Py_False);
#line 2837 "Python/generated_cases.c.h"
@@ -2844,7 +2844,7 @@
PyObject *keys = stack_pointer[-1];
PyObject *subject = stack_pointer[-2];
PyObject *values_or_none;
- #line 1943 "Python/bytecodes.c"
+ #line 1989 "Python/bytecodes.c"
// On successful match, PUSH(values). Otherwise, PUSH(None).
values_or_none = match_keys(tstate, subject, keys);
if (values_or_none == NULL) goto error;
@@ -2857,12 +2857,12 @@
TARGET(GET_ITER) {
PyObject *iterable = stack_pointer[-1];
PyObject *iter;
- #line 1949 "Python/bytecodes.c"
+ #line 1995 "Python/bytecodes.c"
/* before: [obj]; after [getiter(obj)] */
iter = PyObject_GetIter(iterable);
#line 2864 "Python/generated_cases.c.h"
Py_DECREF(iterable);
- #line 1952 "Python/bytecodes.c"
+ #line 1998 "Python/bytecodes.c"
if (iter == NULL) goto pop_1_error;
#line 2868 "Python/generated_cases.c.h"
stack_pointer[-1] = iter;
@@ -2872,7 +2872,7 @@
TARGET(GET_YIELD_FROM_ITER) {
PyObject *iterable = stack_pointer[-1];
PyObject *iter;
- #line 1956 "Python/bytecodes.c"
+ #line 2002 "Python/bytecodes.c"
/* before: [obj]; after [getiter(obj)] */
if (PyCoro_CheckExact(iterable)) {
/* `iterable` is a coroutine */
@@ -2897,7 +2897,7 @@
}
#line 2899 "Python/generated_cases.c.h"
Py_DECREF(iterable);
- #line 1979 "Python/bytecodes.c"
+ #line 2025 "Python/bytecodes.c"
}
#line 2903 "Python/generated_cases.c.h"
stack_pointer[-1] = iter;
@@ -2910,7 +2910,7 @@
static_assert(INLINE_CACHE_ENTRIES_FOR_ITER == 1, "incorrect cache size");
PyObject *iter = stack_pointer[-1];
PyObject *next;
- #line 1998 "Python/bytecodes.c"
+ #line 2044 "Python/bytecodes.c"
#if ENABLE_SPECIALIZATION
_PyForIterCache *cache = (_PyForIterCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
@@ -2953,7 +2953,7 @@
TARGET(FOR_ITER_LIST) {
PyObject *iter = stack_pointer[-1];
PyObject *next;
- #line 2033 "Python/bytecodes.c"
+ #line 2079 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
DEOPT_IF(Py_TYPE(iter) != &PyListIter_Type, FOR_ITER);
_PyListIterObject *it = (_PyListIterObject *)iter;
@@ -2984,7 +2984,7 @@
TARGET(FOR_ITER_TUPLE) {
PyObject *iter = stack_pointer[-1];
PyObject *next;
- #line 2056 "Python/bytecodes.c"
+ #line 2102 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
_PyTupleIterObject *it = (_PyTupleIterObject *)iter;
DEOPT_IF(Py_TYPE(it) != &PyTupleIter_Type, FOR_ITER);
@@ -3015,7 +3015,7 @@
TARGET(FOR_ITER_RANGE) {
PyObject *iter = stack_pointer[-1];
PyObject *next;
- #line 2079 "Python/bytecodes.c"
+ #line 2125 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
_PyRangeIterObject *r = (_PyRangeIterObject *)iter;
DEOPT_IF(Py_TYPE(r) != &PyRangeIter_Type, FOR_ITER);
@@ -3043,7 +3043,7 @@
TARGET(FOR_ITER_GEN) {
PyObject *iter = stack_pointer[-1];
- #line 2100 "Python/bytecodes.c"
+ #line 2146 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
PyGenObject *gen = (PyGenObject *)iter;
DEOPT_IF(Py_TYPE(gen) != &PyGen_Type, FOR_ITER);
@@ -3065,7 +3065,7 @@
PyObject *mgr = stack_pointer[-1];
PyObject *exit;
PyObject *res;
- #line 2117 "Python/bytecodes.c"
+ #line 2163 "Python/bytecodes.c"
PyObject *enter = _PyObject_LookupSpecial(mgr, &_Py_ID(__aenter__));
if (enter == NULL) {
if (!_PyErr_Occurred(tstate)) {
@@ -3090,7 +3090,7 @@
}
#line 3092 "Python/generated_cases.c.h"
Py_DECREF(mgr);
- #line 2140 "Python/bytecodes.c"
+ #line 2186 "Python/bytecodes.c"
res = _PyObject_CallNoArgs(enter);
Py_DECREF(enter);
if (res == NULL) {
@@ -3109,7 +3109,7 @@
PyObject *mgr = stack_pointer[-1];
PyObject *exit;
PyObject *res;
- #line 2150 "Python/bytecodes.c"
+ #line 2196 "Python/bytecodes.c"
/* pop the context manager, push its __exit__ and the
* value returned from calling its __enter__
*/
@@ -3137,7 +3137,7 @@
}
#line 3139 "Python/generated_cases.c.h"
Py_DECREF(mgr);
- #line 2176 "Python/bytecodes.c"
+ #line 2222 "Python/bytecodes.c"
res = _PyObject_CallNoArgs(enter);
Py_DECREF(enter);
if (res == NULL) {
@@ -3156,7 +3156,7 @@
PyObject *lasti = stack_pointer[-3];
PyObject *exit_func = stack_pointer[-4];
PyObject *res;
- #line 2185 "Python/bytecodes.c"
+ #line 2231 "Python/bytecodes.c"
/* At the top of the stack are 4 values:
- val: TOP = exc_info()
- unused: SECOND = previous exception
@@ -3186,7 +3186,7 @@
TARGET(PUSH_EXC_INFO) {
PyObject *new_exc = stack_pointer[-1];
PyObject *prev_exc;
- #line 2208 "Python/bytecodes.c"
+ #line 2254 "Python/bytecodes.c"
_PyErr_StackItem *exc_info = tstate->exc_info;
if (exc_info->exc_value != NULL) {
prev_exc = exc_info->exc_value;
@@ -3210,7 +3210,7 @@
uint32_t type_version = read_u32(&next_instr[1].cache);
uint32_t keys_version = read_u32(&next_instr[3].cache);
PyObject *descr = read_obj(&next_instr[5].cache);
- #line 2220 "Python/bytecodes.c"
+ #line 2266 "Python/bytecodes.c"
/* Cached method object */
assert(cframe.use_tracing == 0);
PyTypeObject *self_cls = Py_TYPE(self);
@@ -3242,7 +3242,7 @@
PyObject *res;
uint32_t type_version = read_u32(&next_instr[1].cache);
PyObject *descr = read_obj(&next_instr[5].cache);
- #line 2240 "Python/bytecodes.c"
+ #line 2286 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
PyTypeObject *self_cls = Py_TYPE(self);
DEOPT_IF(self_cls->tp_version_tag != type_version, LOAD_ATTR);
@@ -3267,7 +3267,7 @@
PyObject *res;
uint32_t type_version = read_u32(&next_instr[1].cache);
PyObject *descr = read_obj(&next_instr[5].cache);
- #line 2253 "Python/bytecodes.c"
+ #line 2299 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
PyTypeObject *self_cls = Py_TYPE(self);
DEOPT_IF(self_cls->tp_version_tag != type_version, LOAD_ATTR);
@@ -3291,7 +3291,7 @@
}
TARGET(KW_NAMES) {
- #line 2270 "Python/bytecodes.c"
+ #line 2316 "Python/bytecodes.c"
assert(kwnames == NULL);
assert(oparg < PyTuple_GET_SIZE(frame->f_code->co_consts));
kwnames = GETITEM(frame->f_code->co_consts, oparg);
@@ -3306,7 +3306,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2306 "Python/bytecodes.c"
+ #line 2352 "Python/bytecodes.c"
int is_meth = method != NULL;
int total_args = oparg;
if (is_meth) {
@@ -3390,7 +3390,7 @@
TARGET(CALL_BOUND_METHOD_EXACT_ARGS) {
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *method = stack_pointer[-(2 + oparg)];
- #line 2384 "Python/bytecodes.c"
+ #line 2430 "Python/bytecodes.c"
DEOPT_IF(method != NULL, CALL);
DEOPT_IF(Py_TYPE(callable) != &PyMethod_Type, CALL);
STAT_INC(CALL, hit);
@@ -3409,7 +3409,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *method = stack_pointer[-(2 + oparg)];
uint32_t func_version = read_u32(&next_instr[1].cache);
- #line 2396 "Python/bytecodes.c"
+ #line 2442 "Python/bytecodes.c"
assert(kwnames == NULL);
DEOPT_IF(tstate->interp->eval_frame, CALL);
int is_meth = method != NULL;
@@ -3443,7 +3443,7 @@
PyObject *method = stack_pointer[-(2 + oparg)];
uint32_t func_version = read_u32(&next_instr[1].cache);
uint16_t min_args = read_u16(&next_instr[3].cache);
- #line 2423 "Python/bytecodes.c"
+ #line 2469 "Python/bytecodes.c"
assert(kwnames == NULL);
DEOPT_IF(tstate->interp->eval_frame, CALL);
int is_meth = method != NULL;
@@ -3481,7 +3481,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *null = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2455 "Python/bytecodes.c"
+ #line 2501 "Python/bytecodes.c"
assert(kwnames == NULL);
assert(cframe.use_tracing == 0);
assert(oparg == 1);
@@ -3505,7 +3505,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *null = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2468 "Python/bytecodes.c"
+ #line 2514 "Python/bytecodes.c"
assert(kwnames == NULL);
assert(cframe.use_tracing == 0);
assert(oparg == 1);
@@ -3531,7 +3531,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *null = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2483 "Python/bytecodes.c"
+ #line 2529 "Python/bytecodes.c"
assert(kwnames == NULL);
assert(oparg == 1);
DEOPT_IF(null != NULL, CALL);
@@ -3556,7 +3556,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2497 "Python/bytecodes.c"
+ #line 2543 "Python/bytecodes.c"
int is_meth = method != NULL;
int total_args = oparg;
if (is_meth) {
@@ -3592,7 +3592,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2522 "Python/bytecodes.c"
+ #line 2568 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
/* Builtin METH_O functions */
assert(kwnames == NULL);
@@ -3635,7 +3635,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2554 "Python/bytecodes.c"
+ #line 2600 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
/* Builtin METH_FASTCALL functions, without keywords */
assert(kwnames == NULL);
@@ -3682,7 +3682,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2590 "Python/bytecodes.c"
+ #line 2636 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
/* Builtin METH_FASTCALL | METH_KEYWORDS functions */
int is_meth = method != NULL;
@@ -3729,7 +3729,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2626 "Python/bytecodes.c"
+ #line 2672 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
assert(kwnames == NULL);
/* len(o) */
@@ -3768,7 +3768,7 @@
PyObject *callable = stack_pointer[-(1 + oparg)];
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2654 "Python/bytecodes.c"
+ #line 2700 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
assert(kwnames == NULL);
/* isinstance(o, o2) */
@@ -3808,7 +3808,7 @@
PyObject **args = (stack_pointer - oparg);
PyObject *self = stack_pointer[-(1 + oparg)];
PyObject *method = stack_pointer[-(2 + oparg)];
- #line 2685 "Python/bytecodes.c"
+ #line 2731 "Python/bytecodes.c"
assert(cframe.use_tracing == 0);
assert(kwnames == NULL);
assert(oparg == 1);
@@ -3834,7 +3834,7 @@
PyObject **args = (stack_pointer - oparg);
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2706 "Python/bytecodes.c"
+ #line 2752 "Python/bytecodes.c"
assert(kwnames == NULL);
int is_meth = method != NULL;
int total_args = oparg;
@@ -3878,7 +3878,7 @@
PyObject **args = (stack_pointer - oparg);
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2740 "Python/bytecodes.c"
+ #line 2786 "Python/bytecodes.c"
int is_meth = method != NULL;
int total_args = oparg;
if (is_meth) {
@@ -3920,7 +3920,7 @@
PyObject **args = (stack_pointer - oparg);
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2772 "Python/bytecodes.c"
+ #line 2818 "Python/bytecodes.c"
assert(kwnames == NULL);
assert(oparg == 0 || oparg == 1);
int is_meth = method != NULL;
@@ -3962,7 +3962,7 @@
PyObject **args = (stack_pointer - oparg);
PyObject *method = stack_pointer[-(2 + oparg)];
PyObject *res;
- #line 2804 "Python/bytecodes.c"
+ #line 2850 "Python/bytecodes.c"
assert(kwnames == NULL);
int is_meth = method != NULL;
int total_args = oparg;
@@ -4005,7 +4005,7 @@
PyObject *callargs = stack_pointer[-(1 + ((oparg & 1) ? 1 : 0))];
PyObject *func = stack_pointer[-(2 + ((oparg & 1) ? 1 : 0))];
PyObject *result;
- #line 2835 "Python/bytecodes.c"
+ #line 2881 "Python/bytecodes.c"
if (oparg & 1) {
// DICT_MERGE is called before this opcode if there are kwargs.
// It converts all dict subtypes in kwargs into regular dicts.
@@ -4028,7 +4028,7 @@
Py_DECREF(func);
Py_DECREF(callargs);
Py_XDECREF(kwargs);
- #line 2854 "Python/bytecodes.c"
+ #line 2900 "Python/bytecodes.c"
assert(PEEK(3 + (oparg & 1)) == NULL);
if (result == NULL) { STACK_SHRINK(((oparg & 1) ? 1 : 0)); goto pop_3_error; }
@@ -4047,7 +4047,7 @@
PyObject *kwdefaults = (oparg & 0x02) ? stack_pointer[-(1 + ((oparg & 0x08) ? 1 : 0) + ((oparg & 0x04) ? 1 : 0) + ((oparg & 0x02) ? 1 : 0))] : NULL;
PyObject *defaults = (oparg & 0x01) ? stack_pointer[-(1 + ((oparg & 0x08) ? 1 : 0) + ((oparg & 0x04) ? 1 : 0) + ((oparg & 0x02) ? 1 : 0) + ((oparg & 0x01) ? 1 : 0))] : NULL;
PyObject *func;
- #line 2865 "Python/bytecodes.c"
+ #line 2911 "Python/bytecodes.c"
PyFunctionObject *func_obj = (PyFunctionObject *)
PyFunction_New(codeobj, GLOBALS());
@@ -4083,7 +4083,7 @@
}
TARGET(RETURN_GENERATOR) {
- #line 2896 "Python/bytecodes.c"
+ #line 2942 "Python/bytecodes.c"
assert(PyFunction_Check(frame->f_funcobj));
PyFunctionObject *func = (PyFunctionObject *)frame->f_funcobj;
PyGenObject *gen = (PyGenObject *)_Py_MakeCoro(func);
@@ -4112,13 +4112,13 @@
PyObject *stop = stack_pointer[-(1 + ((oparg == 3) ? 1 : 0))];
PyObject *start = stack_pointer[-(2 + ((oparg == 3) ? 1 : 0))];
PyObject *slice;
- #line 2919 "Python/bytecodes.c"
+ #line 2965 "Python/bytecodes.c"
slice = PySlice_New(start, stop, step);
#line 4118 "Python/generated_cases.c.h"
Py_DECREF(start);
Py_DECREF(stop);
Py_XDECREF(step);
- #line 2921 "Python/bytecodes.c"
+ #line 2967 "Python/bytecodes.c"
if (slice == NULL) { STACK_SHRINK(((oparg == 3) ? 1 : 0)); goto pop_2_error; }
#line 4124 "Python/generated_cases.c.h"
STACK_SHRINK(((oparg == 3) ? 1 : 0));
@@ -4131,7 +4131,7 @@
PyObject *fmt_spec = ((oparg & FVS_MASK) == FVS_HAVE_SPEC) ? stack_pointer[-((((oparg & FVS_MASK) == FVS_HAVE_SPEC) ? 1 : 0))] : NULL;
PyObject *value = stack_pointer[-(1 + (((oparg & FVS_MASK) == FVS_HAVE_SPEC) ? 1 : 0))];
PyObject *result;
- #line 2925 "Python/bytecodes.c"
+ #line 2971 "Python/bytecodes.c"
/* Handles f-string value formatting. */
PyObject *(*conv_fn)(PyObject *);
int which_conversion = oparg & FVC_MASK;
@@ -4175,7 +4175,7 @@
TARGET(COPY) {
PyObject *bottom = stack_pointer[-(1 + (oparg-1))];
PyObject *top;
- #line 2962 "Python/bytecodes.c"
+ #line 3008 "Python/bytecodes.c"
assert(oparg > 0);
top = Py_NewRef(bottom);
#line 4182 "Python/generated_cases.c.h"
@@ -4190,7 +4190,7 @@
PyObject *rhs = stack_pointer[-1];
PyObject *lhs = stack_pointer[-2];
PyObject *res;
- #line 2967 "Python/bytecodes.c"
+ #line 3013 "Python/bytecodes.c"
#if ENABLE_SPECIALIZATION
_PyBinaryOpCache *cache = (_PyBinaryOpCache *)next_instr;
if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) {
@@ -4209,7 +4209,7 @@
#line 4210 "Python/generated_cases.c.h"
Py_DECREF(lhs);
Py_DECREF(rhs);
- #line 2983 "Python/bytecodes.c"
+ #line 3029 "Python/bytecodes.c"
if (res == NULL) goto pop_2_error;
#line 4215 "Python/generated_cases.c.h"
STACK_SHRINK(1);
@@ -4221,7 +4221,7 @@
TARGET(SWAP) {
PyObject *top = stack_pointer[-1];
PyObject *bottom = stack_pointer[-(2 + (oparg-2))];
- #line 2988 "Python/bytecodes.c"
+ #line 3034 "Python/bytecodes.c"
assert(oparg >= 2);
#line 4227 "Python/generated_cases.c.h"
stack_pointer[-1] = bottom;
@@ -4230,7 +4230,7 @@
}
TARGET(EXTENDED_ARG) {
- #line 2992 "Python/bytecodes.c"
+ #line 3038 "Python/bytecodes.c"
assert(oparg);
assert(cframe.use_tracing == 0);
opcode = next_instr->op.code;
@@ -4241,7 +4241,7 @@
}
TARGET(CACHE) {
- #line 3001 "Python/bytecodes.c"
+ #line 3047 "Python/bytecodes.c"
Py_UNREACHABLE();
#line 4247 "Python/generated_cases.c.h"
}
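The line-number churn above is mechanical: generated_cases.c.h embeds #line directives that point back into bytecodes.c, so adding declarations near the top of bytecodes.c shifts every later directive when the file is regenerated. A toy Python model of how a generator emits such directives (hypothetical helper, not Tools/cases_generator itself):

    def emit(out_lines, src_file, src_lineno, body):
        # C's #line directive makes compilers and debuggers report
        # positions against the original source, not the generated file.
        out_lines.append(f'#line {src_lineno} "{src_file}"')
        out_lines.extend(body)

    out = []
    emit(out, "Python/bytecodes.c", 2254,
         ["    _PyErr_StackItem *exc_info = tstate->exc_info;"])
    print("\n".join(out))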
https://github.com/python/cpython/commit/361a3eaf1b6bf3360e34388cc909307ddd…
commit: 361a3eaf1b6bf3360e34388cc909307ddd20737b
branch: main
author: Alex Waygood <Alex.Waygood(a)Gmail.com>
committer: AlexWaygood <Alex.Waygood(a)Gmail.com>
date: 2023-03-31T21:54:50+01:00
summary:
gh-74690: Micro-optimise `typing._get_protocol_attrs` (#103152)
Improve performance of `isinstance()` checks against runtime-checkable protocols
files:
M Lib/typing.py
diff --git a/Lib/typing.py b/Lib/typing.py
index 3d086dc1cb90..a88542cfbaec 100644
--- a/Lib/typing.py
+++ b/Lib/typing.py
@@ -1903,15 +1903,19 @@ class _TypingEllipsis:
"""Internal placeholder for ... (ellipsis)."""
-_TYPING_INTERNALS = ['__parameters__', '__orig_bases__', '__orig_class__',
- '_is_protocol', '_is_runtime_protocol']
+_TYPING_INTERNALS = frozenset({
+ '__parameters__', '__orig_bases__', '__orig_class__',
+ '_is_protocol', '_is_runtime_protocol'
+})
-_SPECIAL_NAMES = ['__abstractmethods__', '__annotations__', '__dict__', '__doc__',
- '__init__', '__module__', '__new__', '__slots__',
- '__subclasshook__', '__weakref__', '__class_getitem__']
+_SPECIAL_NAMES = frozenset({
+ '__abstractmethods__', '__annotations__', '__dict__', '__doc__',
+ '__init__', '__module__', '__new__', '__slots__',
+ '__subclasshook__', '__weakref__', '__class_getitem__'
+})
# These special attributes will be not collected as protocol members.
-EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS + _SPECIAL_NAMES + ['_MutableMapping__marker']
+EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS | _SPECIAL_NAMES | {'_MutableMapping__marker'}
def _get_protocol_attrs(cls):
@@ -1922,10 +1926,10 @@ def _get_protocol_attrs(cls):
"""
attrs = set()
for base in cls.__mro__[:-1]: # without object
- if base.__name__ in ('Protocol', 'Generic'):
+ if base.__name__ in {'Protocol', 'Generic'}:
continue
annotations = getattr(base, '__annotations__', {})
- for attr in list(base.__dict__.keys()) + list(annotations.keys()):
+ for attr in (*base.__dict__, *annotations):
if not attr.startswith('_abc_') and attr not in EXCLUDED_ATTRIBUTES:
attrs.add(attr)
return attrs
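As a rough illustration of why this micro-optimisation helps: the exclusion check runs once per attribute of every base class, and membership testing in a frozenset is O(1) while a list scan is O(n). A minimal, self-contained benchmark sketch (the names below are made up, not CPython's):

    import timeit

    EXCLUDED_LIST = ['__abstractmethods__', '__annotations__', '__dict__',
                     '__doc__', '__init__', '__module__', '__new__',
                     '__slots__', '__subclasshook__', '__weakref__']
    EXCLUDED_SET = frozenset(EXCLUDED_LIST)

    # 'close' is absent, so the list pays its full linear scan every time.
    print(timeit.timeit("'close' in EXCLUDED_LIST", globals=globals()))
    print(timeit.timeit("'close' in EXCLUDED_SET", globals=globals()))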
gh-102549: [Enum] fail enum creation when data type raises in __init__ (GH-103149)
https://github.com/python/cpython/commit/2a4d8c0a9e88f45047da640ce5a92b304d…
commit: 2a4d8c0a9e88f45047da640ce5a92b304d2d39b1
branch: main
author: Ethan Furman <ethan(a)stoneleaf.us>
committer: ethanfurman <ethan(a)stoneleaf.us>
date: 2023-03-31T13:52:31-07:00
summary:
gh-102549: [Enum] fail enum creation when data type raises in __init__ (GH-103149)
files:
A Misc/NEWS.d/next/Library/2023-03-27-19-21-51.gh-issue-102549.NQ6Nlv.rst
M Lib/enum.py
M Lib/test/test_enum.py
diff --git a/Lib/enum.py b/Lib/enum.py
index 8c77117ce6ac..4e231e7e8ea7 100644
--- a/Lib/enum.py
+++ b/Lib/enum.py
@@ -266,23 +266,20 @@ def __set_name__(self, enum_class, member_name):
args = (args, ) # wrap it one more time
if not enum_class._use_args_:
enum_member = enum_class._new_member_(enum_class)
- if not hasattr(enum_member, '_value_'):
+ else:
+ enum_member = enum_class._new_member_(enum_class, *args)
+ if not hasattr(enum_member, '_value_'):
+ if enum_class._member_type_ is object:
+ enum_member._value_ = value
+ else:
try:
enum_member._value_ = enum_class._member_type_(*args)
except Exception as exc:
- enum_member._value_ = value
- else:
- enum_member = enum_class._new_member_(enum_class, *args)
- if not hasattr(enum_member, '_value_'):
- if enum_class._member_type_ is object:
- enum_member._value_ = value
- else:
- try:
- enum_member._value_ = enum_class._member_type_(*args)
- except Exception as exc:
- raise TypeError(
- '_value_ not set in __new__, unable to create it'
- ) from None
+ new_exc = TypeError(
+ '_value_ not set in __new__, unable to create it'
+ )
+ new_exc.__cause__ = exc
+ raise new_exc
value = enum_member._value_
enum_member._name_ = member_name
enum_member.__objclass__ = enum_class
diff --git a/Lib/test/test_enum.py b/Lib/test/test_enum.py
index bea19542705d..ee5280601be1 100644
--- a/Lib/test/test_enum.py
+++ b/Lib/test/test_enum.py
@@ -2916,6 +2916,26 @@ def __new__(cls, c):
self.assertEqual(FlagFromChar.a, 158456325028528675187087900672)
self.assertEqual(FlagFromChar.a|1, 158456325028528675187087900673)
+ def test_init_exception(self):
+ class Base:
+ def __init__(self, x):
+ raise ValueError("I don't like", x)
+ with self.assertRaises(TypeError):
+ class MyEnum(Base, enum.Enum):
+ A = 'a'
+ def __init__(self, y):
+ self.y = y
+ with self.assertRaises(ValueError):
+ class MyEnum(Base, enum.Enum):
+ A = 'a'
+ def __init__(self, y):
+ self.y = y
+ def __new__(cls, value):
+ member = Base.__new__(cls)
+ member._value_ = Base(value)
+ return member
+
+
class TestOrder(unittest.TestCase):
"test usage of the `_order_` attribute"
diff --git a/Misc/NEWS.d/next/Library/2023-03-27-19-21-51.gh-issue-102549.NQ6Nlv.rst b/Misc/NEWS.d/next/Library/2023-03-27-19-21-51.gh-issue-102549.NQ6Nlv.rst
new file mode 100644
index 000000000000..e4def038175b
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-03-27-19-21-51.gh-issue-102549.NQ6Nlv.rst
@@ -0,0 +1 @@
+Don't ignore exceptions in member type creation.
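A hedged sketch of the behaviour this commit introduces, modelled on the new test (Picky and Holiday are invented names): when the mixed-in data type raises while the member's value is being constructed, enum creation now fails with a TypeError chained to the original exception instead of silently swallowing it.

    import enum

    class Picky:
        def __init__(self, value):
            raise ValueError('refusing', value)

    try:
        class Holiday(Picky, enum.Enum):
            CHRISTMAS = 'dec 25'
    except TypeError as exc:
        print(exc)            # _value_ not set in __new__, unable to create it
        print(exc.__cause__)  # ('refusing', 'dec 25')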
gh-101659: Clean Up the General Import Tests for Subinterpreters (gh-103151)
https://github.com/python/cpython/commit/dfc4c95762f417e84dcb21dbbe6399ab7b…
commit: dfc4c95762f417e84dcb21dbbe6399ab7b7cef19
branch: main
author: Eric Snow <ericsnowcurrently(a)gmail.com>
committer: ericsnowcurrently <ericsnowcurrently(a)gmail.com>
date: 2023-03-31T12:18:33-06:00
summary:
gh-101659: Clean Up the General Import Tests for Subinterpreters (gh-103151)
This involves 3 changes: some general cleanup, checks to match the kind of module, and a switch from testing against sys to _imp.
This is a precursor to gh-103150, though the changes are meant to stand on their own.
files:
M Lib/test/test_import/__init__.py
diff --git a/Lib/test/test_import/__init__.py b/Lib/test/test_import/__init__.py
index 96815b3f758a..3ef07203c46c 100644
--- a/Lib/test/test_import/__init__.py
+++ b/Lib/test/test_import/__init__.py
@@ -4,6 +4,9 @@
import glob
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
+from importlib.machinery import (
+ BuiltinImporter, ExtensionFileLoader, FrozenImporter, SourceFileLoader,
+)
import marshal
import os
import py_compile
@@ -44,6 +47,49 @@
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
+
+def _require_loader(module, loader, skip):
+ if isinstance(module, str):
+ module = __import__(module)
+
+ MODULE_KINDS = {
+ BuiltinImporter: 'built-in',
+ ExtensionFileLoader: 'extension',
+ FrozenImporter: 'frozen',
+ SourceFileLoader: 'pure Python',
+ }
+
+ expected = loader
+ assert isinstance(expected, type), expected
+ expected = MODULE_KINDS[expected]
+
+ actual = module.__spec__.loader
+ if not isinstance(actual, type):
+ actual = type(actual)
+ actual = MODULE_KINDS[actual]
+
+ if actual != expected:
+ err = f'expected module to be {expected}, got {module.__spec__}'
+ if skip:
+ raise unittest.SkipTest(err)
+ raise Exception(err)
+ return module
+
+def require_builtin(module, *, skip=False):
+ module = _require_loader(module, BuiltinImporter, skip)
+ assert module.__spec__.origin == 'built-in', module.__spec__
+
+def require_extension(module, *, skip=False):
+ _require_loader(module, ExtensionFileLoader, skip)
+
+def require_frozen(module, *, skip=True):
+ module = _require_loader(module, FrozenImporter, skip)
+ assert module.__spec__.origin == 'frozen', module.__spec__
+
+def require_pure_python(module, *, skip=False):
+ _require_loader(module, SourceFileLoader, skip)
+
+
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
@@ -1437,10 +1483,10 @@ def import_script(self, name, fd, check_override=None):
os.write({fd}, text.encode('utf-8'))
''')
- def run_shared(self, name, *,
- check_singlephase_setting=False,
- check_singlephase_override=None,
- ):
+ def run_here(self, name, *,
+ check_singlephase_setting=False,
+ check_singlephase_override=None,
+ ):
"""
Try importing the named module in a subinterpreter.
@@ -1470,27 +1516,35 @@ def run_shared(self, name, *,
self.assertEqual(ret, 0)
return os.read(r, 100)
- def check_compatible_shared(self, name, *, strict=False):
+ def check_compatible_here(self, name, *, strict=False):
# Verify that the named module may be imported in a subinterpreter.
- # (See run_shared() for more info.)
- out = self.run_shared(name, check_singlephase_setting=strict)
+ # (See run_here() for more info.)
+ out = self.run_here(name,
+ check_singlephase_setting=strict,
+ )
self.assertEqual(out, b'okay')
- def check_incompatible_shared(self, name):
- # Differences from check_compatible_shared():
+ def check_incompatible_here(self, name):
+ # Differences from check_compatible_here():
# * verify that import fails
# * "strict" is always True
- out = self.run_shared(name, check_singlephase_setting=True)
+ out = self.run_here(name,
+ check_singlephase_setting=True,
+ )
self.assertEqual(
out.decode('utf-8'),
f'ImportError: module {name} does not support loading in subinterpreters',
)
- def check_compatible_isolated(self, name, *, strict=False):
- # Differences from check_compatible_shared():
+ def check_compatible_fresh(self, name, *, strict=False):
+ # Differences from check_compatible_here():
# * subinterpreter in a new process
# * module has never been imported before in that process
# * this tests importing the module for the first time
+ kwargs = dict(
+ **self.RUN_KWARGS,
+ check_multi_interp_extensions=strict,
+ )
_, out, err = script_helper.assert_python_ok('-c', textwrap.dedent(f'''
import _testcapi, sys
assert (
@@ -1499,25 +1553,27 @@ def check_compatible_isolated(self, name, *, strict=False):
), repr({name!r})
ret = _testcapi.run_in_subinterp_with_config(
{self.import_script(name, "sys.stdout.fileno()")!r},
- **{self.RUN_KWARGS},
- check_multi_interp_extensions={strict},
+ **{kwargs},
)
assert ret == 0, ret
'''))
self.assertEqual(err, b'')
self.assertEqual(out, b'okay')
- def check_incompatible_isolated(self, name):
- # Differences from check_compatible_isolated():
+ def check_incompatible_fresh(self, name):
+ # Differences from check_compatible_fresh():
# * verify that import fails
# * "strict" is always True
+ kwargs = dict(
+ **self.RUN_KWARGS,
+ check_multi_interp_extensions=True,
+ )
_, out, err = script_helper.assert_python_ok('-c', textwrap.dedent(f'''
import _testcapi, sys
assert {name!r} not in sys.modules, {name!r}
ret = _testcapi.run_in_subinterp_with_config(
{self.import_script(name, "sys.stdout.fileno()")!r},
- **{self.RUN_KWARGS},
- check_multi_interp_extensions=True,
+ **{kwargs},
)
assert ret == 0, ret
'''))
@@ -1528,59 +1584,65 @@ def check_incompatible_isolated(self, name):
)
def test_builtin_compat(self):
- module = 'sys'
+ # For now we avoid using sys or builtins
+ # since they still don't implement multi-phase init.
+ module = '_imp'
+ require_builtin(module)
with self.subTest(f'{module}: not strict'):
- self.check_compatible_shared(module, strict=False)
- with self.subTest(f'{module}: strict, shared'):
- self.check_compatible_shared(module, strict=True)
+ self.check_compatible_here(module, strict=False)
+ with self.subTest(f'{module}: strict, not fresh'):
+ self.check_compatible_here(module, strict=True)
@cpython_only
def test_frozen_compat(self):
module = '_frozen_importlib'
+ require_frozen(module, skip=True)
if __import__(module).__spec__.origin != 'frozen':
raise unittest.SkipTest(f'{module} is unexpectedly not frozen')
with self.subTest(f'{module}: not strict'):
- self.check_compatible_shared(module, strict=False)
- with self.subTest(f'{module}: strict, shared'):
- self.check_compatible_shared(module, strict=True)
+ self.check_compatible_here(module, strict=False)
+ with self.subTest(f'{module}: strict, not fresh'):
+ self.check_compatible_here(module, strict=True)
@unittest.skipIf(_testsinglephase is None, "test requires _testsinglephase module")
def test_single_init_extension_compat(self):
module = '_testsinglephase'
+ require_extension(module)
with self.subTest(f'{module}: not strict'):
- self.check_compatible_shared(module, strict=False)
- with self.subTest(f'{module}: strict, shared'):
- self.check_incompatible_shared(module)
- with self.subTest(f'{module}: strict, isolated'):
- self.check_incompatible_isolated(module)
+ self.check_compatible_here(module, strict=False)
+ with self.subTest(f'{module}: strict, not fresh'):
+ self.check_incompatible_here(module)
+ with self.subTest(f'{module}: strict, fresh'):
+ self.check_incompatible_fresh(module)
@unittest.skipIf(_testmultiphase is None, "test requires _testmultiphase module")
def test_multi_init_extension_compat(self):
module = '_testmultiphase'
+ require_extension(module)
with self.subTest(f'{module}: not strict'):
- self.check_compatible_shared(module, strict=False)
- with self.subTest(f'{module}: strict, shared'):
- self.check_compatible_shared(module, strict=True)
- with self.subTest(f'{module}: strict, isolated'):
- self.check_compatible_isolated(module, strict=True)
+ self.check_compatible_here(module, strict=False)
+ with self.subTest(f'{module}: strict, not fresh'):
+ self.check_compatible_here(module, strict=True)
+ with self.subTest(f'{module}: strict, fresh'):
+ self.check_compatible_fresh(module, strict=True)
def test_python_compat(self):
module = 'threading'
- if __import__(module).__spec__.origin == 'frozen':
- raise unittest.SkipTest(f'{module} is unexpectedly frozen')
+ require_pure_python(module)
with self.subTest(f'{module}: not strict'):
- self.check_compatible_shared(module, strict=False)
- with self.subTest(f'{module}: strict, shared'):
- self.check_compatible_shared(module, strict=True)
- with self.subTest(f'{module}: strict, isolated'):
- self.check_compatible_isolated(module, strict=True)
+ self.check_compatible_here(module, strict=False)
+ with self.subTest(f'{module}: strict, not fresh'):
+ self.check_compatible_here(module, strict=True)
+ with self.subTest(f'{module}: strict, fresh'):
+ self.check_compatible_fresh(module, strict=True)
@unittest.skipIf(_testsinglephase is None, "test requires _testsinglephase module")
def test_singlephase_check_with_setting_and_override(self):
module = '_testsinglephase'
+ require_extension(module)
def check_compatible(setting, override):
- out = self.run_shared(
+ out = self.run_here(
module,
check_singlephase_setting=setting,
check_singlephase_override=override,
@@ -1588,7 +1650,7 @@ def check_compatible(setting, override):
self.assertEqual(out, b'okay')
def check_incompatible(setting, override):
- out = self.run_shared(
+ out = self.run_here(
module,
check_singlephase_setting=setting,
check_singlephase_override=override,
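For context on the new require_* helpers: the kind of a module is now derived from its __spec__.loader rather than from ad-hoc origin checks. A standalone sketch of that lookup (assuming a typical build where _imp is built in and threading is pure Python):

    from importlib.machinery import BuiltinImporter, SourceFileLoader
    import _imp
    import threading

    for mod, expected in ((_imp, BuiltinImporter), (threading, SourceFileLoader)):
        actual = mod.__spec__.loader
        if not isinstance(actual, type):  # source loaders are instances
            actual = type(actual)
        print(mod.__name__, actual is expected)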
https://github.com/python/cpython/commit/dde028480e57bffa83fb084b15ec22490c…
commit: dde028480e57bffa83fb084b15ec22490c42ef93
branch: main
author: Eric Snow <ericsnowcurrently(a)gmail.com>
committer: ericsnowcurrently <ericsnowcurrently(a)gmail.com>
date: 2023-03-31T12:09:10-06:00
summary:
gh-100227: Fix Cleanup of the Extensions Cache (gh-103150)
Decref the key in the right interpreter in _extensions_cache_set().
This is a follow-up to gh-103084. I found the bug while working on gh-101660.
files:
M Python/import.c
diff --git a/Python/import.c b/Python/import.c
index a45b3bfaacb2..24249ae4a6ad 100644
--- a/Python/import.c
+++ b/Python/import.c
@@ -983,13 +983,13 @@ _extensions_cache_set(PyObject *filename, PyObject *name, PyModuleDef *def)
res = 0;
finally:
+ Py_XDECREF(key);
if (oldts != NULL) {
_PyThreadState_Swap(interp->runtime, oldts);
_PyThreadState_UnbindDetached(main_tstate);
Py_DECREF(name);
Py_DECREF(filename);
}
- Py_XDECREF(key);
extensions_lock_release();
return res;
}
gh-74690: typing: Don't unnecessarily call `_get_protocol_attrs` twice in `_ProtocolMeta.__instancecheck__` (#103141)
https://github.com/python/cpython/commit/9048d73f7a5c58be21988250c381f86658…
commit: 9048d73f7a5c58be21988250c381f866586687a0
branch: main
author: Alex Waygood <Alex.Waygood(a)Gmail.com>
committer: AlexWaygood <Alex.Waygood(a)Gmail.com>
date: 2023-03-31T18:37:24+01:00
summary:
gh-74690: typing: Don't unnecessarily call `_get_protocol_attrs` twice in `_ProtocolMeta.__instancecheck__` (#103141)
Speed up `isinstance()` calls against runtime-checkable protocols
files:
M Lib/typing.py
diff --git a/Lib/typing.py b/Lib/typing.py
index 157a563bbece..3d086dc1cb90 100644
--- a/Lib/typing.py
+++ b/Lib/typing.py
@@ -1931,9 +1931,9 @@ def _get_protocol_attrs(cls):
return attrs
-def _is_callable_members_only(cls):
+def _is_callable_members_only(cls, protocol_attrs):
# PEP 544 prohibits using issubclass() with protocols that have non-method members.
- return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
+ return all(callable(getattr(cls, attr, None)) for attr in protocol_attrs)
def _no_init_or_replace_init(self, *args, **kwargs):
@@ -2000,24 +2000,32 @@ class _ProtocolMeta(ABCMeta):
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
+ is_protocol_cls = getattr(cls, "_is_protocol", False)
if (
- getattr(cls, '_is_protocol', False) and
+ is_protocol_cls and
not getattr(cls, '_is_runtime_protocol', False) and
not _allow_reckless_class_checks(depth=2)
):
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
- if ((not getattr(cls, '_is_protocol', False) or
- _is_callable_members_only(cls)) and
- issubclass(instance.__class__, cls)):
+ if not is_protocol_cls and issubclass(instance.__class__, cls):
return True
- if cls._is_protocol:
+
+ protocol_attrs = _get_protocol_attrs(cls)
+
+ if (
+ _is_callable_members_only(cls, protocol_attrs)
+ and issubclass(instance.__class__, cls)
+ ):
+ return True
+
+ if is_protocol_cls:
if all(hasattr(instance, attr) and
# All *methods* can be blocked by setting them to None.
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
- for attr in _get_protocol_attrs(cls)):
+ for attr in protocol_attrs):
return True
return super().__instancecheck__(instance)
@@ -2074,7 +2082,10 @@ def _proto_hook(other):
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
- if not _is_callable_members_only(cls):
+
+ protocol_attrs = _get_protocol_attrs(cls)
+
+ if not _is_callable_members_only(cls, protocol_attrs):
if _allow_reckless_class_checks():
return NotImplemented
raise TypeError("Protocols with non-method members"
@@ -2084,7 +2095,7 @@ def _proto_hook(other):
raise TypeError('issubclass() arg 1 must be a class')
# Second, perform the actual structural compatibility check.
- for attr in _get_protocol_attrs(cls):
+ for attr in protocol_attrs:
for base in other.__mro__:
# Check if the members appears in the class dictionary...
if attr in base.__dict__:
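As a reminder of what sits on the hot path here, a small hedged example (HasClose and File are invented): every isinstance() call against a runtime-checkable protocol performs a structural check over the protocol's attributes, and before this patch _get_protocol_attrs ran twice per call.

    from typing import Protocol, runtime_checkable

    @runtime_checkable
    class HasClose(Protocol):
        def close(self) -> None: ...

    class File:
        def close(self) -> None:
            pass

    print(isinstance(File(), HasClose))    # True: has a callable 'close'
    print(isinstance(object(), HasClose))  # False: no 'close' attribute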
https://github.com/python/cpython/commit/80163e17d3f826067c5d95198db7696287…
commit: 80163e17d3f826067c5d95198db7696287beb416
branch: main
author: Irit Katriel <1055913+iritkatriel(a)users.noreply.github.com>
committer: iritkatriel <1055913+iritkatriel(a)users.noreply.github.com>
date: 2023-03-31T18:17:59+01:00
summary:
gh-87092: move CFG related code from compile.c to flowgraph.c (#103021)
files:
A Include/internal/pycore_flowgraph.h
A Include/internal/pycore_opcode_utils.h
A Python/flowgraph.c
M Include/internal/pycore_compile.h
M Include/internal/pycore_opcode.h
M Makefile.pre.in
M PCbuild/_freeze_module.vcxproj
M PCbuild/_freeze_module.vcxproj.filters
M PCbuild/pythoncore.vcxproj
M PCbuild/pythoncore.vcxproj.filters
M Python/compile.c
M Python/opcode_metadata.h
M Tools/build/generate_opcode_h.py
M Tools/cases_generator/generate_cases.py
diff --git a/Include/internal/pycore_compile.h b/Include/internal/pycore_compile.h
index 511f0689c938..6a1e02c0b895 100644
--- a/Include/internal/pycore_compile.h
+++ b/Include/internal/pycore_compile.h
@@ -33,6 +33,16 @@ extern int _PyAST_Optimize(
struct _arena *arena,
_PyASTOptimizeState *state);
+/* Utility for a number of growing arrays used in the compiler */
+int _PyCompile_EnsureArrayLargeEnough(
+ int idx,
+ void **array,
+ int *alloc,
+ int default_alloc,
+ size_t item_size);
+
+int _PyCompile_ConstCacheMergeOne(PyObject *const_cache, PyObject **obj);
+
/* Access compiler internals for unit testing */
PyAPI_FUNC(PyObject*) _PyCompile_CodeGen(
diff --git a/Include/internal/pycore_flowgraph.h b/Include/internal/pycore_flowgraph.h
new file mode 100644
index 000000000000..7c0b8fe980c3
--- /dev/null
+++ b/Include/internal/pycore_flowgraph.h
@@ -0,0 +1,117 @@
+#ifndef Py_INTERNAL_CFG_H
+#define Py_INTERNAL_CFG_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_opcode_utils.h"
+
+static const _PyCompilerSrcLocation NO_LOCATION = {-1, -1, -1, -1};
+
+typedef struct {
+ int i_opcode;
+ int i_oparg;
+ _PyCompilerSrcLocation i_loc;
+ struct _PyCfgBasicblock_ *i_target; /* target block (if jump instruction) */
+ struct _PyCfgBasicblock_ *i_except; /* target block when exception is raised */
+} _PyCfgInstruction;
+
+typedef struct {
+ int id;
+} _PyCfgJumpTargetLabel;
+
+
+typedef struct {
+ struct _PyCfgBasicblock_ *handlers[CO_MAXBLOCKS+1];
+ int depth;
+} _PyCfgExceptStack;
+
+typedef struct _PyCfgBasicblock_ {
+ /* Each basicblock in a compilation unit is linked via b_list in the
+ reverse order that the block are allocated. b_list points to the next
+ block, not to be confused with b_next, which is next by control flow. */
+ struct _PyCfgBasicblock_ *b_list;
+ /* The label of this block if it is a jump target, -1 otherwise */
+ _PyCfgJumpTargetLabel b_label;
+ /* Exception stack at start of block, used by assembler to create the exception handling table */
+ _PyCfgExceptStack *b_exceptstack;
+ /* pointer to an array of instructions, initially NULL */
+ _PyCfgInstruction *b_instr;
+ /* If b_next is non-NULL, it is a pointer to the next
+ block reached by normal control flow. */
+ struct _PyCfgBasicblock_ *b_next;
+ /* number of instructions used */
+ int b_iused;
+ /* length of instruction array (b_instr) */
+ int b_ialloc;
+ /* Used by add_checks_for_loads_of_unknown_variables */
+ uint64_t b_unsafe_locals_mask;
+ /* Number of predecessors that a block has. */
+ int b_predecessors;
+ /* depth of stack upon entry of block, computed by stackdepth() */
+ int b_startdepth;
+ /* instruction offset for block, computed by assemble_jump_offsets() */
+ int b_offset;
+ /* Basic block is an exception handler that preserves lasti */
+ unsigned b_preserve_lasti : 1;
+ /* Used by compiler passes to mark whether they have visited a basic block. */
+ unsigned b_visited : 1;
+ /* b_except_handler is used by the cold-detection algorithm to mark exception targets */
+ unsigned b_except_handler : 1;
+ /* b_cold is true if this block is not perf critical (like an exception handler) */
+ unsigned b_cold : 1;
+ /* b_warm is used by the cold-detection algorithm to mark blocks which are definitely not cold */
+ unsigned b_warm : 1;
+} _PyCfgBasicblock;
+
+int _PyBasicblock_InsertInstruction(_PyCfgBasicblock *block, int pos, _PyCfgInstruction *instr);
+
+typedef struct cfg_builder_ {
+ /* The entryblock, at which control flow begins. All blocks of the
+ CFG are reachable through the b_next links */
+ _PyCfgBasicblock *g_entryblock;
+ /* Pointer to the most recently allocated block. By following
+ b_list links, you can reach all allocated blocks. */
+ _PyCfgBasicblock *g_block_list;
+ /* pointer to the block currently being constructed */
+ _PyCfgBasicblock *g_curblock;
+ /* label for the next instruction to be placed */
+ _PyCfgJumpTargetLabel g_current_label;
+} _PyCfgBuilder;
+
+int _PyCfgBuilder_UseLabel(_PyCfgBuilder *g, _PyCfgJumpTargetLabel lbl);
+int _PyCfgBuilder_Addop(_PyCfgBuilder *g, int opcode, int oparg, _PyCompilerSrcLocation loc);
+
+int _PyCfgBuilder_Init(_PyCfgBuilder *g);
+void _PyCfgBuilder_Fini(_PyCfgBuilder *g);
+
+_PyCfgInstruction* _PyCfg_BasicblockLastInstr(const _PyCfgBasicblock *b);
+int _PyCfg_OptimizeCodeUnit(_PyCfgBuilder *g, PyObject *consts, PyObject *const_cache,
+ int code_flags, int nlocals, int nparams);
+int _PyCfg_Stackdepth(_PyCfgBasicblock *entryblock, int code_flags);
+void _PyCfg_ConvertExceptionHandlersToNops(_PyCfgBasicblock *entryblock);
+int _PyCfg_ResolveLineNumbers(_PyCfgBuilder *g, int firstlineno);
+int _PyCfg_ResolveJumps(_PyCfgBuilder *g);
+int _PyCfg_InstrSize(_PyCfgInstruction *instruction);
+
+
+static inline int
+basicblock_nofallthrough(const _PyCfgBasicblock *b) {
+ _PyCfgInstruction *last = _PyCfg_BasicblockLastInstr(b);
+ return (last &&
+ (IS_SCOPE_EXIT_OPCODE(last->i_opcode) ||
+ IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)));
+}
+
+#define BB_NO_FALLTHROUGH(B) (basicblock_nofallthrough(B))
+#define BB_HAS_FALLTHROUGH(B) (!basicblock_nofallthrough(B))
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_CFG_H */
diff --git a/Include/internal/pycore_opcode.h b/Include/internal/pycore_opcode.h
index 22914da07994..58f7da51aebd 100644
--- a/Include/internal/pycore_opcode.h
+++ b/Include/internal/pycore_opcode.h
@@ -12,12 +12,16 @@ extern "C" {
#include "opcode.h"
+extern const uint32_t _PyOpcode_RelativeJump[9];
+
+extern const uint32_t _PyOpcode_Jump[9];
+
extern const uint8_t _PyOpcode_Caches[256];
extern const uint8_t _PyOpcode_Deopt[256];
#ifdef NEED_OPCODE_TABLES
-static const uint32_t _PyOpcode_RelativeJump[9] = {
+const uint32_t _PyOpcode_RelativeJump[9] = {
0U,
0U,
536870912U,
@@ -28,7 +32,7 @@ static const uint32_t _PyOpcode_RelativeJump[9] = {
0U,
48U,
};
-static const uint32_t _PyOpcode_Jump[9] = {
+const uint32_t _PyOpcode_Jump[9] = {
0U,
0U,
536870912U,
diff --git a/Include/internal/pycore_opcode_utils.h b/Include/internal/pycore_opcode_utils.h
new file mode 100644
index 000000000000..96bb4d743bf2
--- /dev/null
+++ b/Include/internal/pycore_opcode_utils.h
@@ -0,0 +1,95 @@
+#ifndef Py_INTERNAL_OPCODE_UTILS_H
+#define Py_INTERNAL_OPCODE_UTILS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_opcode.h" // _PyOpcode_RelativeJump
+
+
+#define MAX_REAL_OPCODE 254
+
+#define IS_WITHIN_OPCODE_RANGE(opcode) \
+ (((opcode) >= 0 && (opcode) <= MAX_REAL_OPCODE) || \
+ IS_PSEUDO_OPCODE(opcode))
+
+#define IS_JUMP_OPCODE(opcode) \
+ is_bit_set_in_table(_PyOpcode_Jump, opcode)
+
+#define IS_BLOCK_PUSH_OPCODE(opcode) \
+ ((opcode) == SETUP_FINALLY || \
+ (opcode) == SETUP_WITH || \
+ (opcode) == SETUP_CLEANUP)
+
+#define HAS_TARGET(opcode) \
+ (IS_JUMP_OPCODE(opcode) || IS_BLOCK_PUSH_OPCODE(opcode))
+
+/* opcodes that must be last in the basicblock */
+#define IS_TERMINATOR_OPCODE(opcode) \
+ (IS_JUMP_OPCODE(opcode) || IS_SCOPE_EXIT_OPCODE(opcode))
+
+/* opcodes which are not emitted in codegen stage, only by the assembler */
+#define IS_ASSEMBLER_OPCODE(opcode) \
+ ((opcode) == JUMP_FORWARD || \
+ (opcode) == JUMP_BACKWARD || \
+ (opcode) == JUMP_BACKWARD_NO_INTERRUPT)
+
+#define IS_BACKWARDS_JUMP_OPCODE(opcode) \
+ ((opcode) == JUMP_BACKWARD || \
+ (opcode) == JUMP_BACKWARD_NO_INTERRUPT)
+
+#define IS_UNCONDITIONAL_JUMP_OPCODE(opcode) \
+ ((opcode) == JUMP || \
+ (opcode) == JUMP_NO_INTERRUPT || \
+ (opcode) == JUMP_FORWARD || \
+ (opcode) == JUMP_BACKWARD || \
+ (opcode) == JUMP_BACKWARD_NO_INTERRUPT)
+
+#define IS_SCOPE_EXIT_OPCODE(opcode) \
+ ((opcode) == RETURN_VALUE || \
+ (opcode) == RETURN_CONST || \
+ (opcode) == RAISE_VARARGS || \
+ (opcode) == RERAISE)
+
+#define IS_SUPERINSTRUCTION_OPCODE(opcode) \
+ ((opcode) == LOAD_FAST__LOAD_FAST || \
+ (opcode) == LOAD_FAST__LOAD_CONST || \
+ (opcode) == LOAD_CONST__LOAD_FAST || \
+ (opcode) == STORE_FAST__LOAD_FAST || \
+ (opcode) == STORE_FAST__STORE_FAST)
+
+
+#define LOG_BITS_PER_INT 5
+#define MASK_LOW_LOG_BITS 31
+
+static inline int
+is_bit_set_in_table(const uint32_t *table, int bitindex) {
+ /* Is the relevant bit set in the relevant word? */
+ /* 512 bits fit into 9 32-bits words.
+ * Word is indexed by (bitindex>>ln(size of int in bits)).
+ * Bit within word is the low bits of bitindex.
+ */
+ if (bitindex >= 0 && bitindex < 512) {
+ uint32_t word = table[bitindex >> LOG_BITS_PER_INT];
+ return (word >> (bitindex & MASK_LOW_LOG_BITS)) & 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+#undef LOG_BITS_PER_INT
+#undef MASK_LOW_LOG_BITS
+
+#define IS_RELATIVE_JUMP(opcode) (is_bit_set_in_table(_PyOpcode_RelativeJump, opcode))
+
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_OPCODE_UTILS_H */
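An aside on is_bit_set_in_table(), which backs the IS_JUMP_OPCODE and IS_RELATIVE_JUMP predicates in the header above: opcode membership is encoded as a bitset packed into 32-bit words. A rough Python model of the same lookup (illustrative only, not the C implementation):

    def is_bit_set_in_table(table, bitindex):
        # Each 32-bit word holds 32 flags: >> 5 selects the word
        # (LOG_BITS_PER_INT), & 31 selects the bit (MASK_LOW_LOG_BITS).
        if 0 <= bitindex < len(table) * 32:
            word = table[bitindex >> 5]
            return (word >> (bitindex & 31)) & 1
        return 0

    table = [0] * 9
    table[110 >> 5] |= 1 << (110 & 31)   # mark a hypothetical opcode 110
    print(is_bit_set_in_table(table, 110), is_bit_set_in_table(table, 111))  # 1 0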
diff --git a/Makefile.pre.in b/Makefile.pre.in
index 74e4171b010d..b97daaf6f445 100644
--- a/Makefile.pre.in
+++ b/Makefile.pre.in
@@ -374,6 +374,7 @@ PYTHON_OBJS= \
Python/ast_unparse.o \
Python/bltinmodule.o \
Python/ceval.o \
+ Python/flowgraph.o \
Python/codecs.o \
Python/compile.o \
Python/context.o \
@@ -1702,6 +1703,8 @@ PYTHON_HEADERS= \
$(srcdir)/Include/internal/pycore_object_state.h \
$(srcdir)/Include/internal/pycore_obmalloc.h \
$(srcdir)/Include/internal/pycore_obmalloc_init.h \
+ $(srcdir)/Include/internal/pycore_opcode.h \
+ $(srcdir)/Include/internal/pycore_opcode_utils.h \
$(srcdir)/Include/internal/pycore_pathconfig.h \
$(srcdir)/Include/internal/pycore_pyarena.h \
$(srcdir)/Include/internal/pycore_pyerrors.h \
diff --git a/PCbuild/_freeze_module.vcxproj b/PCbuild/_freeze_module.vcxproj
index 4f39756019e6..28eced6d4b28 100644
--- a/PCbuild/_freeze_module.vcxproj
+++ b/PCbuild/_freeze_module.vcxproj
@@ -183,6 +183,7 @@
<ClCompile Include="..\Python\bltinmodule.c" />
<ClCompile Include="..\Python\bootstrap_hash.c" />
<ClCompile Include="..\Python\ceval.c" />
+ <ClCompile Include="..\Python\flowgraph.c" />
<ClCompile Include="..\Python\codecs.c" />
<ClCompile Include="..\Python\compile.c" />
<ClCompile Include="..\Python\context.c" />
diff --git a/PCbuild/_freeze_module.vcxproj.filters b/PCbuild/_freeze_module.vcxproj.filters
index 7d7c4587b9a3..e4faa89bb831 100644
--- a/PCbuild/_freeze_module.vcxproj.filters
+++ b/PCbuild/_freeze_module.vcxproj.filters
@@ -76,6 +76,9 @@
<ClCompile Include="..\Python\ceval.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\Python\flowgraph.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
<ClCompile Include="..\Objects\classobject.c">
<Filter>Source Files</Filter>
</ClCompile>
diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj
index c754b2165745..8fab60033416 100644
--- a/PCbuild/pythoncore.vcxproj
+++ b/PCbuild/pythoncore.vcxproj
@@ -205,6 +205,7 @@
<ClInclude Include="..\Include\internal\pycore_call.h" />
<ClInclude Include="..\Include\internal\pycore_ceval.h" />
<ClInclude Include="..\Include\internal\pycore_ceval_state.h" />
+ <ClInclude Include="..\Include\internal\pycore_cfg.h" />
<ClInclude Include="..\Include\internal\pycore_code.h" />
<ClInclude Include="..\Include\internal\pycore_compile.h" />
<ClInclude Include="..\Include\internal\pycore_condvar.h" />
@@ -504,6 +505,7 @@
<ClCompile Include="..\Python\bltinmodule.c" />
<ClCompile Include="..\Python\bootstrap_hash.c" />
<ClCompile Include="..\Python\ceval.c" />
+ <ClCompile Include="..\Python\flowgraph.c" />
<ClCompile Include="..\Python\codecs.c" />
<ClCompile Include="..\Python\compile.c" />
<ClCompile Include="..\Python\context.c" />
diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters
index 90ed0602821b..6c5d8dd89f5b 100644
--- a/PCbuild/pythoncore.vcxproj.filters
+++ b/PCbuild/pythoncore.vcxproj.filters
@@ -1106,6 +1106,9 @@
<ClCompile Include="..\Python\ceval.c">
<Filter>Python</Filter>
</ClCompile>
+ <ClCompile Include="..\Python\flowgraph.c">
+ <Filter>Python</Filter>
+ </ClCompile>
<ClCompile Include="..\Python\codecs.c">
<Filter>Python</Filter>
</ClCompile>
diff --git a/Python/compile.c b/Python/compile.c
index 192deaa4b35f..fed9ae7066e4 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -23,23 +23,21 @@
#include <stdbool.h>
-// Need _PyOpcode_RelativeJump of pycore_opcode.h
-#define NEED_OPCODE_TABLES
-
#include "Python.h"
#include "pycore_ast.h" // _PyAST_GetDocString()
+#define NEED_OPCODE_TABLES
+#include "pycore_opcode_utils.h"
+#undef NEED_OPCODE_TABLES
+#include "pycore_flowgraph.h"
#include "pycore_code.h" // _PyCode_New()
#include "pycore_compile.h"
#include "pycore_intrinsics.h"
#include "pycore_long.h" // _PyLong_GetZero()
-#include "pycore_opcode.h" // _PyOpcode_Caches
#include "pycore_pymem.h" // _PyMem_IsPtrFreed()
#include "pycore_symtable.h" // PySTEntryObject, _PyFuture_FromAST()
#include "opcode_metadata.h" // _PyOpcode_opcode_metadata, _PyOpcode_num_popped/pushed
-
-#define DEFAULT_BLOCK_SIZE 16
#define DEFAULT_CODE_SIZE 128
#define DEFAULT_LNOTAB_SIZE 16
#define DEFAULT_CNOTAB_SIZE 32
@@ -83,68 +81,17 @@
*/
#define MAX_ALLOWED_STACK_USE (STACK_USE_GUIDELINE * 100)
-
-#define MAX_REAL_OPCODE 254
-
-#define IS_WITHIN_OPCODE_RANGE(opcode) \
- (((opcode) >= 0 && (opcode) <= MAX_REAL_OPCODE) || \
- IS_PSEUDO_OPCODE(opcode))
-
-#define IS_JUMP_OPCODE(opcode) \
- is_bit_set_in_table(_PyOpcode_Jump, opcode)
-
-#define IS_BLOCK_PUSH_OPCODE(opcode) \
- ((opcode) == SETUP_FINALLY || \
- (opcode) == SETUP_WITH || \
- (opcode) == SETUP_CLEANUP)
-
-#define HAS_TARGET(opcode) \
- (IS_JUMP_OPCODE(opcode) || IS_BLOCK_PUSH_OPCODE(opcode))
-
-/* opcodes that must be last in the basicblock */
-#define IS_TERMINATOR_OPCODE(opcode) \
- (IS_JUMP_OPCODE(opcode) || IS_SCOPE_EXIT_OPCODE(opcode))
-
-/* opcodes which are not emitted in codegen stage, only by the assembler */
-#define IS_ASSEMBLER_OPCODE(opcode) \
- ((opcode) == JUMP_FORWARD || \
- (opcode) == JUMP_BACKWARD || \
- (opcode) == JUMP_BACKWARD_NO_INTERRUPT)
-
-#define IS_BACKWARDS_JUMP_OPCODE(opcode) \
- ((opcode) == JUMP_BACKWARD || \
- (opcode) == JUMP_BACKWARD_NO_INTERRUPT)
-
-#define IS_UNCONDITIONAL_JUMP_OPCODE(opcode) \
- ((opcode) == JUMP || \
- (opcode) == JUMP_NO_INTERRUPT || \
- (opcode) == JUMP_FORWARD || \
- (opcode) == JUMP_BACKWARD || \
- (opcode) == JUMP_BACKWARD_NO_INTERRUPT)
-
-#define IS_SCOPE_EXIT_OPCODE(opcode) \
- ((opcode) == RETURN_VALUE || \
- (opcode) == RETURN_CONST || \
- (opcode) == RAISE_VARARGS || \
- (opcode) == RERAISE)
-
-#define IS_SUPERINSTRUCTION_OPCODE(opcode) \
- ((opcode) == LOAD_FAST__LOAD_FAST || \
- (opcode) == LOAD_FAST__LOAD_CONST || \
- (opcode) == LOAD_CONST__LOAD_FAST || \
- (opcode) == STORE_FAST__LOAD_FAST || \
- (opcode) == STORE_FAST__STORE_FAST)
-
#define IS_TOP_LEVEL_AWAIT(C) ( \
((C)->c_flags.cf_flags & PyCF_ALLOW_TOP_LEVEL_AWAIT) \
&& ((C)->u->u_ste->ste_type == ModuleBlock))
typedef _PyCompilerSrcLocation location;
+typedef _PyCfgInstruction cfg_instr;
+typedef _PyCfgBasicblock basicblock;
+typedef _PyCfgBuilder cfg_builder;
#define LOCATION(LNO, END_LNO, COL, END_COL) \
- ((const location){(LNO), (END_LNO), (COL), (END_COL)})
-
-static location NO_LOCATION = {-1, -1, -1, -1};
+ ((const _PyCompilerSrcLocation){(LNO), (END_LNO), (COL), (END_COL)})
/* Return true if loc1 starts after loc2 ends. */
static inline bool
@@ -165,11 +112,9 @@ same_location(location a, location b)
#define LOC(x) SRC_LOCATION_FROM_AST(x)
-typedef struct jump_target_label_ {
- int id;
-} jump_target_label;
+typedef _PyCfgJumpTargetLabel jump_target_label;
-static struct jump_target_label_ NO_LABEL = {-1};
+static jump_target_label NO_LABEL = {-1};
#define SAME_LABEL(L1, L2) ((L1).id == (L2).id)
#define IS_LABEL(L) (!SAME_LABEL((L), (NO_LABEL)))
@@ -183,88 +128,9 @@ static struct jump_target_label_ NO_LABEL = {-1};
#define USE_LABEL(C, LBL) \
RETURN_IF_ERROR(instr_sequence_use_label(INSTR_SEQUENCE(C), (LBL).id))
-struct cfg_instr {
- int i_opcode;
- int i_oparg;
- location i_loc;
- struct basicblock_ *i_target; /* target block (if jump instruction) */
- struct basicblock_ *i_except; /* target block when exception is raised */
-};
-
-/* One arg*/
-#define INSTR_SET_OP1(I, OP, ARG) \
- do { \
- assert(HAS_ARG(OP)); \
- struct cfg_instr *_instr__ptr_ = (I); \
- _instr__ptr_->i_opcode = (OP); \
- _instr__ptr_->i_oparg = (ARG); \
- } while (0);
-
-/* No args*/
-#define INSTR_SET_OP0(I, OP) \
- do { \
- assert(!HAS_ARG(OP)); \
- struct cfg_instr *_instr__ptr_ = (I); \
- _instr__ptr_->i_opcode = (OP); \
- _instr__ptr_->i_oparg = 0; \
- } while (0);
-
-typedef struct exceptstack {
- struct basicblock_ *handlers[CO_MAXBLOCKS+1];
- int depth;
-} ExceptStack;
-
-#define LOG_BITS_PER_INT 5
-#define MASK_LOW_LOG_BITS 31
-
-static inline int
-is_bit_set_in_table(const uint32_t *table, int bitindex) {
- /* Is the relevant bit set in the relevant word? */
- /* 512 bits fit into 9 32-bits words.
- * Word is indexed by (bitindex>>ln(size of int in bits)).
- * Bit within word is the low bits of bitindex.
- */
- if (bitindex >= 0 && bitindex < 512) {
- uint32_t word = table[bitindex >> LOG_BITS_PER_INT];
- return (word >> (bitindex & MASK_LOW_LOG_BITS)) & 1;
- }
- else {
- return 0;
- }
-}
-
-static inline int
-is_relative_jump(struct cfg_instr *i)
-{
- return is_bit_set_in_table(_PyOpcode_RelativeJump, i->i_opcode);
-}
-
-static inline int
-is_block_push(struct cfg_instr *i)
-{
- return IS_BLOCK_PUSH_OPCODE(i->i_opcode);
-}
-
-static inline int
-is_jump(struct cfg_instr *i)
-{
- return IS_JUMP_OPCODE(i->i_opcode);
-}
-
-static int
-instr_size(struct cfg_instr *instruction)
-{
- int opcode = instruction->i_opcode;
- assert(!IS_PSEUDO_OPCODE(opcode));
- int oparg = instruction->i_oparg;
- assert(HAS_ARG(opcode) || oparg == 0);
- int extended_args = (0xFFFFFF < oparg) + (0xFFFF < oparg) + (0xFF < oparg);
- int caches = _PyOpcode_Caches[opcode];
- return extended_args + 1 + caches;
-}
static void
-write_instr(_Py_CODEUNIT *codestr, struct cfg_instr *instruction, int ilen)
+write_instr(_Py_CODEUNIT *codestr, cfg_instr *instruction, int ilen)
{
int opcode = instruction->i_opcode;
assert(!IS_PSEUDO_OPCODE(opcode));
@@ -302,71 +168,6 @@ write_instr(_Py_CODEUNIT *codestr, struct cfg_instr *instruction, int ilen)
}
}
-typedef struct basicblock_ {
- /* Each basicblock in a compilation unit is linked via b_list in the
- reverse order that the block are allocated. b_list points to the next
- block, not to be confused with b_next, which is next by control flow. */
- struct basicblock_ *b_list;
- /* The label of this block if it is a jump target, -1 otherwise */
- jump_target_label b_label;
- /* Exception stack at start of block, used by assembler to create the exception handling table */
- ExceptStack *b_exceptstack;
- /* pointer to an array of instructions, initially NULL */
- struct cfg_instr *b_instr;
- /* If b_next is non-NULL, it is a pointer to the next
- block reached by normal control flow. */
- struct basicblock_ *b_next;
- /* number of instructions used */
- int b_iused;
- /* length of instruction array (b_instr) */
- int b_ialloc;
- /* Used by add_checks_for_loads_of_unknown_variables */
- uint64_t b_unsafe_locals_mask;
- /* Number of predecessors that a block has. */
- int b_predecessors;
- /* depth of stack upon entry of block, computed by stackdepth() */
- int b_startdepth;
- /* instruction offset for block, computed by assemble_jump_offsets() */
- int b_offset;
- /* Basic block is an exception handler that preserves lasti */
- unsigned b_preserve_lasti : 1;
- /* Used by compiler passes to mark whether they have visited a basic block. */
- unsigned b_visited : 1;
- /* b_except_handler is used by the cold-detection algorithm to mark exception targets */
- unsigned b_except_handler : 1;
- /* b_cold is true if this block is not perf critical (like an exception handler) */
- unsigned b_cold : 1;
- /* b_warm is used by the cold-detection algorithm to mark blocks which are definitely not cold */
- unsigned b_warm : 1;
-} basicblock;
-
-
-static struct cfg_instr *
-basicblock_last_instr(const basicblock *b) {
- assert(b->b_iused >= 0);
- if (b->b_iused > 0) {
- assert(b->b_instr != NULL);
- return &b->b_instr[b->b_iused - 1];
- }
- return NULL;
-}
-
-static inline int
-basicblock_exits_scope(const basicblock *b) {
- struct cfg_instr *last = basicblock_last_instr(b);
- return last && IS_SCOPE_EXIT_OPCODE(last->i_opcode);
-}
-
-static inline int
-basicblock_nofallthrough(const basicblock *b) {
- struct cfg_instr *last = basicblock_last_instr(b);
- return (last &&
- (IS_SCOPE_EXIT_OPCODE(last->i_opcode) ||
- IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)));
-}
-
-#define BB_NO_FALLTHROUGH(B) (basicblock_nofallthrough(B))
-#define BB_HAS_FALLTHROUGH(B) (!basicblock_nofallthrough(B))
/* fblockinfo tracks the current frame block.
@@ -397,26 +198,12 @@ enum {
COMPILER_SCOPE_COMPREHENSION,
};
-typedef struct cfg_builder_ {
- /* The entryblock, at which control flow begins. All blocks of the
- CFG are reachable through the b_next links */
- basicblock *g_entryblock;
- /* Pointer to the most recently allocated block. By following
- b_list links, you can reach all allocated blocks. */
- basicblock *g_block_list;
- /* pointer to the block currently being constructed */
- basicblock *g_curblock;
- /* label for the next instruction to be placed */
- jump_target_label g_current_label;
-} cfg_builder;
-
typedef struct {
int i_opcode;
int i_oparg;
location i_loc;
} instruction;
-
typedef struct instr_sequence_ {
instruction *s_instrs;
int s_allocated;
@@ -440,10 +227,11 @@ typedef struct instr_sequence_ {
* item_size: size of each item
*
*/
-static int
-ensure_array_large_enough(int idx, void **arr_, int *alloc, int default_alloc, size_t item_size)
+int
+_PyCompile_EnsureArrayLargeEnough(int idx, void **array, int *alloc,
+ int default_alloc, size_t item_size)
{
- void *arr = *arr_;
+ void *arr = *array;
if (arr == NULL) {
int new_alloc = default_alloc;
if (idx >= new_alloc) {
@@ -480,7 +268,7 @@ ensure_array_large_enough(int idx, void **arr_, int *alloc, int default_alloc, s
memset((char *)arr + oldsize, 0, newsize - oldsize);
}
- *arr_ = arr;
+ *array = arr;
return SUCCESS;
}
@@ -489,11 +277,11 @@ instr_sequence_next_inst(instr_sequence *seq) {
assert(seq->s_instrs != NULL || seq->s_used == 0);
RETURN_IF_ERROR(
- ensure_array_large_enough(seq->s_used + 1,
- (void**)&seq->s_instrs,
- &seq->s_allocated,
- INITIAL_INSTR_SEQUENCE_SIZE,
- sizeof(instruction)));
+ _PyCompile_EnsureArrayLargeEnough(seq->s_used + 1,
+ (void**)&seq->s_instrs,
+ &seq->s_allocated,
+ INITIAL_INSTR_SEQUENCE_SIZE,
+ sizeof(instruction)));
assert(seq->s_used < seq->s_allocated);
return seq->s_used++;
}
@@ -509,11 +297,11 @@ static int
instr_sequence_use_label(instr_sequence *seq, int lbl) {
int old_size = seq->s_labelmap_size;
RETURN_IF_ERROR(
- ensure_array_large_enough(lbl,
- (void**)&seq->s_labelmap,
- &seq->s_labelmap_size,
- INITIAL_INSTR_SEQUENCE_LABELS_MAP_SIZE,
- sizeof(int)));
+ _PyCompile_EnsureArrayLargeEnough(lbl,
+ (void**)&seq->s_labelmap,
+ &seq->s_labelmap_size,
+ INITIAL_INSTR_SEQUENCE_LABELS_MAP_SIZE,
+ sizeof(int)));
for(int i = old_size; i < seq->s_labelmap_size; i++) {
seq->s_labelmap[i] = -111; /* something weird, for debugging */
@@ -572,29 +360,11 @@ instr_sequence_fini(instr_sequence *seq) {
seq->s_instrs = NULL;
}
-static int basicblock_addop(basicblock *b, int opcode, int oparg, location loc);
-static int cfg_builder_maybe_start_new_block(cfg_builder *g);
-
-static int
-cfg_builder_use_label(cfg_builder *g, jump_target_label lbl)
-{
- g->g_current_label = lbl;
- return cfg_builder_maybe_start_new_block(g);
-}
-
-static int
-cfg_builder_addop(cfg_builder *g, int opcode, int oparg, location loc)
-{
- RETURN_IF_ERROR(cfg_builder_maybe_start_new_block(g));
- return basicblock_addop(g->g_curblock, opcode, oparg, loc);
-}
-
-static int cfg_builder_init(cfg_builder *g);
static int
instr_sequence_to_cfg(instr_sequence *seq, cfg_builder *g) {
memset(g, 0, sizeof(cfg_builder));
- RETURN_IF_ERROR(cfg_builder_init(g));
+ RETURN_IF_ERROR(_PyCfgBuilder_Init(g));
/* There can be more than one label for the same offset. The
* offset2lbl maping selects one of them which we use consistently.
@@ -621,7 +391,7 @@ instr_sequence_to_cfg(instr_sequence *seq, cfg_builder *g) {
if (lbl >= 0) {
assert (lbl < seq->s_labelmap_size);
jump_target_label lbl_ = {lbl};
- if (cfg_builder_use_label(g, lbl_) < 0) {
+ if (_PyCfgBuilder_UseLabel(g, lbl_) < 0) {
goto error;
}
}
@@ -635,7 +405,7 @@ instr_sequence_to_cfg(instr_sequence *seq, cfg_builder *g) {
assert(lbl >= 0 && lbl < seq->s_labelmap_size);
oparg = lbl;
}
- if (cfg_builder_addop(g, opcode, oparg, instr->i_loc) < 0) {
+ if (_PyCfgBuilder_Addop(g, opcode, oparg, instr->i_loc) < 0) {
goto error;
}
}
@@ -746,8 +516,6 @@ typedef struct {
Py_ssize_t on_top;
} pattern_context;
-static int basicblock_next_instr(basicblock *);
-
static int codegen_addop_i(instr_sequence *seq, int opcode, Py_ssize_t oparg, location loc);
static void compiler_free(struct compiler *);
@@ -798,8 +566,6 @@ static int compiler_match(struct compiler *, stmt_ty);
static int compiler_pattern_subpattern(struct compiler *,
pattern_ty, pattern_context *);
-static int remove_redundant_nops(basicblock *bb);
-
static PyCodeObject *assemble(struct compiler *, int addNone);
#define CAPSULE_NAME "compile.c compiler unit"
@@ -979,57 +745,6 @@ dictbytype(PyObject *src, int scope_type, int flag, Py_ssize_t offset)
return dest;
}
-#ifndef NDEBUG
-static bool
-cfg_builder_check(cfg_builder *g)
-{
- assert(g->g_entryblock->b_iused > 0);
- for (basicblock *block = g->g_block_list; block != NULL; block = block->b_list) {
- assert(!_PyMem_IsPtrFreed(block));
- if (block->b_instr != NULL) {
- assert(block->b_ialloc > 0);
- assert(block->b_iused >= 0);
- assert(block->b_ialloc >= block->b_iused);
- }
- else {
- assert (block->b_iused == 0);
- assert (block->b_ialloc == 0);
- }
- }
- return true;
-}
-#endif
-
-static basicblock *cfg_builder_new_block(cfg_builder *g);
-
-static int
-cfg_builder_init(cfg_builder *g)
-{
- g->g_block_list = NULL;
- basicblock *block = cfg_builder_new_block(g);
- if (block == NULL) {
- return ERROR;
- }
- g->g_curblock = g->g_entryblock = block;
- g->g_current_label = NO_LABEL;
- return SUCCESS;
-}
-
-static void
-cfg_builder_fini(cfg_builder* g)
-{
- assert(cfg_builder_check(g));
- basicblock *b = g->g_block_list;
- while (b != NULL) {
- if (b->b_instr) {
- PyObject_Free((void *)b->b_instr);
- }
- basicblock *next = b->b_list;
- PyObject_Free((void *)b);
- b = next;
- }
-}
-
static void
compiler_unit_free(struct compiler_unit *u)
{
@@ -1119,85 +834,6 @@ compiler_set_qualname(struct compiler *c)
return SUCCESS;
}
-/* Allocate a new block and return a pointer to it.
- Returns NULL on error.
-*/
-static basicblock *
-cfg_builder_new_block(cfg_builder *g)
-{
- basicblock *b = (basicblock *)PyObject_Calloc(1, sizeof(basicblock));
- if (b == NULL) {
- PyErr_NoMemory();
- return NULL;
- }
- /* Extend the singly linked list of blocks with new block. */
- b->b_list = g->g_block_list;
- g->g_block_list = b;
- b->b_label = NO_LABEL;
- return b;
-}
-
-static basicblock *
-cfg_builder_use_next_block(cfg_builder *g, basicblock *block)
-{
- assert(block != NULL);
- g->g_curblock->b_next = block;
- g->g_curblock = block;
- return block;
-}
-
-static inline int
-basicblock_append_instructions(basicblock *target, basicblock *source)
-{
- for (int i = 0; i < source->b_iused; i++) {
- int n = basicblock_next_instr(target);
- if (n < 0) {
- return ERROR;
- }
- target->b_instr[n] = source->b_instr[i];
- }
- return SUCCESS;
-}
-
-static basicblock *
-copy_basicblock(cfg_builder *g, basicblock *block)
-{
- /* Cannot copy a block if it has a fallthrough, since
- * a block can only have one fallthrough predecessor.
- */
- assert(BB_NO_FALLTHROUGH(block));
- basicblock *result = cfg_builder_new_block(g);
- if (result == NULL) {
- return NULL;
- }
- if (basicblock_append_instructions(result, block) < 0) {
- return NULL;
- }
- return result;
-}
-
-/* Returns the offset of the next instruction in the current block's
- b_instr array. Resizes the b_instr as necessary.
- Returns -1 on failure.
-*/
-
-static int
-basicblock_next_instr(basicblock *b)
-{
- assert(b != NULL);
-
- RETURN_IF_ERROR(
- ensure_array_large_enough(
- b->b_iused + 1,
- (void**)&b->b_instr,
- &b->b_ialloc,
- DEFAULT_BLOCK_SIZE,
- sizeof(struct cfg_instr)));
-
- return b->b_iused++;
-}
-
-
/* Return the stack effect of opcode with argument oparg.
Some opcodes have different stack effect when jump to the target and
@@ -1290,62 +926,6 @@ PyCompile_OpcodeStackEffect(int opcode, int oparg)
return stack_effect(opcode, oparg, -1);
}
-static int
-basicblock_addop(basicblock *b, int opcode, int oparg, location loc)
-{
- assert(IS_WITHIN_OPCODE_RANGE(opcode));
- assert(!IS_ASSEMBLER_OPCODE(opcode));
- assert(HAS_ARG(opcode) || HAS_TARGET(opcode) || oparg == 0);
- assert(0 <= oparg && oparg < (1 << 30));
-
- int off = basicblock_next_instr(b);
- if (off < 0) {
- return ERROR;
- }
- struct cfg_instr *i = &b->b_instr[off];
- i->i_opcode = opcode;
- i->i_oparg = oparg;
- i->i_target = NULL;
- i->i_loc = loc;
-
- return SUCCESS;
-}
-
-static bool
-cfg_builder_current_block_is_terminated(cfg_builder *g)
-{
- struct cfg_instr *last = basicblock_last_instr(g->g_curblock);
- if (last && IS_TERMINATOR_OPCODE(last->i_opcode)) {
- return true;
- }
- if (IS_LABEL(g->g_current_label)) {
- if (last || IS_LABEL(g->g_curblock->b_label)) {
- return true;
- }
- else {
- /* current block is empty, label it */
- g->g_curblock->b_label = g->g_current_label;
- g->g_current_label = NO_LABEL;
- }
- }
- return false;
-}
-
-static int
-cfg_builder_maybe_start_new_block(cfg_builder *g)
-{
- if (cfg_builder_current_block_is_terminated(g)) {
- basicblock *b = cfg_builder_new_block(g);
- if (b == NULL) {
- return ERROR;
- }
- b->b_label = g->g_current_label;
- g->g_current_label = NO_LABEL;
- cfg_builder_use_next_block(g, b);
- }
- return SUCCESS;
-}
-
static int
codegen_addop_noarg(instr_sequence *seq, int opcode, location loc)
{
@@ -2520,16 +2100,6 @@ compiler_check_debug_args(struct compiler *c, arguments_ty args)
return SUCCESS;
}
-static inline int
-insert_instruction(basicblock *block, int pos, struct cfg_instr *instr) {
- RETURN_IF_ERROR(basicblock_next_instr(block));
- for (int i = block->b_iused - 1; i > pos; i--) {
- block->b_instr[i] = block->b_instr[i-1];
- }
- block->b_instr[pos] = *instr;
- return SUCCESS;
-}
-
static int
wrap_in_stopiteration_handler(struct compiler *c)
{
@@ -7037,101 +6607,6 @@ struct assembler {
int a_location_off; /* offset of last written location info frame */
};
-static basicblock**
-make_cfg_traversal_stack(basicblock *entryblock) {
- int nblocks = 0;
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- b->b_visited = 0;
- nblocks++;
- }
- basicblock **stack = (basicblock **)PyMem_Malloc(sizeof(basicblock *) * nblocks);
- if (!stack) {
- PyErr_NoMemory();
- }
- return stack;
-}
-
-Py_LOCAL_INLINE(void)
-stackdepth_push(basicblock ***sp, basicblock *b, int depth)
-{
- assert(b->b_startdepth < 0 || b->b_startdepth == depth);
- if (b->b_startdepth < depth && b->b_startdepth < 100) {
- assert(b->b_startdepth < 0);
- b->b_startdepth = depth;
- *(*sp)++ = b;
- }
-}
-
-/* Find the flow path that needs the largest stack. We assume that
- * cycles in the flow graph have no net effect on the stack depth.
- */
-static int
-stackdepth(basicblock *entryblock, int code_flags)
-{
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- b->b_startdepth = INT_MIN;
- }
- basicblock **stack = make_cfg_traversal_stack(entryblock);
- if (!stack) {
- return ERROR;
- }
-
- int maxdepth = 0;
- basicblock **sp = stack;
- if (code_flags & (CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR)) {
- stackdepth_push(&sp, entryblock, 1);
- } else {
- stackdepth_push(&sp, entryblock, 0);
- }
-
- while (sp != stack) {
- basicblock *b = *--sp;
- int depth = b->b_startdepth;
- assert(depth >= 0);
- basicblock *next = b->b_next;
- for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- int effect = stack_effect(instr->i_opcode, instr->i_oparg, 0);
- if (effect == PY_INVALID_STACK_EFFECT) {
- PyErr_Format(PyExc_SystemError,
- "compiler stack_effect(opcode=%d, arg=%i) failed",
- instr->i_opcode, instr->i_oparg);
- return ERROR;
- }
- int new_depth = depth + effect;
- assert(new_depth >= 0); /* invalid code or bug in stackdepth() */
- if (new_depth > maxdepth) {
- maxdepth = new_depth;
- }
- if (HAS_TARGET(instr->i_opcode)) {
- effect = stack_effect(instr->i_opcode, instr->i_oparg, 1);
- assert(effect != PY_INVALID_STACK_EFFECT);
- int target_depth = depth + effect;
- assert(target_depth >= 0); /* invalid code or bug in stackdepth() */
- if (target_depth > maxdepth) {
- maxdepth = target_depth;
- }
- stackdepth_push(&sp, instr->i_target, target_depth);
- }
- depth = new_depth;
- assert(!IS_ASSEMBLER_OPCODE(instr->i_opcode));
- if (IS_UNCONDITIONAL_JUMP_OPCODE(instr->i_opcode) ||
- IS_SCOPE_EXIT_OPCODE(instr->i_opcode))
- {
- /* remaining code is dead */
- next = NULL;
- break;
- }
- }
- if (next != NULL) {
- assert(BB_HAS_FALLTHROUGH(b));
- stackdepth_push(&sp, next, depth);
- }
- }
- PyMem_Free(stack);
- return maxdepth;
-}
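
For readers skimming the removal: `stackdepth()` computes the maximum stack depth with a depth-first walk over the CFG using an explicit worklist, assuming cycles are stack-neutral. The core accumulation is easy to see in isolation; below is a minimal standalone sketch over a straight-line instruction array (the opcodes and their `effect` table are invented for illustration, they are not CPython's):

    #include <stdio.h>

    /* Hypothetical opcodes with fixed net stack effects. */
    enum { PUSH_CONST, BINARY_ADD, POP, N_OPS };
    static const int effect[N_OPS] = { +1, -1, -1 };  /* ADD pops 2, pushes 1 */

    int main(void)
    {
        int program[] = { PUSH_CONST, PUSH_CONST, BINARY_ADD, POP };
        int n = (int)(sizeof(program) / sizeof(*program));
        int depth = 0, maxdepth = 0;
        for (int i = 0; i < n; i++) {
            depth += effect[program[i]];
            if (depth > maxdepth) {
                maxdepth = depth;
            }
        }
        printf("max stack depth: %d\n", maxdepth);  /* prints 2 */
        return 0;
    }

The real function does the same accumulation per basic block and pushes jump targets onto the worklist with their entry depth, taking the maximum over all paths.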
-
static int
assemble_init(struct assembler *a, int firstlineno)
{
@@ -7168,389 +6643,47 @@ assemble_free(struct assembler *a)
Py_XDECREF(a->a_except_table);
}
-static int
-blocksize(basicblock *b)
-{
- int size = 0;
- for (int i = 0; i < b->b_iused; i++) {
- size += instr_size(&b->b_instr[i]);
- }
- return size;
-}
-
-static basicblock *
-push_except_block(ExceptStack *stack, struct cfg_instr *setup) {
- assert(is_block_push(setup));
- int opcode = setup->i_opcode;
- basicblock * target = setup->i_target;
- if (opcode == SETUP_WITH || opcode == SETUP_CLEANUP) {
- target->b_preserve_lasti = 1;
- }
- stack->handlers[++stack->depth] = target;
- return target;
-}
-
-static basicblock *
-pop_except_block(ExceptStack *stack) {
- assert(stack->depth > 0);
- return stack->handlers[--stack->depth];
+static inline void
+write_except_byte(struct assembler *a, int byte) {
+ unsigned char *p = (unsigned char *) PyBytes_AS_STRING(a->a_except_table);
+ p[a->a_except_table_off++] = byte;
}
-static basicblock *
-except_stack_top(ExceptStack *stack) {
- return stack->handlers[stack->depth];
-}
+#define CONTINUATION_BIT 64
-static ExceptStack *
-make_except_stack(void) {
- ExceptStack *new = PyMem_Malloc(sizeof(ExceptStack));
- if (new == NULL) {
- PyErr_NoMemory();
- return NULL;
+static void
+assemble_emit_exception_table_item(struct assembler *a, int value, int msb)
+{
+ assert ((msb | 128) == 128);
+ assert(value >= 0 && value < (1 << 30));
+ if (value >= 1 << 24) {
+ write_except_byte(a, (value >> 24) | CONTINUATION_BIT | msb);
+ msb = 0;
}
- new->depth = 0;
- new->handlers[0] = NULL;
- return new;
-}
-
-static ExceptStack *
-copy_except_stack(ExceptStack *stack) {
- ExceptStack *copy = PyMem_Malloc(sizeof(ExceptStack));
- if (copy == NULL) {
- PyErr_NoMemory();
- return NULL;
+ if (value >= 1 << 18) {
+ write_except_byte(a, ((value >> 18)&0x3f) | CONTINUATION_BIT | msb);
+ msb = 0;
+ }
+ if (value >= 1 << 12) {
+ write_except_byte(a, ((value >> 12)&0x3f) | CONTINUATION_BIT | msb);
+ msb = 0;
}
- memcpy(copy, stack, sizeof(ExceptStack));
- return copy;
+ if (value >= 1 << 6) {
+ write_except_byte(a, ((value >> 6)&0x3f) | CONTINUATION_BIT | msb);
+ msb = 0;
+ }
+ write_except_byte(a, (value&0x3f) | msb);
}
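
The new `assemble_emit_exception_table_item()` writes each value most-significant-first in 6-bit groups: every byte except the last carries `CONTINUATION_BIT` (bit 6), and bit 7 is left free for the caller's `msb` marker that flags the start of an entry. A self-contained encoder/decoder pair for the same scheme, with no CPython types (a sketch, not the shipped code):

    #include <assert.h>
    #include <stdio.h>

    #define CONT 64  /* continuation bit, as in the diff's CONTINUATION_BIT */

    /* Encode v into 6-bit chunks; returns the number of bytes written. */
    static int encode(unsigned v, unsigned char *out)
    {
        int n = 0;
        unsigned char buf[5];
        do {  /* collect chunks least-significant first */
            buf[n++] = v & 0x3f;
            v >>= 6;
        } while (v);
        for (int i = n - 1; i >= 0; i--) {  /* emit most-significant first */
            out[n - 1 - i] = buf[i] | (i ? CONT : 0);
        }
        return n;
    }

    static unsigned decode(const unsigned char *in)
    {
        unsigned v = 0;
        do {
            v = (v << 6) | (*in & 0x3f);
        } while (*in++ & CONT);
        return v;
    }

    int main(void)
    {
        unsigned char bytes[5];
        int n = encode(100000, bytes);
        for (int i = 0; i < n; i++) printf("%02x ", bytes[i]);  /* 58 5a 20 */
        printf("-> %u\n", decode(bytes));
        assert(decode(bytes) == 100000);
        return 0;
    }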
+/* See Objects/exception_handling_notes.txt for details of layout */
+#define MAX_SIZE_OF_ENTRY 20
+
static int
-label_exception_targets(basicblock *entryblock) {
- basicblock **todo_stack = make_cfg_traversal_stack(entryblock);
- if (todo_stack == NULL) {
- return ERROR;
- }
- ExceptStack *except_stack = make_except_stack();
- if (except_stack == NULL) {
- PyMem_Free(todo_stack);
- PyErr_NoMemory();
- return ERROR;
- }
- except_stack->depth = 0;
- todo_stack[0] = entryblock;
- entryblock->b_visited = 1;
- entryblock->b_exceptstack = except_stack;
- basicblock **todo = &todo_stack[1];
- basicblock *handler = NULL;
- while (todo > todo_stack) {
- todo--;
- basicblock *b = todo[0];
- assert(b->b_visited == 1);
- except_stack = b->b_exceptstack;
- assert(except_stack != NULL);
- b->b_exceptstack = NULL;
- handler = except_stack_top(except_stack);
- for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- if (is_block_push(instr)) {
- if (!instr->i_target->b_visited) {
- ExceptStack *copy = copy_except_stack(except_stack);
- if (copy == NULL) {
- goto error;
- }
- instr->i_target->b_exceptstack = copy;
- todo[0] = instr->i_target;
- instr->i_target->b_visited = 1;
- todo++;
- }
- handler = push_except_block(except_stack, instr);
- }
- else if (instr->i_opcode == POP_BLOCK) {
- handler = pop_except_block(except_stack);
- }
- else if (is_jump(instr)) {
- instr->i_except = handler;
- assert(i == b->b_iused -1);
- if (!instr->i_target->b_visited) {
- if (BB_HAS_FALLTHROUGH(b)) {
- ExceptStack *copy = copy_except_stack(except_stack);
- if (copy == NULL) {
- goto error;
- }
- instr->i_target->b_exceptstack = copy;
- }
- else {
- instr->i_target->b_exceptstack = except_stack;
- except_stack = NULL;
- }
- todo[0] = instr->i_target;
- instr->i_target->b_visited = 1;
- todo++;
- }
- }
- else {
- if (instr->i_opcode == YIELD_VALUE) {
- instr->i_oparg = except_stack->depth;
- }
- instr->i_except = handler;
- }
- }
- if (BB_HAS_FALLTHROUGH(b) && !b->b_next->b_visited) {
- assert(except_stack != NULL);
- b->b_next->b_exceptstack = except_stack;
- todo[0] = b->b_next;
- b->b_next->b_visited = 1;
- todo++;
- }
- else if (except_stack != NULL) {
- PyMem_Free(except_stack);
- }
- }
-#ifdef Py_DEBUG
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- assert(b->b_exceptstack == NULL);
- }
-#endif
- PyMem_Free(todo_stack);
- return SUCCESS;
-error:
- PyMem_Free(todo_stack);
- PyMem_Free(except_stack);
- return ERROR;
-}
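
`label_exception_targets()` replays the block stack at compile time: each `SETUP_*` pushes its handler block, `POP_BLOCK` pops, and every instruction is tagged with whatever handler is on top. The stack itself is only an array plus a depth counter; a stripped-down sketch follows (integer handler ids stand in for `basicblock` pointers):

    #include <assert.h>
    #include <stdio.h>

    #define MAXBLOCKS 64

    typedef struct {
        int depth;
        int handlers[MAXBLOCKS + 1];  /* handlers[0] == -1 means "no handler" */
    } ExceptStack;

    static int push_handler(ExceptStack *s, int h) { return s->handlers[++s->depth] = h; }
    static int pop_handler(ExceptStack *s)  { assert(s->depth > 0); return s->handlers[--s->depth]; }
    static int top_handler(const ExceptStack *s) { return s->handlers[s->depth]; }

    int main(void)
    {
        ExceptStack s = { .depth = 0, .handlers = { -1 } };
        push_handler(&s, 7);                /* e.g. a SETUP_FINALLY targeting block 7 */
        printf("current handler: %d\n", top_handler(&s));  /* 7 */
        pop_handler(&s);                    /* the matching POP_BLOCK */
        printf("current handler: %d\n", top_handler(&s));  /* -1 */
        return 0;
    }

In the removed code the stack is copied whenever control flow forks, so each block is visited with the handler stack it would have at run time.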
-
-
-static int
-mark_except_handlers(basicblock *entryblock) {
-#ifndef NDEBUG
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- assert(!b->b_except_handler);
- }
-#endif
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- for (int i=0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- if (is_block_push(instr)) {
- instr->i_target->b_except_handler = 1;
- }
- }
- }
- return SUCCESS;
-}
-
-static int
-mark_warm(basicblock *entryblock) {
- basicblock **stack = make_cfg_traversal_stack(entryblock);
- if (stack == NULL) {
- return ERROR;
- }
- basicblock **sp = stack;
-
- *sp++ = entryblock;
- entryblock->b_visited = 1;
- while (sp > stack) {
- basicblock *b = *(--sp);
- assert(!b->b_except_handler);
- b->b_warm = 1;
- basicblock *next = b->b_next;
- if (next && BB_HAS_FALLTHROUGH(b) && !next->b_visited) {
- *sp++ = next;
- next->b_visited = 1;
- }
- for (int i=0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- if (is_jump(instr) && !instr->i_target->b_visited) {
- *sp++ = instr->i_target;
- instr->i_target->b_visited = 1;
- }
- }
- }
- PyMem_Free(stack);
- return SUCCESS;
-}
-
-static int
-mark_cold(basicblock *entryblock) {
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- assert(!b->b_cold && !b->b_warm);
- }
- if (mark_warm(entryblock) < 0) {
- return ERROR;
- }
-
- basicblock **stack = make_cfg_traversal_stack(entryblock);
- if (stack == NULL) {
- return ERROR;
- }
-
- basicblock **sp = stack;
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- if (b->b_except_handler) {
- assert(!b->b_warm);
- *sp++ = b;
- b->b_visited = 1;
- }
- }
-
- while (sp > stack) {
- basicblock *b = *(--sp);
- b->b_cold = 1;
- basicblock *next = b->b_next;
- if (next && BB_HAS_FALLTHROUGH(b)) {
- if (!next->b_warm && !next->b_visited) {
- *sp++ = next;
- next->b_visited = 1;
- }
- }
- for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- if (is_jump(instr)) {
- assert(i == b->b_iused - 1);
- basicblock *target = b->b_instr[i].i_target;
- if (!target->b_warm && !target->b_visited) {
- *sp++ = target;
- target->b_visited = 1;
- }
- }
- }
- }
- PyMem_Free(stack);
- return SUCCESS;
-}
-
-static int
-remove_redundant_jumps(cfg_builder *g);
-
-static int
-push_cold_blocks_to_end(cfg_builder *g, int code_flags) {
- basicblock *entryblock = g->g_entryblock;
- if (entryblock->b_next == NULL) {
- /* single basicblock, no need to reorder */
- return SUCCESS;
- }
- RETURN_IF_ERROR(mark_cold(entryblock));
-
- /* If we have a cold block with fallthrough to a warm block, add */
- /* an explicit jump instead of fallthrough */
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- if (b->b_cold && BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_next->b_warm) {
- basicblock *explicit_jump = cfg_builder_new_block(g);
- if (explicit_jump == NULL) {
- return ERROR;
- }
- basicblock_addop(explicit_jump, JUMP, b->b_next->b_label.id, NO_LOCATION);
- explicit_jump->b_cold = 1;
- explicit_jump->b_next = b->b_next;
- b->b_next = explicit_jump;
-
- /* set target */
- struct cfg_instr *last = basicblock_last_instr(explicit_jump);
- last->i_target = explicit_jump->b_next;
- }
- }
-
- assert(!entryblock->b_cold); /* First block can't be cold */
- basicblock *cold_blocks = NULL;
- basicblock *cold_blocks_tail = NULL;
-
- basicblock *b = entryblock;
- while(b->b_next) {
- assert(!b->b_cold);
- while (b->b_next && !b->b_next->b_cold) {
- b = b->b_next;
- }
- if (b->b_next == NULL) {
- /* no more cold blocks */
- break;
- }
-
- /* b->b_next is the beginning of a cold streak */
- assert(!b->b_cold && b->b_next->b_cold);
-
- basicblock *b_end = b->b_next;
- while (b_end->b_next && b_end->b_next->b_cold) {
- b_end = b_end->b_next;
- }
-
- /* b_end is the end of the cold streak */
- assert(b_end && b_end->b_cold);
- assert(b_end->b_next == NULL || !b_end->b_next->b_cold);
-
- if (cold_blocks == NULL) {
- cold_blocks = b->b_next;
- }
- else {
- cold_blocks_tail->b_next = b->b_next;
- }
- cold_blocks_tail = b_end;
- b->b_next = b_end->b_next;
- b_end->b_next = NULL;
- }
- assert(b != NULL && b->b_next == NULL);
- b->b_next = cold_blocks;
-
- if (cold_blocks != NULL) {
- RETURN_IF_ERROR(remove_redundant_jumps(g));
- }
- return SUCCESS;
-}
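
`push_cold_blocks_to_end()` is splice surgery on the singly linked `b_next` chain: each maximal run of cold blocks is unlinked and collected on a side list that is reattached after the last warm block. The list manipulation in isolation, assuming a toy `node` type with a cold flag instead of the CFG metadata:

    #include <stdio.h>

    typedef struct node { int id, cold; struct node *next; } node;

    /* Move every maximal run of cold nodes to the end, preserving order. */
    static void push_cold_to_end(node *head)  /* head is assumed warm */
    {
        node *cold_head = NULL, *cold_tail = NULL, *b = head;
        while (b->next) {
            while (b->next && !b->next->cold) b = b->next;
            if (!b->next) break;
            node *run = b->next, *end = run;           /* start of a cold streak */
            while (end->next && end->next->cold) end = end->next;
            if (!cold_head) cold_head = run; else cold_tail->next = run;
            cold_tail = end;
            b->next = end->next;                       /* unlink the streak */
            end->next = NULL;
        }
        b->next = cold_head;                           /* append all cold nodes */
    }

    int main(void)
    {
        node d = {4, 1, NULL}, c = {3, 0, &d}, bb = {2, 1, &c}, a = {1, 0, &bb};
        push_cold_to_end(&a);
        for (node *n = &a; n; n = n->next) printf("%d ", n->id);  /* 1 3 2 4 */
        printf("\n");
        return 0;
    }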
-
-static void
-convert_exception_handlers_to_nops(basicblock *entryblock) {
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- if (is_block_push(instr) || instr->i_opcode == POP_BLOCK) {
- INSTR_SET_OP0(instr, NOP);
- }
- }
- }
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- remove_redundant_nops(b);
- }
-}
-
-static inline void
-write_except_byte(struct assembler *a, int byte) {
- unsigned char *p = (unsigned char *) PyBytes_AS_STRING(a->a_except_table);
- p[a->a_except_table_off++] = byte;
-}
-
-#define CONTINUATION_BIT 64
-
-static void
-assemble_emit_exception_table_item(struct assembler *a, int value, int msb)
-{
- assert ((msb | 128) == 128);
- assert(value >= 0 && value < (1 << 30));
- if (value >= 1 << 24) {
- write_except_byte(a, (value >> 24) | CONTINUATION_BIT | msb);
- msb = 0;
- }
- if (value >= 1 << 18) {
- write_except_byte(a, ((value >> 18)&0x3f) | CONTINUATION_BIT | msb);
- msb = 0;
- }
- if (value >= 1 << 12) {
- write_except_byte(a, ((value >> 12)&0x3f) | CONTINUATION_BIT | msb);
- msb = 0;
- }
- if (value >= 1 << 6) {
- write_except_byte(a, ((value >> 6)&0x3f) | CONTINUATION_BIT | msb);
- msb = 0;
- }
- write_except_byte(a, (value&0x3f) | msb);
-}
-
-/* See Objects/exception_handling_notes.txt for details of layout */
-#define MAX_SIZE_OF_ENTRY 20
-
-static int
-assemble_emit_exception_table_entry(struct assembler *a, int start, int end, basicblock *handler)
-{
- Py_ssize_t len = PyBytes_GET_SIZE(a->a_except_table);
- if (a->a_except_table_off + MAX_SIZE_OF_ENTRY >= len) {
- RETURN_IF_ERROR(_PyBytes_Resize(&a->a_except_table, len * 2));
+assemble_emit_exception_table_entry(struct assembler *a, int start, int end, basicblock *handler)
+{
+ Py_ssize_t len = PyBytes_GET_SIZE(a->a_except_table);
+ if (a->a_except_table_off + MAX_SIZE_OF_ENTRY >= len) {
+ RETURN_IF_ERROR(_PyBytes_Resize(&a->a_except_table, len * 2));
}
int size = end-start;
assert(end > start);
@@ -7578,7 +6711,7 @@ assemble_exception_table(struct assembler *a, basicblock *entryblock)
for (b = entryblock; b != NULL; b = b->b_next) {
ioffset = b->b_offset;
for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
+ cfg_instr *instr = &b->b_instr[i];
if (instr->i_except != handler) {
if (handler != NULL) {
RETURN_IF_ERROR(
@@ -7587,7 +6720,7 @@ assemble_exception_table(struct assembler *a, basicblock *entryblock)
start = ioffset;
handler = instr->i_except;
}
- ioffset += instr_size(instr);
+ ioffset += _PyCfg_InstrSize(instr);
}
}
if (handler != NULL) {
@@ -7755,7 +6888,7 @@ assemble_location_info(struct assembler *a, basicblock *entryblock, int firstlin
loc = b->b_instr[j].i_loc;
size = 0;
}
- size += instr_size(&b->b_instr[j]);
+ size += _PyCfg_InstrSize(&b->b_instr[j]);
}
}
RETURN_IF_ERROR(assemble_emit_location(a, loc, size));
@@ -7768,12 +6901,12 @@ assemble_location_info(struct assembler *a, basicblock *entryblock, int firstlin
*/
static int
-assemble_emit_instr(struct assembler *a, struct cfg_instr *i)
+assemble_emit_instr(struct assembler *a, cfg_instr *i)
{
Py_ssize_t len = PyBytes_GET_SIZE(a->a_bytecode);
_Py_CODEUNIT *code;
- int size = instr_size(i);
+ int size = _PyCfg_InstrSize(i);
if (a->a_offset + size >= len / (int)sizeof(_Py_CODEUNIT)) {
if (len > PY_SSIZE_T_MAX / 2) {
return ERROR;
@@ -7786,8 +6919,6 @@ assemble_emit_instr(struct assembler *a, struct cfg_instr *i)
return SUCCESS;
}
-static int merge_const_one(PyObject *const_cache, PyObject **obj);
-
static int
assemble_emit(struct assembler *a, basicblock *entryblock, int first_lineno,
PyObject *const_cache)
@@ -7805,391 +6936,95 @@ assemble_emit(struct assembler *a, basicblock *entryblock, int first_lineno,
RETURN_IF_ERROR(assemble_exception_table(a, entryblock));
RETURN_IF_ERROR(_PyBytes_Resize(&a->a_except_table, a->a_except_table_off));
- RETURN_IF_ERROR(merge_const_one(const_cache, &a->a_except_table));
+ RETURN_IF_ERROR(_PyCompile_ConstCacheMergeOne(const_cache, &a->a_except_table));
RETURN_IF_ERROR(_PyBytes_Resize(&a->a_linetable, a->a_location_off));
- RETURN_IF_ERROR(merge_const_one(const_cache, &a->a_linetable));
+ RETURN_IF_ERROR(_PyCompile_ConstCacheMergeOne(const_cache, &a->a_linetable));
RETURN_IF_ERROR(_PyBytes_Resize(&a->a_bytecode, a->a_offset * sizeof(_Py_CODEUNIT)));
- RETURN_IF_ERROR(merge_const_one(const_cache, &a->a_bytecode));
+ RETURN_IF_ERROR(_PyCompile_ConstCacheMergeOne(const_cache, &a->a_bytecode));
return SUCCESS;
}
-static int
-normalize_jumps_in_block(cfg_builder *g, basicblock *b) {
- struct cfg_instr *last = basicblock_last_instr(b);
- if (last == NULL || !is_jump(last)) {
- return SUCCESS;
- }
- assert(!IS_ASSEMBLER_OPCODE(last->i_opcode));
- bool is_forward = last->i_target->b_visited == 0;
- switch(last->i_opcode) {
- case JUMP:
- last->i_opcode = is_forward ? JUMP_FORWARD : JUMP_BACKWARD;
- return SUCCESS;
- case JUMP_NO_INTERRUPT:
- last->i_opcode = is_forward ?
- JUMP_FORWARD : JUMP_BACKWARD_NO_INTERRUPT;
- return SUCCESS;
- }
- int reversed_opcode = 0;
- switch(last->i_opcode) {
- case POP_JUMP_IF_NOT_NONE:
- reversed_opcode = POP_JUMP_IF_NONE;
- break;
- case POP_JUMP_IF_NONE:
- reversed_opcode = POP_JUMP_IF_NOT_NONE;
- break;
- case POP_JUMP_IF_FALSE:
- reversed_opcode = POP_JUMP_IF_TRUE;
- break;
- case POP_JUMP_IF_TRUE:
- reversed_opcode = POP_JUMP_IF_FALSE;
- break;
- }
- if (is_forward) {
- return SUCCESS;
+static PyObject *
+dict_keys_inorder(PyObject *dict, Py_ssize_t offset)
+{
+ PyObject *tuple, *k, *v;
+ Py_ssize_t i, pos = 0, size = PyDict_GET_SIZE(dict);
+
+ tuple = PyTuple_New(size);
+ if (tuple == NULL)
+ return NULL;
+ while (PyDict_Next(dict, &pos, &k, &v)) {
+ i = PyLong_AS_LONG(v);
+ assert((i - offset) < size);
+ assert((i - offset) >= 0);
+ PyTuple_SET_ITEM(tuple, i - offset, Py_NewRef(k));
}
+ return tuple;
+}
- /* transform 'conditional jump T' to
- * 'reversed_jump b_next' followed by 'jump_backwards T'
- */
+static PyObject *
+consts_dict_keys_inorder(PyObject *dict)
+{
+ PyObject *consts, *k, *v;
+ Py_ssize_t i, pos = 0, size = PyDict_GET_SIZE(dict);
- basicblock *target = last->i_target;
- basicblock *backwards_jump = cfg_builder_new_block(g);
- if (backwards_jump == NULL) {
- return ERROR;
+ consts = PyList_New(size); /* PyCode_Optimize() requires a list */
+ if (consts == NULL)
+ return NULL;
+ while (PyDict_Next(dict, &pos, &k, &v)) {
+ i = PyLong_AS_LONG(v);
+ /* The keys of the dictionary can be tuples wrapping a constant.
+ * (see dict_add_o and _PyCode_ConstantKey). In that case
+ * the object we want is always second. */
+ if (PyTuple_CheckExact(k)) {
+ k = PyTuple_GET_ITEM(k, 1);
+ }
+ assert(i < size);
+ assert(i >= 0);
+ PyList_SET_ITEM(consts, i, Py_NewRef(k));
}
- basicblock_addop(backwards_jump, JUMP, target->b_label.id, NO_LOCATION);
- backwards_jump->b_instr[0].i_target = target;
- last->i_opcode = reversed_opcode;
- last->i_target = b->b_next;
-
- backwards_jump->b_cold = b->b_cold;
- backwards_jump->b_next = b->b_next;
- b->b_next = backwards_jump;
- return SUCCESS;
+ return consts;
}
static int
-normalize_jumps(cfg_builder *g)
+compute_code_flags(struct compiler *c)
{
- basicblock *entryblock = g->g_entryblock;
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- b->b_visited = 0;
- }
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- b->b_visited = 1;
- RETURN_IF_ERROR(normalize_jumps_in_block(g, b));
+ PySTEntryObject *ste = c->u->u_ste;
+ int flags = 0;
+ if (ste->ste_type == FunctionBlock) {
+ flags |= CO_NEWLOCALS | CO_OPTIMIZED;
+ if (ste->ste_nested)
+ flags |= CO_NESTED;
+ if (ste->ste_generator && !ste->ste_coroutine)
+ flags |= CO_GENERATOR;
+ if (!ste->ste_generator && ste->ste_coroutine)
+ flags |= CO_COROUTINE;
+ if (ste->ste_generator && ste->ste_coroutine)
+ flags |= CO_ASYNC_GENERATOR;
+ if (ste->ste_varargs)
+ flags |= CO_VARARGS;
+ if (ste->ste_varkeywords)
+ flags |= CO_VARKEYWORDS;
}
- return SUCCESS;
-}
-
-static void
-assemble_jump_offsets(basicblock *entryblock)
-{
- int bsize, totsize, extended_arg_recompile;
-
- /* Compute the size of each block and fixup jump args.
- Replace block pointer with position in bytecode. */
- do {
- totsize = 0;
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- bsize = blocksize(b);
- b->b_offset = totsize;
- totsize += bsize;
- }
- extended_arg_recompile = 0;
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- bsize = b->b_offset;
- for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- int isize = instr_size(instr);
- /* Relative jumps are computed relative to
- the instruction pointer after fetching
- the jump instruction.
- */
- bsize += isize;
- if (is_jump(instr)) {
- instr->i_oparg = instr->i_target->b_offset;
- if (is_relative_jump(instr)) {
- if (instr->i_oparg < bsize) {
- assert(IS_BACKWARDS_JUMP_OPCODE(instr->i_opcode));
- instr->i_oparg = bsize - instr->i_oparg;
- }
- else {
- assert(!IS_BACKWARDS_JUMP_OPCODE(instr->i_opcode));
- instr->i_oparg -= bsize;
- }
- }
- else {
- assert(!IS_BACKWARDS_JUMP_OPCODE(instr->i_opcode));
- }
- if (instr_size(instr) != isize) {
- extended_arg_recompile = 1;
- }
- }
- }
- }
- /* XXX: This is an awful hack that could hurt performance, but
- on the bright side it should work until we come up
- with a better solution.
+ /* (Only) inherit compilerflags in PyCF_MASK */
+ flags |= (c->c_flags.cf_flags & PyCF_MASK);
- The issue is that in the first loop blocksize() is called
- which calls instr_size() which requires i_oparg be set
- appropriately. There is a bootstrap problem because
- i_oparg is calculated in the second loop above.
+ if ((IS_TOP_LEVEL_AWAIT(c)) &&
+ ste->ste_coroutine &&
+ !ste->ste_generator) {
+ flags |= CO_COROUTINE;
+ }
- So we loop until we stop seeing new EXTENDED_ARGs.
- The only EXTENDED_ARGs that could be popping up are
- ones in jump instructions. So this should converge
- fairly quickly.
- */
- } while (extended_arg_recompile);
+ return flags;
}
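
Note the generator/coroutine matrix in the retained `compute_code_flags()`: `yield` alone gives `CO_GENERATOR`, `await` alone `CO_COROUTINE`, and both together `CO_ASYNC_GENERATOR`. A tiny truth-table demo (the flag values below mirror the conventional ones from Include/code.h, but treat them as illustrative):

    #include <stdio.h>

    #define CO_GENERATOR       0x0020
    #define CO_COROUTINE       0x0080
    #define CO_ASYNC_GENERATOR 0x0200

    static int flags_for(int is_generator, int is_coroutine)
    {
        if (is_generator && !is_coroutine) return CO_GENERATOR;
        if (!is_generator && is_coroutine) return CO_COROUTINE;
        if (is_generator && is_coroutine)  return CO_ASYNC_GENERATOR;
        return 0;
    }

    int main(void)
    {
        printf("def f(): yield        -> 0x%04x\n", flags_for(1, 0));
        printf("async def f(): ...    -> 0x%04x\n", flags_for(0, 1));
        printf("async def f(): yield  -> 0x%04x\n", flags_for(1, 1));
        return 0;
    }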
-
-// helper functions for add_checks_for_loads_of_unknown_variables
-static inline void
-maybe_push(basicblock *b, uint64_t unsafe_mask, basicblock ***sp)
-{
- // Push b if the unsafe mask is giving us any new information.
- // To avoid overflowing the stack, only allow each block once.
- // Use b->b_visited=1 to mean that b is currently on the stack.
- uint64_t both = b->b_unsafe_locals_mask | unsafe_mask;
- if (b->b_unsafe_locals_mask != both) {
- b->b_unsafe_locals_mask = both;
- // More work left to do.
- if (!b->b_visited) {
- // not on the stack, so push it.
- *(*sp)++ = b;
- b->b_visited = 1;
- }
- }
-}
-
-static void
-scan_block_for_locals(basicblock *b, basicblock ***sp)
-{
- // bit i is set if local i is potentially uninitialized
- uint64_t unsafe_mask = b->b_unsafe_locals_mask;
- for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- assert(instr->i_opcode != EXTENDED_ARG);
- assert(!IS_SUPERINSTRUCTION_OPCODE(instr->i_opcode));
- if (instr->i_except != NULL) {
- maybe_push(instr->i_except, unsafe_mask, sp);
- }
- if (instr->i_oparg >= 64) {
- continue;
- }
- assert(instr->i_oparg >= 0);
- uint64_t bit = (uint64_t)1 << instr->i_oparg;
- switch (instr->i_opcode) {
- case DELETE_FAST:
- unsafe_mask |= bit;
- break;
- case STORE_FAST:
- unsafe_mask &= ~bit;
- break;
- case LOAD_FAST_CHECK:
- // If this doesn't raise, then the local is defined.
- unsafe_mask &= ~bit;
- break;
- case LOAD_FAST:
- if (unsafe_mask & bit) {
- instr->i_opcode = LOAD_FAST_CHECK;
- }
- unsafe_mask &= ~bit;
- break;
- }
- }
- if (b->b_next && BB_HAS_FALLTHROUGH(b)) {
- maybe_push(b->b_next, unsafe_mask, sp);
- }
- struct cfg_instr *last = basicblock_last_instr(b);
- if (last && is_jump(last)) {
- assert(last->i_target != NULL);
- maybe_push(last->i_target, unsafe_mask, sp);
- }
-}
-
-static int
-fast_scan_many_locals(basicblock *entryblock, int nlocals)
-{
- assert(nlocals > 64);
- Py_ssize_t *states = PyMem_Calloc(nlocals - 64, sizeof(Py_ssize_t));
- if (states == NULL) {
- PyErr_NoMemory();
- return ERROR;
- }
- Py_ssize_t blocknum = 0;
- // state[i - 64] == blocknum if local i is guaranteed to
- // be initialized, i.e., if it has had a previous LOAD_FAST or
- // STORE_FAST within that basicblock (not followed by DELETE_FAST).
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- blocknum++;
- for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- assert(instr->i_opcode != EXTENDED_ARG);
- assert(!IS_SUPERINSTRUCTION_OPCODE(instr->i_opcode));
- int arg = instr->i_oparg;
- if (arg < 64) {
- continue;
- }
- assert(arg >= 0);
- switch (instr->i_opcode) {
- case DELETE_FAST:
- states[arg - 64] = blocknum - 1;
- break;
- case STORE_FAST:
- states[arg - 64] = blocknum;
- break;
- case LOAD_FAST:
- if (states[arg - 64] != blocknum) {
- instr->i_opcode = LOAD_FAST_CHECK;
- }
- states[arg - 64] = blocknum;
- break;
- case LOAD_FAST_CHECK:
- Py_UNREACHABLE();
- }
- }
- }
- PyMem_Free(states);
- return SUCCESS;
-}
-
-static int
-add_checks_for_loads_of_uninitialized_variables(basicblock *entryblock,
- int nlocals,
- int nparams)
-{
- if (nlocals == 0) {
- return SUCCESS;
- }
- if (nlocals > 64) {
- // To avoid O(nlocals**2) compilation, locals beyond the first
- // 64 are only analyzed one basicblock at a time: initialization
- // info is not passed between basicblocks.
- if (fast_scan_many_locals(entryblock, nlocals) < 0) {
- return ERROR;
- }
- nlocals = 64;
- }
- basicblock **stack = make_cfg_traversal_stack(entryblock);
- if (stack == NULL) {
- return ERROR;
- }
- basicblock **sp = stack;
-
- // First origin of being uninitialized:
- // The non-parameter locals in the entry block.
- uint64_t start_mask = 0;
- for (int i = nparams; i < nlocals; i++) {
- start_mask |= (uint64_t)1 << i;
- }
- maybe_push(entryblock, start_mask, &sp);
-
- // Second origin of being uninitialized:
- // There could be DELETE_FAST somewhere, so
- // be sure to scan each basicblock at least once.
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- scan_block_for_locals(b, &sp);
- }
-
- // Now propagate the uncertainty from the origins we found: Use
- // LOAD_FAST_CHECK for any LOAD_FAST where the local could be undefined.
- while (sp > stack) {
- basicblock *b = *--sp;
- // mark as no longer on stack
- b->b_visited = 0;
- scan_block_for_locals(b, &sp);
- }
- PyMem_Free(stack);
- return SUCCESS;
-}
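
The removed analysis tracks up to 64 locals as bits of a `uint64_t`: bit i set means local i may be unbound at that point, `DELETE_FAST` sets the bit, `STORE_FAST` clears it, and a `LOAD_FAST` whose bit is set is rewritten to `LOAD_FAST_CHECK`. The mask arithmetic on its own, as a hypothetical standalone demo:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int nparams = 1, nlocals = 3;
        uint64_t unsafe = 0;
        /* Non-parameter locals start out potentially unbound. */
        for (int i = nparams; i < nlocals; i++) {
            unsafe |= (uint64_t)1 << i;
        }
        printf("entry mask: 0x%llx\n", (unsigned long long)unsafe);  /* 0x6 */

        unsafe &= ~((uint64_t)1 << 1);   /* STORE_FAST 1: local 1 is now bound */
        int oparg = 2;                   /* LOAD_FAST 2 */
        if (unsafe & ((uint64_t)1 << oparg)) {
            printf("LOAD_FAST %d -> LOAD_FAST_CHECK\n", oparg);
            unsafe &= ~((uint64_t)1 << oparg);  /* if it didn't raise, it's bound */
        }
        return 0;
    }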
-
-static PyObject *
-dict_keys_inorder(PyObject *dict, Py_ssize_t offset)
-{
- PyObject *tuple, *k, *v;
- Py_ssize_t i, pos = 0, size = PyDict_GET_SIZE(dict);
-
- tuple = PyTuple_New(size);
- if (tuple == NULL)
- return NULL;
- while (PyDict_Next(dict, &pos, &k, &v)) {
- i = PyLong_AS_LONG(v);
- assert((i - offset) < size);
- assert((i - offset) >= 0);
- PyTuple_SET_ITEM(tuple, i - offset, Py_NewRef(k));
- }
- return tuple;
-}
-
-static PyObject *
-consts_dict_keys_inorder(PyObject *dict)
-{
- PyObject *consts, *k, *v;
- Py_ssize_t i, pos = 0, size = PyDict_GET_SIZE(dict);
-
- consts = PyList_New(size); /* PyCode_Optimize() requires a list */
- if (consts == NULL)
- return NULL;
- while (PyDict_Next(dict, &pos, &k, &v)) {
- i = PyLong_AS_LONG(v);
- /* The keys of the dictionary can be tuples wrapping a constant.
- * (see dict_add_o and _PyCode_ConstantKey). In that case
- * the object we want is always second. */
- if (PyTuple_CheckExact(k)) {
- k = PyTuple_GET_ITEM(k, 1);
- }
- assert(i < size);
- assert(i >= 0);
- PyList_SET_ITEM(consts, i, Py_NewRef(k));
- }
- return consts;
-}
-
-static int
-compute_code_flags(struct compiler *c)
-{
- PySTEntryObject *ste = c->u->u_ste;
- int flags = 0;
- if (ste->ste_type == FunctionBlock) {
- flags |= CO_NEWLOCALS | CO_OPTIMIZED;
- if (ste->ste_nested)
- flags |= CO_NESTED;
- if (ste->ste_generator && !ste->ste_coroutine)
- flags |= CO_GENERATOR;
- if (!ste->ste_generator && ste->ste_coroutine)
- flags |= CO_COROUTINE;
- if (ste->ste_generator && ste->ste_coroutine)
- flags |= CO_ASYNC_GENERATOR;
- if (ste->ste_varargs)
- flags |= CO_VARARGS;
- if (ste->ste_varkeywords)
- flags |= CO_VARKEYWORDS;
- }
-
- /* (Only) inherit compilerflags in PyCF_MASK */
- flags |= (c->c_flags.cf_flags & PyCF_MASK);
-
- if ((IS_TOP_LEVEL_AWAIT(c)) &&
- ste->ste_coroutine &&
- !ste->ste_generator) {
- flags |= CO_COROUTINE;
- }
-
- return flags;
-}
-
-// Merge *obj* with constant cache.
-// Unlike merge_consts_recursive(), this function doesn't work recursively.
-static int
-merge_const_one(PyObject *const_cache, PyObject **obj)
+// Merge *obj* with constant cache.
+// Unlike merge_consts_recursive(), this function doesn't work recursively.
+int
+_PyCompile_ConstCacheMergeOne(PyObject *const_cache, PyObject **obj)
{
assert(PyDict_CheckExact(const_cache));
PyObject *key = _PyCode_ConstantKey(*obj);
@@ -8279,7 +7114,7 @@ makecode(struct compiler_unit *u, struct assembler *a, PyObject *const_cache,
if (!names) {
goto error;
}
- if (merge_const_one(const_cache, &names) < 0) {
+ if (_PyCompile_ConstCacheMergeOne(const_cache, &names) < 0) {
goto error;
}
@@ -8287,7 +7122,7 @@ makecode(struct compiler_unit *u, struct assembler *a, PyObject *const_cache,
if (consts == NULL) {
goto error;
}
- if (merge_const_one(const_cache, &consts) < 0) {
+ if (_PyCompile_ConstCacheMergeOne(const_cache, &consts) < 0) {
goto error;
}
@@ -8338,7 +7173,7 @@ makecode(struct compiler_unit *u, struct assembler *a, PyObject *const_cache,
goto error;
}
- if (merge_const_one(const_cache, &localsplusnames) < 0) {
+ if (_PyCompile_ConstCacheMergeOne(const_cache, &localsplusnames) < 0) {
goto error;
}
con.localsplusnames = localsplusnames;
@@ -8357,64 +7192,6 @@ makecode(struct compiler_unit *u, struct assembler *a, PyObject *const_cache,
}
-/* For debugging purposes only */
-#if 0
-static void
-dump_instr(struct cfg_instr *i)
-{
- const char *jrel = (is_relative_jump(i)) ? "jrel " : "";
- const char *jabs = (is_jump(i) && !is_relative_jump(i))? "jabs " : "";
-
- char arg[128];
-
- *arg = '\0';
- if (HAS_ARG(i->i_opcode)) {
- sprintf(arg, "arg: %d ", i->i_oparg);
- }
- if (HAS_TARGET(i->i_opcode)) {
- sprintf(arg, "target: %p [%d] ", i->i_target, i->i_oparg);
- }
- fprintf(stderr, "line: %d, opcode: %d %s%s%s\n",
- i->i_loc.lineno, i->i_opcode, arg, jabs, jrel);
-}
-
-static inline int
-basicblock_returns(const basicblock *b) {
- struct cfg_instr *last = basicblock_last_instr(b);
- return last && (last->i_opcode == RETURN_VALUE || last->i_opcode == RETURN_CONST);
-}
-
-static void
-dump_basicblock(const basicblock *b)
-{
- const char *b_return = basicblock_returns(b) ? "return " : "";
- fprintf(stderr, "%d: [EH=%d CLD=%d WRM=%d NO_FT=%d %p] used: %d, depth: %d, offset: %d %s\n",
- b->b_label.id, b->b_except_handler, b->b_cold, b->b_warm, BB_NO_FALLTHROUGH(b), b, b->b_iused,
- b->b_startdepth, b->b_offset, b_return);
- if (b->b_instr) {
- int i;
- for (i = 0; i < b->b_iused; i++) {
- fprintf(stderr, " [%02d] ", i);
- dump_instr(b->b_instr + i);
- }
- }
-}
-#endif
-
-
-static int
-translate_jump_labels_to_targets(basicblock *entryblock);
-
-static int
-optimize_cfg(cfg_builder *g, PyObject *consts, PyObject *const_cache);
-
-static int
-remove_unused_consts(basicblock *entryblock, PyObject *consts);
-
-/* Duplicates exit BBs, so that line numbers can be propagated to them */
-static int
-duplicate_exits_without_lineno(cfg_builder *g);
-
static int *
build_cellfixedoffsets(struct compiler_unit *u)
{
@@ -8456,20 +7233,20 @@ insert_prefix_instructions(struct compiler_unit *u, basicblock *entryblock,
/* Add the generator prefix instructions. */
if (code_flags & (CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR)) {
- struct cfg_instr make_gen = {
+ cfg_instr make_gen = {
.i_opcode = RETURN_GENERATOR,
.i_oparg = 0,
.i_loc = LOCATION(u->u_firstlineno, u->u_firstlineno, -1, -1),
.i_target = NULL,
};
- RETURN_IF_ERROR(insert_instruction(entryblock, 0, &make_gen));
- struct cfg_instr pop_top = {
+ RETURN_IF_ERROR(_PyBasicblock_InsertInstruction(entryblock, 0, &make_gen));
+ cfg_instr pop_top = {
.i_opcode = POP_TOP,
.i_oparg = 0,
.i_loc = NO_LOCATION,
.i_target = NULL,
};
- RETURN_IF_ERROR(insert_instruction(entryblock, 1, &pop_top));
+ RETURN_IF_ERROR(_PyBasicblock_InsertInstruction(entryblock, 1, &pop_top));
}
/* Set up cells for any variable that escapes, to be put in a closure. */
@@ -8492,60 +7269,32 @@ insert_prefix_instructions(struct compiler_unit *u, basicblock *entryblock,
if (oldindex == -1) {
continue;
}
- struct cfg_instr make_cell = {
+ cfg_instr make_cell = {
.i_opcode = MAKE_CELL,
// This will get fixed in offset_derefs().
.i_oparg = oldindex,
.i_loc = NO_LOCATION,
.i_target = NULL,
};
- RETURN_IF_ERROR(insert_instruction(entryblock, ncellsused, &make_cell));
+ RETURN_IF_ERROR(_PyBasicblock_InsertInstruction(entryblock, ncellsused, &make_cell));
ncellsused += 1;
}
PyMem_RawFree(sorted);
}
if (nfreevars) {
- struct cfg_instr copy_frees = {
+ cfg_instr copy_frees = {
.i_opcode = COPY_FREE_VARS,
.i_oparg = nfreevars,
.i_loc = NO_LOCATION,
.i_target = NULL,
};
- RETURN_IF_ERROR(insert_instruction(entryblock, 0, &copy_frees));
+ RETURN_IF_ERROR(_PyBasicblock_InsertInstruction(entryblock, 0, &copy_frees));
}
return SUCCESS;
}
-/* Make sure that all returns have a line number, even if early passes
- * have failed to propagate a correct line number.
- * The resulting line number may not be correct according to PEP 626,
- * but should be "good enough", and no worse than in older versions. */
-static void
-guarantee_lineno_for_exits(basicblock *entryblock, int firstlineno) {
- int lineno = firstlineno;
- assert(lineno > 0);
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- struct cfg_instr *last = basicblock_last_instr(b);
- if (last == NULL) {
- continue;
- }
- if (last->i_loc.lineno < 0) {
- if (last->i_opcode == RETURN_VALUE) {
- for (int i = 0; i < b->b_iused; i++) {
- assert(b->b_instr[i].i_loc.lineno < 0);
-
- b->b_instr[i].i_loc.lineno = lineno;
- }
- }
- }
- else {
- lineno = last->i_loc.lineno;
- }
- }
-}
-
static int
fix_cell_offsets(struct compiler_unit *u, basicblock *entryblock, int *fixedmap)
{
@@ -8569,7 +7318,7 @@ fix_cell_offsets(struct compiler_unit *u, basicblock *entryblock, int *fixedmap)
// Then update offsets, either relative to locals or by cell2arg.
for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *inst = &b->b_instr[i];
+ cfg_instr *inst = &b->b_instr[i];
// This is called before extended args are generated.
assert(inst->i_opcode != EXTENDED_ARG);
int oldoffset = inst->i_oparg;
@@ -8592,100 +7341,6 @@ fix_cell_offsets(struct compiler_unit *u, basicblock *entryblock, int *fixedmap)
}
-#ifndef NDEBUG
-
-static bool
-no_redundant_nops(cfg_builder *g) {
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- if (remove_redundant_nops(b) != 0) {
- return false;
- }
- }
- return true;
-}
-
-static bool
-no_redundant_jumps(cfg_builder *g) {
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- struct cfg_instr *last = basicblock_last_instr(b);
- if (last != NULL) {
- if (IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)) {
- assert(last->i_target != b->b_next);
- if (last->i_target == b->b_next) {
- return false;
- }
- }
- }
- }
- return true;
-}
-
-static bool
-opcode_metadata_is_sane(cfg_builder *g) {
- bool result = true;
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- int opcode = instr->i_opcode;
- int oparg = instr->i_oparg;
- assert(opcode <= MAX_REAL_OPCODE);
- for (int jump = 0; jump <= 1; jump++) {
- int popped = _PyOpcode_num_popped(opcode, oparg, jump ? true : false);
- int pushed = _PyOpcode_num_pushed(opcode, oparg, jump ? true : false);
- assert((pushed < 0) == (popped < 0));
- if (pushed >= 0) {
- assert(_PyOpcode_opcode_metadata[opcode].valid_entry);
- int effect = stack_effect(opcode, instr->i_oparg, jump);
- if (effect != pushed - popped) {
- fprintf(stderr,
- "op=%d arg=%d jump=%d: stack_effect (%d) != pushed (%d) - popped (%d)\n",
- opcode, oparg, jump, effect, pushed, popped);
- result = false;
- }
- }
- }
- }
- }
- return result;
-}
-
-static bool
-no_empty_basic_blocks(cfg_builder *g) {
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- if (b->b_iused == 0) {
- return false;
- }
- }
- return true;
-}
-#endif
-
-static int
-remove_redundant_jumps(cfg_builder *g) {
- /* If a non-empty block ends with a jump instruction, check if the next
- * non-empty block reached through normal flow control is the target
- * of that jump. If it is, then the jump instruction is redundant and
- * can be deleted.
- */
- assert(no_empty_basic_blocks(g));
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- struct cfg_instr *last = basicblock_last_instr(b);
- assert(last != NULL);
- assert(!IS_ASSEMBLER_OPCODE(last->i_opcode));
- if (IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)) {
- if (last->i_target == NULL) {
- PyErr_SetString(PyExc_SystemError, "jump with NULL target");
- return ERROR;
- }
- if (last->i_target == b->b_next) {
- assert(b->b_next->b_iused);
- INSTR_SET_OP0(last, NOP);
- }
- }
- }
- return SUCCESS;
-}
-
static int
prepare_localsplus(struct compiler_unit* u, cfg_builder *g, int code_flags)
{
@@ -8734,47 +7389,6 @@ add_return_at_end(struct compiler *c, int addNone)
return SUCCESS;
}
-static void propagate_line_numbers(basicblock *entryblock);
-
-static int
-resolve_line_numbers(struct compiler_unit *u, cfg_builder *g)
-{
- /* Set firstlineno if it wasn't explicitly set. */
- if (!u->u_firstlineno) {
- if (g->g_entryblock->b_instr && g->g_entryblock->b_instr->i_loc.lineno) {
- u->u_firstlineno = g->g_entryblock->b_instr->i_loc.lineno;
- }
- else {
- u->u_firstlineno = 1;
- }
- }
- RETURN_IF_ERROR(duplicate_exits_without_lineno(g));
- propagate_line_numbers(g->g_entryblock);
- guarantee_lineno_for_exits(g->g_entryblock, u->u_firstlineno);
- return SUCCESS;
-}
-
-static int
-optimize_code_unit(cfg_builder *g, PyObject *consts, PyObject *const_cache,
- int code_flags, int nlocals, int nparams)
-{
- assert(cfg_builder_check(g));
- /** Preprocessing **/
- /* Map labels to targets and mark exception handlers */
- RETURN_IF_ERROR(translate_jump_labels_to_targets(g->g_entryblock));
- RETURN_IF_ERROR(mark_except_handlers(g->g_entryblock));
- RETURN_IF_ERROR(label_exception_targets(g->g_entryblock));
-
- /** Optimization **/
- RETURN_IF_ERROR(optimize_cfg(g, consts, const_cache));
- RETURN_IF_ERROR(remove_unused_consts(g->g_entryblock, consts));
- RETURN_IF_ERROR(
- add_checks_for_loads_of_uninitialized_variables(
- g->g_entryblock, nlocals, nparams));
-
- RETURN_IF_ERROR(push_cold_blocks_to_end(g, code_flags));
- return SUCCESS;
-}
static PyCodeObject *
assemble_code_unit(struct compiler_unit *u, PyObject *const_cache,
@@ -8791,13 +7405,21 @@ assemble_code_unit(struct compiler_unit *u, PyObject *const_cache,
}
int nparams = (int)PyList_GET_SIZE(u->u_ste->ste_varnames);
int nlocals = (int)PyDict_GET_SIZE(u->u_varnames);
- if (optimize_code_unit(&g, consts, const_cache, code_flags, nlocals, nparams) < 0) {
+ if (_PyCfg_OptimizeCodeUnit(&g, consts, const_cache, code_flags, nlocals, nparams) < 0) {
goto error;
}
/** Assembly **/
-
- if (resolve_line_numbers(u, &g) < 0) {
+ /* Set firstlineno if it wasn't explicitly set. */
+ if (!u->u_firstlineno) {
+ if (g.g_entryblock->b_instr && g.g_entryblock->b_instr->i_loc.lineno) {
+ u->u_firstlineno = g.g_entryblock->b_instr->i_loc.lineno;
+ }
+ else {
+ u->u_firstlineno = 1;
+ }
+ }
+ if (_PyCfg_ResolveLineNumbers(&g, u->u_firstlineno) < 0) {
goto error;
}
@@ -8806,23 +7428,21 @@ assemble_code_unit(struct compiler_unit *u, PyObject *const_cache,
goto error;
}
- int maxdepth = stackdepth(g.g_entryblock, code_flags);
+ int maxdepth = _PyCfg_Stackdepth(g.g_entryblock, code_flags);
if (maxdepth < 0) {
goto error;
}
/* TO DO -- For 3.12, make sure that `maxdepth <= MAX_ALLOWED_STACK_USE` */
- convert_exception_handlers_to_nops(g.g_entryblock);
+ _PyCfg_ConvertExceptionHandlersToNops(g.g_entryblock);
/* Order of basic blocks must have been determined by now */
- if (normalize_jumps(&g) < 0) {
+
+ if (_PyCfg_ResolveJumps(&g) < 0) {
goto error;
}
- assert(no_redundant_jumps(&g));
- assert(opcode_metadata_is_sane(&g));
/* Can't modify the bytecode after computing jump offsets. */
- assemble_jump_offsets(g.g_entryblock);
struct assembler a;
int res = assemble_emit(&a, g.g_entryblock, u->u_firstlineno, const_cache);
@@ -8834,7 +7454,7 @@ assemble_code_unit(struct compiler_unit *u, PyObject *const_cache,
error:
Py_XDECREF(consts);
- cfg_builder_fini(&g);
+ _PyCfgBuilder_Fini(&g);
return co;
}
@@ -8857,941 +7477,6 @@ assemble(struct compiler *c, int addNone)
return assemble_code_unit(u, const_cache, code_flags, filename);
}
-static PyObject*
-get_const_value(int opcode, int oparg, PyObject *co_consts)
-{
- PyObject *constant = NULL;
- assert(HAS_CONST(opcode));
- if (opcode == LOAD_CONST) {
- constant = PyList_GET_ITEM(co_consts, oparg);
- }
-
- if (constant == NULL) {
- PyErr_SetString(PyExc_SystemError,
- "Internal error: failed to get value of a constant");
- return NULL;
- }
- return Py_NewRef(constant);
-}
-
-/* Replace LOAD_CONST c1, LOAD_CONST c2 ... LOAD_CONST cn, BUILD_TUPLE n
- with LOAD_CONST (c1, c2, ... cn).
- The consts table must still be in list form so that the
- new constant (c1, c2, ... cn) can be appended.
- Called with codestr pointing to the first LOAD_CONST.
-*/
-static int
-fold_tuple_on_constants(PyObject *const_cache,
- struct cfg_instr *inst,
- int n, PyObject *consts)
-{
- /* Pre-conditions */
- assert(PyDict_CheckExact(const_cache));
- assert(PyList_CheckExact(consts));
- assert(inst[n].i_opcode == BUILD_TUPLE);
- assert(inst[n].i_oparg == n);
-
- for (int i = 0; i < n; i++) {
- if (!HAS_CONST(inst[i].i_opcode)) {
- return SUCCESS;
- }
- }
-
- /* Buildup new tuple of constants */
- PyObject *newconst = PyTuple_New(n);
- if (newconst == NULL) {
- return ERROR;
- }
- for (int i = 0; i < n; i++) {
- int op = inst[i].i_opcode;
- int arg = inst[i].i_oparg;
- PyObject *constant = get_const_value(op, arg, consts);
- if (constant == NULL) {
- return ERROR;
- }
- PyTuple_SET_ITEM(newconst, i, constant);
- }
- if (merge_const_one(const_cache, &newconst) < 0) {
- Py_DECREF(newconst);
- return ERROR;
- }
-
- Py_ssize_t index;
- for (index = 0; index < PyList_GET_SIZE(consts); index++) {
- if (PyList_GET_ITEM(consts, index) == newconst) {
- break;
- }
- }
- if (index == PyList_GET_SIZE(consts)) {
- if ((size_t)index >= (size_t)INT_MAX - 1) {
- Py_DECREF(newconst);
- PyErr_SetString(PyExc_OverflowError, "too many constants");
- return ERROR;
- }
- if (PyList_Append(consts, newconst)) {
- Py_DECREF(newconst);
- return ERROR;
- }
- }
- Py_DECREF(newconst);
- for (int i = 0; i < n; i++) {
- INSTR_SET_OP0(&inst[i], NOP);
- }
- INSTR_SET_OP1(&inst[n], LOAD_CONST, (int)index);
- return SUCCESS;
-}
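
`fold_tuple_on_constants()` rewrites `LOAD_CONST c1 .. LOAD_CONST cn; BUILD_TUPLE n` into a single `LOAD_CONST (c1, ..., cn)`, NOP-ing the loads it absorbed. The instruction rewrite on a plain array (the opcode numbers and `folded_index` are invented for the demo):

    #include <stdio.h>

    enum { NOP, LOAD_CONST, BUILD_TUPLE };
    typedef struct { int opcode, oparg; } instr;

    int main(void)
    {
        /* LOAD_CONST 0; LOAD_CONST 1; BUILD_TUPLE 2 */
        instr code[] = { {LOAD_CONST, 0}, {LOAD_CONST, 1}, {BUILD_TUPLE, 2} };
        int n = code[2].oparg;
        int folded_index = 5;   /* pretend (c0, c1) was appended to consts at 5 */
        for (int i = 0; i < n; i++) {
            code[i].opcode = NOP;        /* drop the individual loads */
            code[i].oparg = 0;
        }
        code[n].opcode = LOAD_CONST;     /* load the pre-built tuple instead */
        code[n].oparg = folded_index;
        for (int i = 0; i <= n; i++) {   /* NOP NOP LOAD_CONST 5 */
            printf("(%d, %d) ", code[i].opcode, code[i].oparg);
        }
        printf("\n");
        return 0;
    }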
-
-#define VISITED (-1)
-
-// Replace an arbitrary run of SWAPs and NOPs with an optimal one that has the
-// same effect.
-static int
-swaptimize(basicblock *block, int *ix)
-{
- // NOTE: "./python -m test test_patma" serves as a good, quick stress test
- // for this function. Make sure to blow away cached *.pyc files first!
- assert(*ix < block->b_iused);
- struct cfg_instr *instructions = &block->b_instr[*ix];
- // Find the length of the current sequence of SWAPs and NOPs, and record the
- // maximum depth of the stack manipulations:
- assert(instructions[0].i_opcode == SWAP);
- int depth = instructions[0].i_oparg;
- int len = 0;
- int more = false;
- int limit = block->b_iused - *ix;
- while (++len < limit) {
- int opcode = instructions[len].i_opcode;
- if (opcode == SWAP) {
- depth = Py_MAX(depth, instructions[len].i_oparg);
- more = true;
- }
- else if (opcode != NOP) {
- break;
- }
- }
- // It's already optimal if there's only one SWAP:
- if (!more) {
- return SUCCESS;
- }
- // Create an array with elements {0, 1, 2, ..., depth - 1}:
- int *stack = PyMem_Malloc(depth * sizeof(int));
- if (stack == NULL) {
- PyErr_NoMemory();
- return ERROR;
- }
- for (int i = 0; i < depth; i++) {
- stack[i] = i;
- }
- // Simulate the combined effect of these instructions by "running" them on
- // our "stack":
- for (int i = 0; i < len; i++) {
- if (instructions[i].i_opcode == SWAP) {
- int oparg = instructions[i].i_oparg;
- int top = stack[0];
- // SWAPs are 1-indexed:
- stack[0] = stack[oparg - 1];
- stack[oparg - 1] = top;
- }
- }
- // Now we can begin! Our approach here is based on a solution to a closely
- // related problem (https://cs.stackexchange.com/a/13938). It's easiest to
- // think of this algorithm as determining the steps needed to efficiently
- // "un-shuffle" our stack. By performing the moves in *reverse* order,
- // though, we can efficiently *shuffle* it! For this reason, we will be
- // replacing instructions starting from the *end* of the run. Since the
- // solution is optimal, we don't need to worry about running out of space:
- int current = len - 1;
- for (int i = 0; i < depth; i++) {
- // Skip items that have already been visited, or just happen to be in
- // the correct location:
- if (stack[i] == VISITED || stack[i] == i) {
- continue;
- }
- // Okay, we've found an item that hasn't been visited. It forms a cycle
- // with other items; traversing the cycle and swapping each item with
- // the next will put them all in the correct place. The weird
- // loop-and-a-half is necessary to insert 0 into every cycle, since we
- // can only swap from that position:
- int j = i;
- while (true) {
- // Skip the actual swap if our item is zero, since swapping the top
- // item with itself is pointless:
- if (j) {
- assert(0 <= current);
- // SWAPs are 1-indexed:
- instructions[current].i_opcode = SWAP;
- instructions[current--].i_oparg = j + 1;
- }
- if (stack[j] == VISITED) {
- // Completed the cycle:
- assert(j == i);
- break;
- }
- int next_j = stack[j];
- stack[j] = VISITED;
- j = next_j;
- }
- }
- // NOP out any unused instructions:
- while (0 <= current) {
- INSTR_SET_OP0(&instructions[current--], NOP);
- }
- PyMem_Free(stack);
- *ix += len - 1;
- return SUCCESS;
-}
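
`swaptimize()` finds the minimal SWAP sequence by simulating the run on an index array and decomposing the resulting permutation into cycles; a cycle of length k through position 0 resolves in k-1 swaps. The cycle decomposition by itself, as a standalone sketch:

    #include <stdio.h>

    #define VISITED (-1)

    int main(void)
    {
        /* Net effect of some SWAP run on a 4-deep stack: a permutation. */
        int stack[4] = { 2, 0, 1, 3 };   /* item 3 is already in place */
        for (int i = 0; i < 4; i++) {
            if (stack[i] == VISITED || stack[i] == i) continue;
            printf("cycle:");
            int j = i;
            while (stack[j] != VISITED) {
                printf(" %d", j);
                int next = stack[j];
                stack[j] = VISITED;
                j = next;
            }
            printf("\n");   /* one cycle: 0 2 1, resolvable with 2 swaps */
        }
        return 0;
    }

The removed function then walks each cycle backwards, emitting SWAPs from position 0 and NOP-ing whatever instructions of the original run are left over.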
-
-// This list is pretty small, since it's only okay to reorder opcodes that:
-// - can't affect control flow (like jumping or raising exceptions)
-// - can't invoke arbitrary code (besides finalizers)
-// - only touch the TOS (and pop it when finished)
-#define SWAPPABLE(opcode) \
- ((opcode) == STORE_FAST || (opcode) == POP_TOP)
-
-static int
-next_swappable_instruction(basicblock *block, int i, int lineno)
-{
- while (++i < block->b_iused) {
- struct cfg_instr *instruction = &block->b_instr[i];
- if (0 <= lineno && instruction->i_loc.lineno != lineno) {
- // Optimizing across this instruction could cause user-visible
- // changes in the names bound between line tracing events!
- return -1;
- }
- if (instruction->i_opcode == NOP) {
- continue;
- }
- if (SWAPPABLE(instruction->i_opcode)) {
- return i;
- }
- return -1;
- }
- return -1;
-}
-
-// Attempt to apply SWAPs statically by swapping *instructions* rather than
-// stack items. For example, we can replace SWAP(2), POP_TOP, STORE_FAST(42)
-// with the more efficient NOP, STORE_FAST(42), POP_TOP.
-static void
-apply_static_swaps(basicblock *block, int i)
-{
- // SWAPs are to our left, and potential swaperands are to our right:
- for (; 0 <= i; i--) {
- assert(i < block->b_iused);
- struct cfg_instr *swap = &block->b_instr[i];
- if (swap->i_opcode != SWAP) {
- if (swap->i_opcode == NOP || SWAPPABLE(swap->i_opcode)) {
- // Nope, but we know how to handle these. Keep looking:
- continue;
- }
- // We can't reason about what this instruction does. Bail:
- return;
- }
- int j = next_swappable_instruction(block, i, -1);
- if (j < 0) {
- return;
- }
- int k = j;
- int lineno = block->b_instr[j].i_loc.lineno;
- for (int count = swap->i_oparg - 1; 0 < count; count--) {
- k = next_swappable_instruction(block, k, lineno);
- if (k < 0) {
- return;
- }
- }
- // Success!
- INSTR_SET_OP0(swap, NOP);
- struct cfg_instr temp = block->b_instr[j];
- block->b_instr[j] = block->b_instr[k];
- block->b_instr[k] = temp;
- }
-}
-
-// Attempt to eliminate jumps to jumps by updating inst to jump to
-// target->i_target using the provided opcode. Return whether or not the
-// optimization was successful.
-static bool
-jump_thread(struct cfg_instr *inst, struct cfg_instr *target, int opcode)
-{
- assert(is_jump(inst));
- assert(is_jump(target));
- // bpo-45773: If inst->i_target == target->i_target, then nothing actually
- // changes (and we fall into an infinite loop):
- if ((inst->i_loc.lineno == target->i_loc.lineno || target->i_loc.lineno == -1) &&
- inst->i_target != target->i_target)
- {
- inst->i_target = target->i_target;
- inst->i_opcode = opcode;
- return true;
- }
- return false;
-}
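
`jump_thread()` collapses a jump-to-a-jump by retargeting the first instruction, guarded two ways: line numbers must be compatible so tracing events are unchanged, and the targets must actually differ (bpo-45773: threading a self-cycle would loop forever). The retargeting in miniature, with a simplified `instr` type:

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct instr { int opcode, lineno; struct instr *target; } instr;

    enum { JUMP = 1 };

    static bool jump_thread(instr *inst, instr *target, int opcode)
    {
        /* Same guards as the removed helper: compatible linenos, real progress. */
        if ((inst->lineno == target->lineno || target->lineno == -1) &&
            inst->target != target->target)
        {
            inst->target = target->target;
            inst->opcode = opcode;
            return true;
        }
        return false;
    }

    int main(void)
    {
        instr final  = { 0, 3, NULL };
        instr middle = { JUMP, -1, &final };   /* synthetic jump, no lineno */
        instr start  = { JUMP, 3, &middle };
        printf("threaded: %s\n", jump_thread(&start, &middle, JUMP) ? "yes" : "no");
        printf("start now jumps to lineno %d\n", start.target->lineno);  /* 3 */
        return 0;
    }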
-
-/* Maximum size of basic block that should be copied in optimizer */
-#define MAX_COPY_SIZE 4
-
-/* Optimization */
-static int
-optimize_basic_block(PyObject *const_cache, basicblock *bb, PyObject *consts)
-{
- assert(PyDict_CheckExact(const_cache));
- assert(PyList_CheckExact(consts));
- struct cfg_instr nop;
- INSTR_SET_OP0(&nop, NOP);
- struct cfg_instr *target = &nop;
- int opcode = 0;
- int oparg = 0;
- int nextop = 0;
- for (int i = 0; i < bb->b_iused; i++) {
- struct cfg_instr *inst = &bb->b_instr[i];
- bool is_copy_of_load_const = (opcode == LOAD_CONST &&
- inst->i_opcode == COPY &&
- inst->i_oparg == 1);
- if (! is_copy_of_load_const) {
- opcode = inst->i_opcode;
- oparg = inst->i_oparg;
- if (HAS_TARGET(opcode)) {
- assert(inst->i_target->b_iused > 0);
- target = &inst->i_target->b_instr[0];
- assert(!IS_ASSEMBLER_OPCODE(target->i_opcode));
- }
- else {
- target = &nop;
- }
- }
- nextop = i+1 < bb->b_iused ? bb->b_instr[i+1].i_opcode : 0;
- assert(!IS_ASSEMBLER_OPCODE(opcode));
- switch (opcode) {
- /* Remove LOAD_CONST const; conditional jump */
- case LOAD_CONST:
- {
- PyObject* cnt;
- int is_true;
- int jump_if_true;
- switch(nextop) {
- case POP_JUMP_IF_FALSE:
- case POP_JUMP_IF_TRUE:
- cnt = get_const_value(opcode, oparg, consts);
- if (cnt == NULL) {
- goto error;
- }
- is_true = PyObject_IsTrue(cnt);
- Py_DECREF(cnt);
- if (is_true == -1) {
- goto error;
- }
- INSTR_SET_OP0(inst, NOP);
- jump_if_true = nextop == POP_JUMP_IF_TRUE;
- if (is_true == jump_if_true) {
- bb->b_instr[i+1].i_opcode = JUMP;
- }
- else {
- INSTR_SET_OP0(&bb->b_instr[i + 1], NOP);
- }
- break;
- case IS_OP:
- cnt = get_const_value(opcode, oparg, consts);
- if (cnt == NULL) {
- goto error;
- }
- int jump_op = i+2 < bb->b_iused ? bb->b_instr[i+2].i_opcode : 0;
- if (Py_IsNone(cnt) && (jump_op == POP_JUMP_IF_FALSE || jump_op == POP_JUMP_IF_TRUE)) {
- unsigned char nextarg = bb->b_instr[i+1].i_oparg;
- INSTR_SET_OP0(inst, NOP);
- INSTR_SET_OP0(&bb->b_instr[i + 1], NOP);
- bb->b_instr[i+2].i_opcode = nextarg ^ (jump_op == POP_JUMP_IF_FALSE) ?
- POP_JUMP_IF_NOT_NONE : POP_JUMP_IF_NONE;
- }
- Py_DECREF(cnt);
- break;
- case RETURN_VALUE:
- INSTR_SET_OP0(inst, NOP);
- INSTR_SET_OP1(&bb->b_instr[++i], RETURN_CONST, oparg);
- break;
- }
- break;
- }
-
- /* Try to fold tuples of constants.
- Skip over BUILD_TUPLE(1) UNPACK_SEQUENCE(1).
- Replace BUILD_TUPLE(2) UNPACK_SEQUENCE(2) with SWAP(2).
- Replace BUILD_TUPLE(3) UNPACK_SEQUENCE(3) with SWAP(3). */
- case BUILD_TUPLE:
- if (nextop == UNPACK_SEQUENCE && oparg == bb->b_instr[i+1].i_oparg) {
- switch(oparg) {
- case 1:
- INSTR_SET_OP0(inst, NOP);
- INSTR_SET_OP0(&bb->b_instr[i + 1], NOP);
- continue;
- case 2:
- case 3:
- INSTR_SET_OP0(inst, NOP);
- bb->b_instr[i+1].i_opcode = SWAP;
- continue;
- }
- }
- if (i >= oparg) {
- if (fold_tuple_on_constants(const_cache, inst-oparg, oparg, consts)) {
- goto error;
- }
- }
- break;
- case POP_JUMP_IF_NOT_NONE:
- case POP_JUMP_IF_NONE:
- switch (target->i_opcode) {
- case JUMP:
- i -= jump_thread(inst, target, inst->i_opcode);
- }
- break;
- case POP_JUMP_IF_FALSE:
- switch (target->i_opcode) {
- case JUMP:
- i -= jump_thread(inst, target, POP_JUMP_IF_FALSE);
- }
- break;
- case POP_JUMP_IF_TRUE:
- switch (target->i_opcode) {
- case JUMP:
- i -= jump_thread(inst, target, POP_JUMP_IF_TRUE);
- }
- break;
- case JUMP:
- switch (target->i_opcode) {
- case JUMP:
- i -= jump_thread(inst, target, JUMP);
- }
- break;
- case FOR_ITER:
- if (target->i_opcode == JUMP) {
- /* This will not work now because the jump (at target) could
- * be forward or backward and FOR_ITER only jumps forward. We
- * can re-enable this if ever we implement a backward version
- * of FOR_ITER.
- */
- /*
- i -= jump_thread(inst, target, FOR_ITER);
- */
- }
- break;
- case SWAP:
- if (oparg == 1) {
- INSTR_SET_OP0(inst, NOP);
- break;
- }
- if (swaptimize(bb, &i) < 0) {
- goto error;
- }
- apply_static_swaps(bb, i);
- break;
- case KW_NAMES:
- break;
- case PUSH_NULL:
- if (nextop == LOAD_GLOBAL && (inst[1].i_opcode & 1) == 0) {
- INSTR_SET_OP0(inst, NOP);
- inst[1].i_oparg |= 1;
- }
- break;
- default:
- /* All HAS_CONST opcodes should be handled with LOAD_CONST */
- assert (!HAS_CONST(inst->i_opcode));
- }
- }
- return SUCCESS;
-error:
- return ERROR;
-}
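
The largest removed piece, `optimize_basic_block()`, is a peephole pass. One representative case: `LOAD_CONST c; POP_JUMP_IF_TRUE` becomes an unconditional `JUMP` when c is truthy and a pair of NOPs otherwise. That case alone, on a plain array with invented opcode values:

    #include <stdio.h>

    enum { NOP, LOAD_CONST, POP_JUMP_IF_TRUE, JUMP };
    typedef struct { int opcode, oparg; } instr;

    int main(void)
    {
        int consts_truthy[] = { 1 };         /* const 0 is truthy */
        instr code[] = { {LOAD_CONST, 0}, {POP_JUMP_IF_TRUE, 9} };

        if (code[0].opcode == LOAD_CONST && code[1].opcode == POP_JUMP_IF_TRUE) {
            code[0].opcode = NOP;            /* the value is known at compile time... */
            code[1].opcode = consts_truthy[code[0].oparg]
                                 ? JUMP      /* ...so the branch outcome is, too */
                                 : NOP;
        }
        printf("(%d,%d) (%d,%d)\n", code[0].opcode, code[0].oparg,
                                    code[1].opcode, code[1].oparg);
        return 0;
    }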
-
-/* If this block ends with an unconditional jump to a small exit block, then
- * remove the jump and extend this block with the target.
- * Returns 1 if extended, 0 if no change, and -1 on error.
- */
-static int
-inline_small_exit_blocks(basicblock *bb) {
- struct cfg_instr *last = basicblock_last_instr(bb);
- if (last == NULL) {
- return 0;
- }
- if (!IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)) {
- return 0;
- }
- basicblock *target = last->i_target;
- if (basicblock_exits_scope(target) && target->b_iused <= MAX_COPY_SIZE) {
- INSTR_SET_OP0(last, NOP);
- RETURN_IF_ERROR(basicblock_append_instructions(bb, target));
- return 1;
- }
- return 0;
-}
-
-
-static int
-remove_redundant_nops_and_pairs(basicblock *entryblock)
-{
- bool done = false;
-
- while (! done) {
- done = true;
- struct cfg_instr *prev_instr = NULL;
- struct cfg_instr *instr = NULL;
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- remove_redundant_nops(b);
- if (IS_LABEL(b->b_label)) {
- /* this block is a jump target, forget instr */
- instr = NULL;
- }
- for (int i = 0; i < b->b_iused; i++) {
- prev_instr = instr;
- instr = &b->b_instr[i];
- int prev_opcode = prev_instr ? prev_instr->i_opcode : 0;
- int prev_oparg = prev_instr ? prev_instr->i_oparg : 0;
- int opcode = instr->i_opcode;
- bool is_redundant_pair = false;
- if (opcode == POP_TOP) {
- if (prev_opcode == LOAD_CONST) {
- is_redundant_pair = true;
- }
- else if (prev_opcode == COPY && prev_oparg == 1) {
- is_redundant_pair = true;
- }
- }
- if (is_redundant_pair) {
- INSTR_SET_OP0(prev_instr, NOP);
- INSTR_SET_OP0(instr, NOP);
- done = false;
- }
- }
- if ((instr && is_jump(instr)) || !BB_HAS_FALLTHROUGH(b)) {
- instr = NULL;
- }
- }
- }
- return SUCCESS;
-}
-
-
-static int
-remove_redundant_nops(basicblock *bb) {
- /* Remove NOPs when legal to do so. */
- int dest = 0;
- int prev_lineno = -1;
- for (int src = 0; src < bb->b_iused; src++) {
- int lineno = bb->b_instr[src].i_loc.lineno;
- if (bb->b_instr[src].i_opcode == NOP) {
- /* Eliminate no-op if it doesn't have a line number */
- if (lineno < 0) {
- continue;
- }
- /* or, if the previous instruction had the same line number. */
- if (prev_lineno == lineno) {
- continue;
- }
- /* or, if the next instruction has same line number or no line number */
- if (src < bb->b_iused - 1) {
- int next_lineno = bb->b_instr[src+1].i_loc.lineno;
- if (next_lineno == lineno) {
- continue;
- }
- if (next_lineno < 0) {
- bb->b_instr[src+1].i_loc = bb->b_instr[src].i_loc;
- continue;
- }
- }
- else {
- basicblock* next = bb->b_next;
- while (next && next->b_iused == 0) {
- next = next->b_next;
- }
- /* or if last instruction in BB and next BB has same line number */
- if (next) {
- if (lineno == next->b_instr[0].i_loc.lineno) {
- continue;
- }
- }
- }
-
- }
- if (dest != src) {
- bb->b_instr[dest] = bb->b_instr[src];
- }
- dest++;
- prev_lineno = lineno;
- }
- assert(dest <= bb->b_iused);
- int num_removed = bb->b_iused - dest;
- bb->b_iused = dest;
- return num_removed;
-}
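
`remove_redundant_nops()` compacts in place with a two-index sweep; a NOP is dropped only when removing it cannot lose a line event, i.e. it has no line number, the previous kept instruction has the same one, or the following instruction repeats it. The compaction skeleton as a standalone sketch (the line-number rules here are simplified relative to the original):

    #include <stdio.h>

    enum { NOP, OTHER };
    typedef struct { int opcode, lineno; } instr;

    int main(void)
    {
        instr code[] = { {OTHER, 1}, {NOP, 1}, {NOP, 2}, {OTHER, 2} };
        int used = 4, dest = 0, prev_lineno = -1;
        for (int src = 0; src < used; src++) {
            if (code[src].opcode == NOP &&
                (code[src].lineno < 0 ||                        /* no lineno */
                 code[src].lineno == prev_lineno ||             /* same as previous */
                 (src + 1 < used && code[src+1].lineno == code[src].lineno)))
            {
                continue;                                       /* drop it */
            }
            code[dest++] = code[src];
            prev_lineno = code[src].lineno;
        }
        printf("kept %d of %d instructions\n", dest, used);     /* kept 2 of 4 */
        return 0;
    }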
-
-static int
-check_cfg(cfg_builder *g) {
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- /* Raise SystemError if jump or exit is not last instruction in the block. */
- for (int i = 0; i < b->b_iused; i++) {
- int opcode = b->b_instr[i].i_opcode;
- assert(!IS_ASSEMBLER_OPCODE(opcode));
- if (IS_TERMINATOR_OPCODE(opcode)) {
- if (i != b->b_iused - 1) {
- PyErr_SetString(PyExc_SystemError, "malformed control flow graph.");
- return ERROR;
- }
- }
- }
- }
- return SUCCESS;
-}
-
-static int
-mark_reachable(basicblock *entryblock) {
- basicblock **stack = make_cfg_traversal_stack(entryblock);
- if (stack == NULL) {
- return ERROR;
- }
- basicblock **sp = stack;
- entryblock->b_predecessors = 1;
- *sp++ = entryblock;
- while (sp > stack) {
- basicblock *b = *(--sp);
- b->b_visited = 1;
- if (b->b_next && BB_HAS_FALLTHROUGH(b)) {
- if (!b->b_next->b_visited) {
- assert(b->b_next->b_predecessors == 0);
- *sp++ = b->b_next;
- }
- b->b_next->b_predecessors++;
- }
- for (int i = 0; i < b->b_iused; i++) {
- basicblock *target;
- struct cfg_instr *instr = &b->b_instr[i];
- if (is_jump(instr) || is_block_push(instr)) {
- target = instr->i_target;
- if (!target->b_visited) {
- assert(target->b_predecessors == 0 || target == b->b_next);
- *sp++ = target;
- }
- target->b_predecessors++;
- }
- }
- }
- PyMem_Free(stack);
- return SUCCESS;
-}
-
-static void
-eliminate_empty_basic_blocks(cfg_builder *g) {
- /* Eliminate empty blocks */
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- basicblock *next = b->b_next;
- while (next && next->b_iused == 0) {
- next = next->b_next;
- }
- b->b_next = next;
- }
- while(g->g_entryblock && g->g_entryblock->b_iused == 0) {
- g->g_entryblock = g->g_entryblock->b_next;
- }
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- assert(b->b_iused > 0);
- for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- if (HAS_TARGET(instr->i_opcode)) {
- basicblock *target = instr->i_target;
- while (target->b_iused == 0) {
- target = target->b_next;
- }
- instr->i_target = target;
- assert(instr->i_target && instr->i_target->b_iused > 0);
- }
- }
- }
-}
-
-
-/* If an instruction has no line number, but its predecessor in the BB does,
- * then copy the line number. If a successor block has no line number, and only
- * one predecessor, then inherit the line number.
- * This ensures that all exit blocks (with one predecessor) receive a line number.
- * Also reduces the size of the line number table,
- * but has no impact on the generated line number events.
- */
-static void
-propagate_line_numbers(basicblock *entryblock) {
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- struct cfg_instr *last = basicblock_last_instr(b);
- if (last == NULL) {
- continue;
- }
-
- location prev_location = NO_LOCATION;
- for (int i = 0; i < b->b_iused; i++) {
- if (b->b_instr[i].i_loc.lineno < 0) {
- b->b_instr[i].i_loc = prev_location;
- }
- else {
- prev_location = b->b_instr[i].i_loc;
- }
- }
- if (BB_HAS_FALLTHROUGH(b) && b->b_next->b_predecessors == 1) {
- assert(b->b_next->b_iused);
- if (b->b_next->b_instr[0].i_loc.lineno < 0) {
- b->b_next->b_instr[0].i_loc = prev_location;
- }
- }
- if (is_jump(last)) {
- basicblock *target = last->i_target;
- if (target->b_predecessors == 1) {
- if (target->b_instr[0].i_loc.lineno < 0) {
- target->b_instr[0].i_loc = prev_location;
- }
- }
- }
- }
-}
-
-
-/* Calculate the actual jump target from the target_label */
-static int
-translate_jump_labels_to_targets(basicblock *entryblock)
-{
- int max_label = -1;
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- if (b->b_label.id > max_label) {
- max_label = b->b_label.id;
- }
- }
- size_t mapsize = sizeof(basicblock *) * (max_label + 1);
- basicblock **label2block = (basicblock **)PyMem_Malloc(mapsize);
- if (!label2block) {
- PyErr_NoMemory();
- return ERROR;
- }
- memset(label2block, 0, mapsize);
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- if (b->b_label.id >= 0) {
- label2block[b->b_label.id] = b;
- }
- }
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
- assert(instr->i_target == NULL);
- if (HAS_TARGET(instr->i_opcode)) {
- int lbl = instr->i_oparg;
- assert(lbl >= 0 && lbl <= max_label);
- instr->i_target = label2block[lbl];
- assert(instr->i_target != NULL);
- assert(instr->i_target->b_label.id == lbl);
- }
- }
- }
- PyMem_Free(label2block);
- return SUCCESS;
-}
-
-/* Perform optimizations on a control flow graph.
- The consts object should still be in list form to allow new constants
- to be appended.
-
- Code transformations that reduce code size initially fill the gaps with
- NOPs. Later those NOPs are removed.
-*/
-
-static int
-optimize_cfg(cfg_builder *g, PyObject *consts, PyObject *const_cache)
-{
- assert(PyDict_CheckExact(const_cache));
- RETURN_IF_ERROR(check_cfg(g));
- eliminate_empty_basic_blocks(g);
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- RETURN_IF_ERROR(inline_small_exit_blocks(b));
- }
- assert(no_empty_basic_blocks(g));
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- RETURN_IF_ERROR(optimize_basic_block(const_cache, b, consts));
- assert(b->b_predecessors == 0);
- }
- RETURN_IF_ERROR(remove_redundant_nops_and_pairs(g->g_entryblock));
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- RETURN_IF_ERROR(inline_small_exit_blocks(b));
- }
- RETURN_IF_ERROR(mark_reachable(g->g_entryblock));
-
- /* Delete unreachable instructions */
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- if (b->b_predecessors == 0) {
- b->b_iused = 0;
- }
- }
- for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
- remove_redundant_nops(b);
- }
- eliminate_empty_basic_blocks(g);
- assert(no_redundant_nops(g));
- RETURN_IF_ERROR(remove_redundant_jumps(g));
- return SUCCESS;
-}
-
-
-static int
-remove_unused_consts(basicblock *entryblock, PyObject *consts)
-{
- assert(PyList_CheckExact(consts));
- Py_ssize_t nconsts = PyList_GET_SIZE(consts);
- if (nconsts == 0) {
- return SUCCESS; /* nothing to do */
- }
-
- Py_ssize_t *index_map = NULL;
- Py_ssize_t *reverse_index_map = NULL;
- int err = ERROR;
-
- index_map = PyMem_Malloc(nconsts * sizeof(Py_ssize_t));
- if (index_map == NULL) {
- goto end;
- }
- for (Py_ssize_t i = 1; i < nconsts; i++) {
- index_map[i] = -1;
- }
- // The first constant may be the docstring; always keep it.
- index_map[0] = 0;
-
- /* mark used consts */
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- for (int i = 0; i < b->b_iused; i++) {
- if (HAS_CONST(b->b_instr[i].i_opcode)) {
- int index = b->b_instr[i].i_oparg;
- index_map[index] = index;
- }
- }
- }
- /* now index_map[i] == i if consts[i] is used, -1 otherwise */
-
- /* condense consts */
- Py_ssize_t n_used_consts = 0;
- for (int i = 0; i < nconsts; i++) {
- if (index_map[i] != -1) {
- assert(index_map[i] == i);
- index_map[n_used_consts++] = index_map[i];
- }
- }
- if (n_used_consts == nconsts) {
- /* nothing to do */
- err = SUCCESS;
- goto end;
- }
-
- /* move all used consts to the beginning of the consts list */
- assert(n_used_consts < nconsts);
- for (Py_ssize_t i = 0; i < n_used_consts; i++) {
- Py_ssize_t old_index = index_map[i];
- assert(i <= old_index && old_index < nconsts);
- if (i != old_index) {
- PyObject *value = PyList_GET_ITEM(consts, index_map[i]);
- assert(value != NULL);
- PyList_SetItem(consts, i, Py_NewRef(value));
- }
- }
-
- /* truncate the consts list at its new size */
- if (PyList_SetSlice(consts, n_used_consts, nconsts, NULL) < 0) {
- goto end;
- }
-
- /* adjust const indices in the bytecode */
- reverse_index_map = PyMem_Malloc(nconsts * sizeof(Py_ssize_t));
- if (reverse_index_map == NULL) {
- goto end;
- }
- for (Py_ssize_t i = 0; i < nconsts; i++) {
- reverse_index_map[i] = -1;
- }
- for (Py_ssize_t i = 0; i < n_used_consts; i++) {
- assert(index_map[i] != -1);
- assert(reverse_index_map[index_map[i]] == -1);
- reverse_index_map[index_map[i]] = i;
- }
-
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- for (int i = 0; i < b->b_iused; i++) {
- if (HAS_CONST(b->b_instr[i].i_opcode)) {
- int index = b->b_instr[i].i_oparg;
- assert(reverse_index_map[index] >= 0);
- assert(reverse_index_map[index] < n_used_consts);
- b->b_instr[i].i_oparg = (int)reverse_index_map[index];
- }
- }
- }
-
- err = SUCCESS;
-end:
- PyMem_Free(index_map);
- PyMem_Free(reverse_index_map);
- return err;
-}
-
-static inline bool
-is_exit_without_lineno(basicblock *b) {
- if (!basicblock_exits_scope(b)) {
- return false;
- }
- for (int i = 0; i < b->b_iused; i++) {
- if (b->b_instr[i].i_loc.lineno >= 0) {
- return false;
- }
- }
- return true;
-}
-
-/* PEP 626 mandates that the f_lineno of a frame is correct
- * after a frame terminates. It would be prohibitively expensive
- * to continuously update the f_lineno field at runtime,
- * so we make sure that all exiting instructions (raises and returns)
- * have a valid line number, allowing us to compute f_lineno lazily.
- * We can do this by duplicating the exit blocks without line number
- * so that none have more than one predecessor. We can then safely
- * copy the line number from the sole predecessor block.
- */
-static int
-duplicate_exits_without_lineno(cfg_builder *g)
-{
- assert(no_empty_basic_blocks(g));
- /* Copy all exit blocks without line number that are targets of a jump.
- */
- basicblock *entryblock = g->g_entryblock;
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- struct cfg_instr *last = basicblock_last_instr(b);
- assert(last != NULL);
- if (is_jump(last)) {
- basicblock *target = last->i_target;
- if (is_exit_without_lineno(target) && target->b_predecessors > 1) {
- basicblock *new_target = copy_basicblock(g, target);
- if (new_target == NULL) {
- return ERROR;
- }
- new_target->b_instr[0].i_loc = last->i_loc;
- last->i_target = new_target;
- target->b_predecessors--;
- new_target->b_predecessors = 1;
- new_target->b_next = target->b_next;
- target->b_next = new_target;
- }
- }
- }
-
- /* Any remaining reachable exit blocks without line number can only be reached by
- * fall through, and thus can only have a single predecessor */
- for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
- if (BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_iused > 0) {
- if (is_exit_without_lineno(b->b_next)) {
- struct cfg_instr *last = basicblock_last_instr(b);
- assert(last != NULL);
- b->b_next->b_instr[0].i_loc = last->i_loc;
- }
- }
- }
- return SUCCESS;
-}
-
-
/* Access to compiler optimizations for unit tests.
*
 * _PyCompile_CodeGen takes an AST, applies code-gen and
@@ -9842,7 +7527,7 @@ instructions_to_cfg(PyObject *instructions, cfg_builder *g)
for (int i = 0; i < num_insts; i++) {
if (is_target[i]) {
jump_target_label lbl = {i};
- RETURN_IF_ERROR(cfg_builder_use_label(g, lbl));
+ RETURN_IF_ERROR(_PyCfgBuilder_UseLabel(g, lbl));
}
PyObject *item = PyList_GET_ITEM(instructions, i);
if (!PyTuple_Check(item) || PyTuple_GET_SIZE(item) != 6) {
@@ -9880,11 +7565,11 @@ instructions_to_cfg(PyObject *instructions, cfg_builder *g)
if (PyErr_Occurred()) {
goto error;
}
- RETURN_IF_ERROR(cfg_builder_addop(g, opcode, oparg, loc));
+ RETURN_IF_ERROR(_PyCfgBuilder_Addop(g, opcode, oparg, loc));
}
- struct cfg_instr *last = basicblock_last_instr(g->g_curblock);
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(g->g_curblock);
if (last && !IS_TERMINATOR_OPCODE(last->i_opcode)) {
- RETURN_IF_ERROR(cfg_builder_addop(g, RETURN_VALUE, 0, NO_LOCATION));
+ RETURN_IF_ERROR(_PyCfgBuilder_Addop(g, RETURN_VALUE, 0, NO_LOCATION));
}
PyMem_Free(is_target);
return SUCCESS;
@@ -9939,7 +7624,7 @@ cfg_to_instructions(cfg_builder *g)
}
for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
for (int i = 0; i < b->b_iused; i++) {
- struct cfg_instr *instr = &b->b_instr[i];
+ cfg_instr *instr = &b->b_instr[i];
location loc = instr->i_loc;
int arg = HAS_TARGET(instr->i_opcode) ?
instr->i_target->b_label.id : instr->i_oparg;
@@ -10018,26 +7703,26 @@ _PyCompile_OptimizeCfg(PyObject *instructions, PyObject *consts)
cfg_builder g;
memset(&g, 0, sizeof(cfg_builder));
- if (cfg_builder_init(&g) < 0) {
+ if (_PyCfgBuilder_Init(&g) < 0) {
goto error;
}
if (instructions_to_cfg(instructions, &g) < 0) {
goto error;
}
int code_flags = 0, nlocals = 0, nparams = 0;
- if (optimize_code_unit(&g, consts, const_cache, code_flags, nlocals, nparams) < 0) {
+ if (_PyCfg_OptimizeCodeUnit(&g, consts, const_cache, code_flags, nlocals, nparams) < 0) {
goto error;
}
res = cfg_to_instructions(&g);
error:
Py_DECREF(const_cache);
- cfg_builder_fini(&g);
+ _PyCfgBuilder_Fini(&g);
return res;
}
/* Retained for API compatibility.
- * Optimization is now done in optimize_cfg */
+ * Optimization is now done in _PyCfg_OptimizeCodeUnit */
PyObject *
PyCode_Optimize(PyObject *code, PyObject* Py_UNUSED(consts),
diff --git a/Python/flowgraph.c b/Python/flowgraph.c
new file mode 100644
index 000000000000..cecddbd39c94
--- /dev/null
+++ b/Python/flowgraph.c
@@ -0,0 +1,2160 @@
+
+#include <stdbool.h>
+
+#include "Python.h"
+#include "pycore_flowgraph.h"
+#include "pycore_compile.h"
+#include "pycore_pymem.h" // _PyMem_IsPtrFreed()
+
+#include "pycore_opcode_utils.h"
+#define NEED_OPCODE_METADATA
+#include "opcode_metadata.h" // _PyOpcode_opcode_metadata, _PyOpcode_num_popped/pushed
+#undef NEED_OPCODE_METADATA
+
+
+#undef SUCCESS
+#undef ERROR
+#define SUCCESS 0
+#define ERROR -1
+
+#define RETURN_IF_ERROR(X) \
+ if ((X) == -1) { \
+ return ERROR; \
+ }
+
+#define DEFAULT_BLOCK_SIZE 16
+
+typedef _PyCompilerSrcLocation location;
+typedef _PyCfgJumpTargetLabel jump_target_label;
+typedef _PyCfgBasicblock basicblock;
+typedef _PyCfgBuilder cfg_builder;
+typedef _PyCfgInstruction cfg_instr;
+
+static const jump_target_label NO_LABEL = {-1};
+
+#define SAME_LABEL(L1, L2) ((L1).id == (L2).id)
+#define IS_LABEL(L) (!SAME_LABEL((L), (NO_LABEL)))
+
+
+static inline int
+is_block_push(cfg_instr *i)
+{
+ return IS_BLOCK_PUSH_OPCODE(i->i_opcode);
+}
+
+static inline int
+is_relative_jump(cfg_instr *i)
+{
+ return IS_RELATIVE_JUMP(i->i_opcode);
+}
+
+static inline int
+is_jump(cfg_instr *i)
+{
+ return IS_JUMP_OPCODE(i->i_opcode);
+}
+
+/* One arg*/
+#define INSTR_SET_OP1(I, OP, ARG) \
+ do { \
+ assert(HAS_ARG(OP)); \
+ _PyCfgInstruction *_instr__ptr_ = (I); \
+ _instr__ptr_->i_opcode = (OP); \
+ _instr__ptr_->i_oparg = (ARG); \
+ } while (0);
+
+/* No args*/
+#define INSTR_SET_OP0(I, OP) \
+ do { \
+ assert(!HAS_ARG(OP)); \
+ _PyCfgInstruction *_instr__ptr_ = (I); \
+ _instr__ptr_->i_opcode = (OP); \
+ _instr__ptr_->i_oparg = 0; \
+ } while (0);
+
+/***** Blocks *****/
+
+/* Returns the offset of the next instruction in the current block's
+ b_instr array. Resizes the b_instr as necessary.
+ Returns -1 on failure.
+*/
+static int
+basicblock_next_instr(basicblock *b)
+{
+ assert(b != NULL);
+ RETURN_IF_ERROR(
+ _PyCompile_EnsureArrayLargeEnough(
+ b->b_iused + 1,
+ (void**)&b->b_instr,
+ &b->b_ialloc,
+ DEFAULT_BLOCK_SIZE,
+ sizeof(cfg_instr)));
+ return b->b_iused++;
+}
+
+/* Allocate a new block and return a pointer to it.
+ Returns NULL on error.
+*/
+
+static basicblock *
+cfg_builder_new_block(cfg_builder *g)
+{
+ basicblock *b = (basicblock *)PyObject_Calloc(1, sizeof(basicblock));
+ if (b == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+ /* Extend the singly linked list of blocks with new block. */
+ b->b_list = g->g_block_list;
+ g->g_block_list = b;
+ b->b_label = NO_LABEL;
+ return b;
+}
+
+static int
+basicblock_addop(basicblock *b, int opcode, int oparg, location loc)
+{
+ assert(IS_WITHIN_OPCODE_RANGE(opcode));
+ assert(!IS_ASSEMBLER_OPCODE(opcode));
+ assert(HAS_ARG(opcode) || HAS_TARGET(opcode) || oparg == 0);
+ assert(0 <= oparg && oparg < (1 << 30));
+
+ int off = basicblock_next_instr(b);
+ if (off < 0) {
+ return ERROR;
+ }
+ cfg_instr *i = &b->b_instr[off];
+ i->i_opcode = opcode;
+ i->i_oparg = oparg;
+ i->i_target = NULL;
+ i->i_loc = loc;
+
+ return SUCCESS;
+}
+
+static inline int
+basicblock_append_instructions(basicblock *target, basicblock *source)
+{
+ for (int i = 0; i < source->b_iused; i++) {
+ int n = basicblock_next_instr(target);
+ if (n < 0) {
+ return ERROR;
+ }
+ target->b_instr[n] = source->b_instr[i];
+ }
+ return SUCCESS;
+}
+
+static basicblock *
+copy_basicblock(cfg_builder *g, basicblock *block)
+{
+ /* Cannot copy a block if it has a fallthrough, since
+ * a block can only have one fallthrough predecessor.
+ */
+ assert(BB_NO_FALLTHROUGH(block));
+ basicblock *result = cfg_builder_new_block(g);
+ if (result == NULL) {
+ return NULL;
+ }
+ if (basicblock_append_instructions(result, block) < 0) {
+ return NULL;
+ }
+ return result;
+}
+
+int
+_PyBasicblock_InsertInstruction(basicblock *block, int pos, cfg_instr *instr) {
+ RETURN_IF_ERROR(basicblock_next_instr(block));
+ for (int i = block->b_iused - 1; i > pos; i--) {
+ block->b_instr[i] = block->b_instr[i-1];
+ }
+ block->b_instr[pos] = *instr;
+ return SUCCESS;
+}
+
+int
+_PyCfg_InstrSize(cfg_instr *instruction)
+{
+ int opcode = instruction->i_opcode;
+ assert(!IS_PSEUDO_OPCODE(opcode));
+ int oparg = instruction->i_oparg;
+ assert(HAS_ARG(opcode) || oparg == 0);
+ int extended_args = (0xFFFFFF < oparg) + (0xFFFF < oparg) + (0xFF < oparg);
+ int caches = _PyOpcode_Caches[opcode];
+ return extended_args + 1 + caches;
+}
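
A note on the sizing rule above: each code unit carries one byte of oparg, so
every additional significant byte costs one EXTENDED_ARG prefix on top of the
instruction itself and its inline caches. A standalone sketch of just the
prefix counting (cache counts omitted; the sample opargs are made up):

#include <stdio.h>

/* Count code units for an instruction whose oparg needs 1-4 bytes:
   one EXTENDED_ARG prefix per extra byte, plus the instruction itself. */
static int instr_size_no_caches(int oparg)
{
    int extended_args = (0xFFFFFF < oparg) + (0xFFFF < oparg) + (0xFF < oparg);
    return extended_args + 1;
}

int main(void)
{
    int samples[] = {0, 0xFF, 0x100, 0xFFFF, 0x10000, 0x1000000};
    for (int i = 0; i < 6; i++) {
        printf("oparg %#9x -> %d code unit(s)\n",
               samples[i], instr_size_no_caches(samples[i]));
    }
    return 0;
}
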
+
+static int
+blocksize(basicblock *b)
+{
+ int size = 0;
+ for (int i = 0; i < b->b_iused; i++) {
+ size += _PyCfg_InstrSize(&b->b_instr[i]);
+ }
+ return size;
+}
+
+/* For debugging purposes only */
+#if 0
+static void
+dump_instr(cfg_instr *i)
+{
+ const char *jrel = (is_relative_jump(i)) ? "jrel " : "";
+ const char *jabs = (is_jump(i) && !is_relative_jump(i))? "jabs " : "";
+
+ char arg[128];
+
+ *arg = '\0';
+ if (HAS_ARG(i->i_opcode)) {
+ sprintf(arg, "arg: %d ", i->i_oparg);
+ }
+ if (HAS_TARGET(i->i_opcode)) {
+ sprintf(arg, "target: %p [%d] ", i->i_target, i->i_oparg);
+ }
+ fprintf(stderr, "line: %d, opcode: %d %s%s%s\n",
+ i->i_loc.lineno, i->i_opcode, arg, jabs, jrel);
+}
+
+static inline int
+basicblock_returns(const basicblock *b) {
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(b);
+ return last && (last->i_opcode == RETURN_VALUE || last->i_opcode == RETURN_CONST);
+}
+
+static void
+dump_basicblock(const basicblock *b)
+{
+ const char *b_return = basicblock_returns(b) ? "return " : "";
+ fprintf(stderr, "%d: [EH=%d CLD=%d WRM=%d NO_FT=%d %p] used: %d, depth: %d, offset: %d %s\n",
+ b->b_label.id, b->b_except_handler, b->b_cold, b->b_warm, BB_NO_FALLTHROUGH(b), b, b->b_iused,
+ b->b_startdepth, b->b_offset, b_return);
+ if (b->b_instr) {
+ int i;
+ for (i = 0; i < b->b_iused; i++) {
+ fprintf(stderr, " [%02d] ", i);
+ dump_instr(b->b_instr + i);
+ }
+ }
+}
+#endif
+
+
+/***** CFG construction and modification *****/
+
+static basicblock *
+cfg_builder_use_next_block(cfg_builder *g, basicblock *block)
+{
+ assert(block != NULL);
+ g->g_curblock->b_next = block;
+ g->g_curblock = block;
+ return block;
+}
+
+cfg_instr *
+_PyCfg_BasicblockLastInstr(const basicblock *b) {
+ assert(b->b_iused >= 0);
+ if (b->b_iused > 0) {
+ assert(b->b_instr != NULL);
+ return &b->b_instr[b->b_iused - 1];
+ }
+ return NULL;
+}
+
+static inline int
+basicblock_exits_scope(const basicblock *b) {
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(b);
+ return last && IS_SCOPE_EXIT_OPCODE(last->i_opcode);
+}
+
+static bool
+cfg_builder_current_block_is_terminated(cfg_builder *g)
+{
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(g->g_curblock);
+ if (last && IS_TERMINATOR_OPCODE(last->i_opcode)) {
+ return true;
+ }
+ if (IS_LABEL(g->g_current_label)) {
+ if (last || IS_LABEL(g->g_curblock->b_label)) {
+ return true;
+ }
+ else {
+ /* current block is empty, label it */
+ g->g_curblock->b_label = g->g_current_label;
+ g->g_current_label = NO_LABEL;
+ }
+ }
+ return false;
+}
+
+static int
+cfg_builder_maybe_start_new_block(cfg_builder *g)
+{
+ if (cfg_builder_current_block_is_terminated(g)) {
+ basicblock *b = cfg_builder_new_block(g);
+ if (b == NULL) {
+ return ERROR;
+ }
+ b->b_label = g->g_current_label;
+ g->g_current_label = NO_LABEL;
+ cfg_builder_use_next_block(g, b);
+ }
+ return SUCCESS;
+}
+
+#ifndef NDEBUG
+static bool
+cfg_builder_check(cfg_builder *g)
+{
+ assert(g->g_entryblock->b_iused > 0);
+ for (basicblock *block = g->g_block_list; block != NULL; block = block->b_list) {
+ assert(!_PyMem_IsPtrFreed(block));
+ if (block->b_instr != NULL) {
+ assert(block->b_ialloc > 0);
+ assert(block->b_iused >= 0);
+ assert(block->b_ialloc >= block->b_iused);
+ }
+ else {
+ assert (block->b_iused == 0);
+ assert (block->b_ialloc == 0);
+ }
+ }
+ return true;
+}
+#endif
+
+int
+_PyCfgBuilder_Init(cfg_builder *g)
+{
+ g->g_block_list = NULL;
+ basicblock *block = cfg_builder_new_block(g);
+ if (block == NULL) {
+ return ERROR;
+ }
+ g->g_curblock = g->g_entryblock = block;
+ g->g_current_label = NO_LABEL;
+ return SUCCESS;
+}
+
+void
+_PyCfgBuilder_Fini(cfg_builder* g)
+{
+ assert(cfg_builder_check(g));
+ basicblock *b = g->g_block_list;
+ while (b != NULL) {
+ if (b->b_instr) {
+ PyObject_Free((void *)b->b_instr);
+ }
+ basicblock *next = b->b_list;
+ PyObject_Free((void *)b);
+ b = next;
+ }
+}
+
+int
+_PyCfgBuilder_UseLabel(cfg_builder *g, jump_target_label lbl)
+{
+ g->g_current_label = lbl;
+ return cfg_builder_maybe_start_new_block(g);
+}
+
+int
+_PyCfgBuilder_Addop(cfg_builder *g, int opcode, int oparg, location loc)
+{
+ RETURN_IF_ERROR(cfg_builder_maybe_start_new_block(g));
+ return basicblock_addop(g->g_curblock, opcode, oparg, loc);
+}
+
+
+/***** debugging helpers *****/
+
+#ifndef NDEBUG
+static int remove_redundant_nops(basicblock *bb);
+
+static bool
+no_redundant_nops(cfg_builder *g) {
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ if (remove_redundant_nops(b) != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool
+no_empty_basic_blocks(cfg_builder *g) {
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ if (b->b_iused == 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool
+no_redundant_jumps(cfg_builder *g) {
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(b);
+ if (last != NULL) {
+ if (IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)) {
+ assert(last->i_target != b->b_next);
+ if (last->i_target == b->b_next) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+#endif
+
+/***** CFG preprocessing (jump targets and exceptions) *****/
+
+static int
+normalize_jumps_in_block(cfg_builder *g, basicblock *b) {
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(b);
+ if (last == NULL || !is_jump(last)) {
+ return SUCCESS;
+ }
+ assert(!IS_ASSEMBLER_OPCODE(last->i_opcode));
+ bool is_forward = last->i_target->b_visited == 0;
+ switch(last->i_opcode) {
+ case JUMP:
+ last->i_opcode = is_forward ? JUMP_FORWARD : JUMP_BACKWARD;
+ return SUCCESS;
+ case JUMP_NO_INTERRUPT:
+ last->i_opcode = is_forward ?
+ JUMP_FORWARD : JUMP_BACKWARD_NO_INTERRUPT;
+ return SUCCESS;
+ }
+ int reversed_opcode = 0;
+ switch(last->i_opcode) {
+ case POP_JUMP_IF_NOT_NONE:
+ reversed_opcode = POP_JUMP_IF_NONE;
+ break;
+ case POP_JUMP_IF_NONE:
+ reversed_opcode = POP_JUMP_IF_NOT_NONE;
+ break;
+ case POP_JUMP_IF_FALSE:
+ reversed_opcode = POP_JUMP_IF_TRUE;
+ break;
+ case POP_JUMP_IF_TRUE:
+ reversed_opcode = POP_JUMP_IF_FALSE;
+ break;
+ }
+ if (is_forward) {
+ return SUCCESS;
+ }
+ /* transform 'conditional jump T' to
+ * 'reversed_jump b_next' followed by 'jump_backwards T'
+ */
+
+ basicblock *target = last->i_target;
+ basicblock *backwards_jump = cfg_builder_new_block(g);
+ if (backwards_jump == NULL) {
+ return ERROR;
+ }
+ RETURN_IF_ERROR(basicblock_addop(backwards_jump, JUMP, target->b_label.id, NO_LOCATION));
+ backwards_jump->b_instr[0].i_target = target;
+ last->i_opcode = reversed_opcode;
+ last->i_target = b->b_next;
+
+ backwards_jump->b_cold = b->b_cold;
+ backwards_jump->b_next = b->b_next;
+ b->b_next = backwards_jump;
+ return SUCCESS;
+}
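
Concretely, the transformation above turns a backward conditional jump into a
forward conditional jump over a fresh block that holds the backward
unconditional jump. A hypothetical before/after listing (labels invented for
illustration):

    before:                        after:
    L1: ...                        L1: ...
        POP_JUMP_IF_TRUE L1            POP_JUMP_IF_FALSE L2
    L2: ...                            JUMP_BACKWARD L1
                                   L2: ...
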
+
+
+static int
+normalize_jumps(_PyCfgBuilder *g)
+{
+ basicblock *entryblock = g->g_entryblock;
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ b->b_visited = 0;
+ }
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ b->b_visited = 1;
+ RETURN_IF_ERROR(normalize_jumps_in_block(g, b));
+ }
+ return SUCCESS;
+}
+
+static void
+resolve_jump_offsets(basicblock *entryblock)
+{
+ int bsize, totsize, extended_arg_recompile;
+
+ /* Compute the size of each block and fixup jump args.
+ Replace block pointer with position in bytecode. */
+ do {
+ totsize = 0;
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ bsize = blocksize(b);
+ b->b_offset = totsize;
+ totsize += bsize;
+ }
+ extended_arg_recompile = 0;
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ bsize = b->b_offset;
+ for (int i = 0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ int isize = _PyCfg_InstrSize(instr);
+ /* Relative jumps are computed relative to
+ the instruction pointer after fetching
+ the jump instruction.
+ */
+ bsize += isize;
+ if (is_jump(instr)) {
+ instr->i_oparg = instr->i_target->b_offset;
+ if (is_relative_jump(instr)) {
+ if (instr->i_oparg < bsize) {
+ assert(IS_BACKWARDS_JUMP_OPCODE(instr->i_opcode));
+ instr->i_oparg = bsize - instr->i_oparg;
+ }
+ else {
+ assert(!IS_BACKWARDS_JUMP_OPCODE(instr->i_opcode));
+ instr->i_oparg -= bsize;
+ }
+ }
+ else {
+ assert(!IS_BACKWARDS_JUMP_OPCODE(instr->i_opcode));
+ }
+ if (_PyCfg_InstrSize(instr) != isize) {
+ extended_arg_recompile = 1;
+ }
+ }
+ }
+ }
+
+ /* XXX: This is an awful hack that could hurt performance, but
+ on the bright side it should work until we come up
+ with a better solution.
+
+ The issue is that in the first loop blocksize() is called
+ which calls _PyCfg_InstrSize() which requires i_oparg be set
+ appropriately. There is a bootstrap problem because
+ i_oparg is calculated in the second loop above.
+
+ So we loop until we stop seeing new EXTENDED_ARGs.
+ The only EXTENDED_ARGs that could be popping up are
+ ones in jump instructions. So this should converge
+ fairly quickly.
+ */
+ } while (extended_arg_recompile);
+}
+
+int
+_PyCfg_ResolveJumps(_PyCfgBuilder *g)
+{
+ RETURN_IF_ERROR(normalize_jumps(g));
+ assert(no_redundant_jumps(g));
+ resolve_jump_offsets(g->g_entryblock);
+ return SUCCESS;
+}
+
+static int
+check_cfg(cfg_builder *g) {
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ /* Raise SystemError if jump or exit is not last instruction in the block. */
+ for (int i = 0; i < b->b_iused; i++) {
+ int opcode = b->b_instr[i].i_opcode;
+ assert(!IS_ASSEMBLER_OPCODE(opcode));
+ if (IS_TERMINATOR_OPCODE(opcode)) {
+ if (i != b->b_iused - 1) {
+ PyErr_SetString(PyExc_SystemError, "malformed control flow graph.");
+ return ERROR;
+ }
+ }
+ }
+ }
+ return SUCCESS;
+}
+
+/* Calculate the actual jump target from the target_label */
+static int
+translate_jump_labels_to_targets(basicblock *entryblock)
+{
+ int max_label = -1;
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ if (b->b_label.id > max_label) {
+ max_label = b->b_label.id;
+ }
+ }
+ size_t mapsize = sizeof(basicblock *) * (max_label + 1);
+ basicblock **label2block = (basicblock **)PyMem_Malloc(mapsize);
+ if (!label2block) {
+ PyErr_NoMemory();
+ return ERROR;
+ }
+ memset(label2block, 0, mapsize);
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ if (b->b_label.id >= 0) {
+ label2block[b->b_label.id] = b;
+ }
+ }
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ for (int i = 0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ assert(instr->i_target == NULL);
+ if (HAS_TARGET(instr->i_opcode)) {
+ int lbl = instr->i_oparg;
+ assert(lbl >= 0 && lbl <= max_label);
+ instr->i_target = label2block[lbl];
+ assert(instr->i_target != NULL);
+ assert(instr->i_target->b_label.id == lbl);
+ }
+ }
+ }
+ PyMem_Free(label2block);
+ return SUCCESS;
+}
+
+
+static int
+mark_except_handlers(basicblock *entryblock) {
+#ifndef NDEBUG
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ assert(!b->b_except_handler);
+ }
+#endif
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ for (int i=0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ if (is_block_push(instr)) {
+ instr->i_target->b_except_handler = 1;
+ }
+ }
+ }
+ return SUCCESS;
+}
+
+
+typedef _PyCfgExceptStack ExceptStack;
+
+static basicblock *
+push_except_block(ExceptStack *stack, cfg_instr *setup) {
+ assert(is_block_push(setup));
+ int opcode = setup->i_opcode;
+ basicblock * target = setup->i_target;
+ if (opcode == SETUP_WITH || opcode == SETUP_CLEANUP) {
+ target->b_preserve_lasti = 1;
+ }
+ stack->handlers[++stack->depth] = target;
+ return target;
+}
+
+static basicblock *
+pop_except_block(ExceptStack *stack) {
+ assert(stack->depth > 0);
+ return stack->handlers[--stack->depth];
+}
+
+static basicblock *
+except_stack_top(ExceptStack *stack) {
+ return stack->handlers[stack->depth];
+}
+
+static ExceptStack *
+make_except_stack(void) {
+ ExceptStack *new = PyMem_Malloc(sizeof(ExceptStack));
+ if (new == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+ new->depth = 0;
+ new->handlers[0] = NULL;
+ return new;
+}
+
+static ExceptStack *
+copy_except_stack(ExceptStack *stack) {
+ ExceptStack *copy = PyMem_Malloc(sizeof(ExceptStack));
+ if (copy == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+ memcpy(copy, stack, sizeof(ExceptStack));
+ return copy;
+}
+
+static basicblock**
+make_cfg_traversal_stack(basicblock *entryblock) {
+ int nblocks = 0;
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ b->b_visited = 0;
+ nblocks++;
+ }
+ basicblock **stack = (basicblock **)PyMem_Malloc(sizeof(basicblock *) * nblocks);
+ if (!stack) {
+ PyErr_NoMemory();
+ }
+ return stack;
+}
+
+Py_LOCAL_INLINE(void)
+stackdepth_push(basicblock ***sp, basicblock *b, int depth)
+{
+ assert(b->b_startdepth < 0 || b->b_startdepth == depth);
+ if (b->b_startdepth < depth && b->b_startdepth < 100) {
+ assert(b->b_startdepth < 0);
+ b->b_startdepth = depth;
+ *(*sp)++ = b;
+ }
+}
+
+/* Find the flow path that needs the largest stack. We assume that
+ * cycles in the flow graph have no net effect on the stack depth.
+ */
+int
+_PyCfg_Stackdepth(basicblock *entryblock, int code_flags)
+{
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ b->b_startdepth = INT_MIN;
+ }
+ basicblock **stack = make_cfg_traversal_stack(entryblock);
+ if (!stack) {
+ return ERROR;
+ }
+
+ int maxdepth = 0;
+ basicblock **sp = stack;
+ if (code_flags & (CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR)) {
+ stackdepth_push(&sp, entryblock, 1);
+ } else {
+ stackdepth_push(&sp, entryblock, 0);
+ }
+
+ while (sp != stack) {
+ basicblock *b = *--sp;
+ int depth = b->b_startdepth;
+ assert(depth >= 0);
+ basicblock *next = b->b_next;
+ for (int i = 0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ int effect = PyCompile_OpcodeStackEffectWithJump(instr->i_opcode, instr->i_oparg, 0);
+ if (effect == PY_INVALID_STACK_EFFECT) {
+ PyErr_Format(PyExc_SystemError,
+ "compiler PyCompile_OpcodeStackEffectWithJump(opcode=%d, arg=%i) failed",
+ instr->i_opcode, instr->i_oparg);
+ return ERROR;
+ }
+ int new_depth = depth + effect;
+ assert(new_depth >= 0); /* invalid code or bug in stackdepth() */
+ if (new_depth > maxdepth) {
+ maxdepth = new_depth;
+ }
+ if (HAS_TARGET(instr->i_opcode)) {
+ effect = PyCompile_OpcodeStackEffectWithJump(instr->i_opcode, instr->i_oparg, 1);
+ assert(effect != PY_INVALID_STACK_EFFECT);
+ int target_depth = depth + effect;
+ assert(target_depth >= 0); /* invalid code or bug in stackdepth() */
+ if (target_depth > maxdepth) {
+ maxdepth = target_depth;
+ }
+ stackdepth_push(&sp, instr->i_target, target_depth);
+ }
+ depth = new_depth;
+ assert(!IS_ASSEMBLER_OPCODE(instr->i_opcode));
+ if (IS_UNCONDITIONAL_JUMP_OPCODE(instr->i_opcode) ||
+ IS_SCOPE_EXIT_OPCODE(instr->i_opcode))
+ {
+ /* remaining code is dead */
+ next = NULL;
+ break;
+ }
+ }
+ if (next != NULL) {
+ assert(BB_HAS_FALLTHROUGH(b));
+ stackdepth_push(&sp, next, depth);
+ }
+ }
+ PyMem_Free(stack);
+ return maxdepth;
+}
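
For intuition, here is a standalone sketch of the same worklist scheme run on
a tiny hand-built diamond graph. It propagates block-level net effects only;
the real pass walks individual instructions and tracks jump-target depths
separately. Block effects and successors here are invented:

#include <stdio.h>

#define NBLOCKS 4

static const int effect[NBLOCKS]  = {2, -1, 1, -2};   /* net stack effect */
static const int succ[NBLOCKS][2] = {{1, 2}, {3, -1}, {3, -1}, {-1, -1}};

int main(void)
{
    int startdepth[NBLOCKS] = {-1, -1, -1, -1};   /* -1 == not yet visited */
    int worklist[NBLOCKS];
    int *sp = worklist;
    int maxdepth = 0;

    startdepth[0] = 0;
    *sp++ = 0;
    while (sp != worklist) {
        int b = *--sp;
        int depth = startdepth[b] + effect[b];
        if (depth > maxdepth) {
            maxdepth = depth;
        }
        for (int k = 0; k < 2 && succ[b][k] >= 0; k++) {
            int t = succ[b][k];
            if (startdepth[t] < 0) {    /* first visit: record depth, push */
                startdepth[t] = depth;
                *sp++ = t;
            }
        }
    }
    printf("max stack depth: %d\n", maxdepth);   /* prints 3 */
    return 0;
}
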
+
+static int
+label_exception_targets(basicblock *entryblock) {
+ basicblock **todo_stack = make_cfg_traversal_stack(entryblock);
+ if (todo_stack == NULL) {
+ return ERROR;
+ }
+ ExceptStack *except_stack = make_except_stack();
+ if (except_stack == NULL) {
+ PyMem_Free(todo_stack);
+ PyErr_NoMemory();
+ return ERROR;
+ }
+ except_stack->depth = 0;
+ todo_stack[0] = entryblock;
+ entryblock->b_visited = 1;
+ entryblock->b_exceptstack = except_stack;
+ basicblock **todo = &todo_stack[1];
+ basicblock *handler = NULL;
+ while (todo > todo_stack) {
+ todo--;
+ basicblock *b = todo[0];
+ assert(b->b_visited == 1);
+ except_stack = b->b_exceptstack;
+ assert(except_stack != NULL);
+ b->b_exceptstack = NULL;
+ handler = except_stack_top(except_stack);
+ for (int i = 0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ if (is_block_push(instr)) {
+ if (!instr->i_target->b_visited) {
+ ExceptStack *copy = copy_except_stack(except_stack);
+ if (copy == NULL) {
+ goto error;
+ }
+ instr->i_target->b_exceptstack = copy;
+ todo[0] = instr->i_target;
+ instr->i_target->b_visited = 1;
+ todo++;
+ }
+ handler = push_except_block(except_stack, instr);
+ }
+ else if (instr->i_opcode == POP_BLOCK) {
+ handler = pop_except_block(except_stack);
+ }
+ else if (is_jump(instr)) {
+ instr->i_except = handler;
+ assert(i == b->b_iused -1);
+ if (!instr->i_target->b_visited) {
+ if (BB_HAS_FALLTHROUGH(b)) {
+ ExceptStack *copy = copy_except_stack(except_stack);
+ if (copy == NULL) {
+ goto error;
+ }
+ instr->i_target->b_exceptstack = copy;
+ }
+ else {
+ instr->i_target->b_exceptstack = except_stack;
+ except_stack = NULL;
+ }
+ todo[0] = instr->i_target;
+ instr->i_target->b_visited = 1;
+ todo++;
+ }
+ }
+ else {
+ if (instr->i_opcode == YIELD_VALUE) {
+ instr->i_oparg = except_stack->depth;
+ }
+ instr->i_except = handler;
+ }
+ }
+ if (BB_HAS_FALLTHROUGH(b) && !b->b_next->b_visited) {
+ assert(except_stack != NULL);
+ b->b_next->b_exceptstack = except_stack;
+ todo[0] = b->b_next;
+ b->b_next->b_visited = 1;
+ todo++;
+ }
+ else if (except_stack != NULL) {
+ PyMem_Free(except_stack);
+ }
+ }
+#ifdef Py_DEBUG
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ assert(b->b_exceptstack == NULL);
+ }
+#endif
+ PyMem_Free(todo_stack);
+ return SUCCESS;
+error:
+ PyMem_Free(todo_stack);
+ PyMem_Free(except_stack);
+ return ERROR;
+}
+
+/***** CFG optimizations *****/
+
+static int
+mark_reachable(basicblock *entryblock) {
+ basicblock **stack = make_cfg_traversal_stack(entryblock);
+ if (stack == NULL) {
+ return ERROR;
+ }
+ basicblock **sp = stack;
+ entryblock->b_predecessors = 1;
+ *sp++ = entryblock;
+ while (sp > stack) {
+ basicblock *b = *(--sp);
+ b->b_visited = 1;
+ if (b->b_next && BB_HAS_FALLTHROUGH(b)) {
+ if (!b->b_next->b_visited) {
+ assert(b->b_next->b_predecessors == 0);
+ *sp++ = b->b_next;
+ }
+ b->b_next->b_predecessors++;
+ }
+ for (int i = 0; i < b->b_iused; i++) {
+ basicblock *target;
+ cfg_instr *instr = &b->b_instr[i];
+ if (is_jump(instr) || is_block_push(instr)) {
+ target = instr->i_target;
+ if (!target->b_visited) {
+ assert(target->b_predecessors == 0 || target == b->b_next);
+ *sp++ = target;
+ }
+ target->b_predecessors++;
+ }
+ }
+ }
+ PyMem_Free(stack);
+ return SUCCESS;
+}
+
+static void
+eliminate_empty_basic_blocks(cfg_builder *g) {
+ /* Eliminate empty blocks */
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ basicblock *next = b->b_next;
+ while (next && next->b_iused == 0) {
+ next = next->b_next;
+ }
+ b->b_next = next;
+ }
+ while(g->g_entryblock && g->g_entryblock->b_iused == 0) {
+ g->g_entryblock = g->g_entryblock->b_next;
+ }
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ assert(b->b_iused > 0);
+ for (int i = 0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ if (HAS_TARGET(instr->i_opcode)) {
+ basicblock *target = instr->i_target;
+ while (target->b_iused == 0) {
+ target = target->b_next;
+ }
+ instr->i_target = target;
+ assert(instr->i_target && instr->i_target->b_iused > 0);
+ }
+ }
+ }
+}
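
The splicing loop is the core of this pass. Here is a standalone sketch of it
on an invented four-block list; the real function additionally retargets jump
instructions and may advance the entry block:

#include <stdio.h>

struct block {
    int iused;
    struct block *next;
};

int main(void)
{
    struct block d = {3, NULL};
    struct block c = {0, &d};    /* empty: will be spliced out */
    struct block b = {0, &c};    /* empty: will be spliced out */
    struct block a = {2, &b};

    for (struct block *blk = &a; blk != NULL; blk = blk->next) {
        struct block *next = blk->next;
        while (next && next->iused == 0) {
            next = next->next;   /* skip past empty blocks */
        }
        blk->next = next;
    }
    for (struct block *blk = &a; blk != NULL; blk = blk->next) {
        printf("block with %d instruction(s)\n", blk->iused);  /* 2, then 3 */
    }
    return 0;
}
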
+
+static int
+remove_redundant_nops(basicblock *bb) {
+ /* Remove NOPs when legal to do so. */
+ int dest = 0;
+ int prev_lineno = -1;
+ for (int src = 0; src < bb->b_iused; src++) {
+ int lineno = bb->b_instr[src].i_loc.lineno;
+ if (bb->b_instr[src].i_opcode == NOP) {
+ /* Eliminate no-op if it doesn't have a line number */
+ if (lineno < 0) {
+ continue;
+ }
+ /* or, if the previous instruction had the same line number. */
+ if (prev_lineno == lineno) {
+ continue;
+ }
+ /* or, if the next instruction has same line number or no line number */
+ if (src < bb->b_iused - 1) {
+ int next_lineno = bb->b_instr[src+1].i_loc.lineno;
+ if (next_lineno == lineno) {
+ continue;
+ }
+ if (next_lineno < 0) {
+ bb->b_instr[src+1].i_loc = bb->b_instr[src].i_loc;
+ continue;
+ }
+ }
+ else {
+ basicblock* next = bb->b_next;
+ while (next && next->b_iused == 0) {
+ next = next->b_next;
+ }
+ /* or if last instruction in BB and next BB has same line number */
+ if (next) {
+ if (lineno == next->b_instr[0].i_loc.lineno) {
+ continue;
+ }
+ }
+ }
+
+ }
+ if (dest != src) {
+ bb->b_instr[dest] = bb->b_instr[src];
+ }
+ dest++;
+ prev_lineno = lineno;
+ }
+ assert(dest <= bb->b_iused);
+ int num_removed = bb->b_iused - dest;
+ bb->b_iused = dest;
+ return num_removed;
+}
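
The src/dest walk above is a stable in-place compaction. A standalone sketch
with stand-in data, minus the line-number exceptions that keep some NOPs
alive:

#include <stdio.h>

#define NOP 0

int main(void)
{
    int instr[] = {NOP, 7, NOP, 9, 4, NOP};
    int iused = 6;

    int dest = 0;
    for (int src = 0; src < iused; src++) {
        if (instr[src] == NOP) {
            continue;                  /* skip: not copied to dest */
        }
        if (dest != src) {
            instr[dest] = instr[src];  /* shift survivors down */
        }
        dest++;
    }
    int num_removed = iused - dest;
    iused = dest;

    printf("removed %d, kept %d:", num_removed, iused);
    for (int i = 0; i < iused; i++) {
        printf(" %d", instr[i]);
    }
    printf("\n");                      /* removed 3, kept 3: 7 9 4 */
    return 0;
}
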
+
+static int
+remove_redundant_nops_and_pairs(basicblock *entryblock)
+{
+ bool done = false;
+
+ while (! done) {
+ done = true;
+ cfg_instr *prev_instr = NULL;
+ cfg_instr *instr = NULL;
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ remove_redundant_nops(b);
+ if (IS_LABEL(b->b_label)) {
+ /* this block is a jump target, forget instr */
+ instr = NULL;
+ }
+ for (int i = 0; i < b->b_iused; i++) {
+ prev_instr = instr;
+ instr = &b->b_instr[i];
+ int prev_opcode = prev_instr ? prev_instr->i_opcode : 0;
+ int prev_oparg = prev_instr ? prev_instr->i_oparg : 0;
+ int opcode = instr->i_opcode;
+ bool is_redundant_pair = false;
+ if (opcode == POP_TOP) {
+ if (prev_opcode == LOAD_CONST) {
+ is_redundant_pair = true;
+ }
+ else if (prev_opcode == COPY && prev_oparg == 1) {
+ is_redundant_pair = true;
+ }
+ }
+ if (is_redundant_pair) {
+ INSTR_SET_OP0(prev_instr, NOP);
+ INSTR_SET_OP0(instr, NOP);
+ done = false;
+ }
+ }
+ if ((instr && is_jump(instr)) || !BB_HAS_FALLTHROUGH(b)) {
+ instr = NULL;
+ }
+ }
+ }
+ return SUCCESS;
+}
+
+static int
+remove_redundant_jumps(cfg_builder *g) {
+ /* If a non-empty block ends with a jump instruction, check if the next
+ * non-empty block reached through normal flow control is the target
+ * of that jump. If it is, then the jump instruction is redundant and
+ * can be deleted.
+ */
+ assert(no_empty_basic_blocks(g));
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(b);
+ assert(last != NULL);
+ assert(!IS_ASSEMBLER_OPCODE(last->i_opcode));
+ if (IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)) {
+ if (last->i_target == NULL) {
+ PyErr_SetString(PyExc_SystemError, "jump with NULL target");
+ return ERROR;
+ }
+ if (last->i_target == b->b_next) {
+ assert(b->b_next->b_iused);
+ INSTR_SET_OP0(last, NOP);
+ }
+ }
+ }
+ return SUCCESS;
+}
+
+/* Maximum size of basic block that should be copied in optimizer */
+#define MAX_COPY_SIZE 4
+
+/* If this block ends with an unconditional jump to a small exit block, then
+ * remove the jump and extend this block with the target.
+ * Returns 1 if extended, 0 if no change, and -1 on error.
+ */
+static int
+inline_small_exit_blocks(basicblock *bb) {
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(bb);
+ if (last == NULL) {
+ return 0;
+ }
+ if (!IS_UNCONDITIONAL_JUMP_OPCODE(last->i_opcode)) {
+ return 0;
+ }
+ basicblock *target = last->i_target;
+ if (basicblock_exits_scope(target) && target->b_iused <= MAX_COPY_SIZE) {
+ INSTR_SET_OP0(last, NOP);
+ RETURN_IF_ERROR(basicblock_append_instructions(bb, target));
+ return 1;
+ }
+ return 0;
+}
+
+// Attempt to eliminate jumps to jumps by updating inst to jump to
+// target->i_target using the provided opcode. Return whether or not the
+// optimization was successful.
+static bool
+jump_thread(cfg_instr *inst, cfg_instr *target, int opcode)
+{
+ assert(is_jump(inst));
+ assert(is_jump(target));
+ // bpo-45773: If inst->i_target == target->i_target, then nothing actually
+ // changes (and we fall into an infinite loop):
+ if ((inst->i_loc.lineno == target->i_loc.lineno || target->i_loc.lineno == -1) &&
+ inst->i_target != target->i_target)
+ {
+ inst->i_target = target->i_target;
+ inst->i_opcode = opcode;
+ return true;
+ }
+ return false;
+}
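
A standalone sketch of this threading step, with the line-number condition
omitted and a minimal invented instr type; the guard is what keeps the
optimizer from spinning on self-referential jumps:

#include <stdbool.h>
#include <stdio.h>

enum { JUMP, RETURN };

struct instr {
    int opcode;
    struct instr *target;
};

static bool jump_thread(struct instr *inst, struct instr *target, int opcode)
{
    if (inst->target != target->target) {  /* a no-op rewrite would loop forever */
        inst->target = target->target;
        inst->opcode = opcode;
        return true;
    }
    return false;
}

int main(void)
{
    struct instr ret = {RETURN, NULL};
    struct instr mid = {JUMP, &ret};   /* JUMP -> ret */
    struct instr top = {JUMP, &mid};   /* JUMP -> mid */

    while (top.target->opcode == JUMP &&
           jump_thread(&top, top.target, JUMP)) {
        /* keep threading through chains of jumps */
    }
    printf("top now targets opcode %d (RETURN)\n", top.target->opcode);
    return 0;
}
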
+
+static PyObject*
+get_const_value(int opcode, int oparg, PyObject *co_consts)
+{
+ PyObject *constant = NULL;
+ assert(HAS_CONST(opcode));
+ if (opcode == LOAD_CONST) {
+ constant = PyList_GET_ITEM(co_consts, oparg);
+ }
+
+ if (constant == NULL) {
+ PyErr_SetString(PyExc_SystemError,
+ "Internal error: failed to get value of a constant");
+ return NULL;
+ }
+ return Py_NewRef(constant);
+}
+
+/* Replace LOAD_CONST c1, LOAD_CONST c2 ... LOAD_CONST cn, BUILD_TUPLE n
+ with LOAD_CONST (c1, c2, ... cn).
+ The consts table must still be in list form so that the
+ new constant (c1, c2, ... cn) can be appended.
+ Called with inst pointing to the first LOAD_CONST.
+*/
+static int
+fold_tuple_on_constants(PyObject *const_cache,
+ cfg_instr *inst,
+ int n, PyObject *consts)
+{
+ /* Pre-conditions */
+ assert(PyDict_CheckExact(const_cache));
+ assert(PyList_CheckExact(consts));
+ assert(inst[n].i_opcode == BUILD_TUPLE);
+ assert(inst[n].i_oparg == n);
+
+ for (int i = 0; i < n; i++) {
+ if (!HAS_CONST(inst[i].i_opcode)) {
+ return SUCCESS;
+ }
+ }
+
+ /* Build up the new tuple of constants */
+ PyObject *newconst = PyTuple_New(n);
+ if (newconst == NULL) {
+ return ERROR;
+ }
+ for (int i = 0; i < n; i++) {
+ int op = inst[i].i_opcode;
+ int arg = inst[i].i_oparg;
+ PyObject *constant = get_const_value(op, arg, consts);
+ if (constant == NULL) {
+ return ERROR;
+ }
+ PyTuple_SET_ITEM(newconst, i, constant);
+ }
+ if (_PyCompile_ConstCacheMergeOne(const_cache, &newconst) < 0) {
+ Py_DECREF(newconst);
+ return ERROR;
+ }
+
+ Py_ssize_t index;
+ for (index = 0; index < PyList_GET_SIZE(consts); index++) {
+ if (PyList_GET_ITEM(consts, index) == newconst) {
+ break;
+ }
+ }
+ if (index == PyList_GET_SIZE(consts)) {
+ if ((size_t)index >= (size_t)INT_MAX - 1) {
+ Py_DECREF(newconst);
+ PyErr_SetString(PyExc_OverflowError, "too many constants");
+ return ERROR;
+ }
+ if (PyList_Append(consts, newconst)) {
+ Py_DECREF(newconst);
+ return ERROR;
+ }
+ }
+ Py_DECREF(newconst);
+ for (int i = 0; i < n; i++) {
+ INSTR_SET_OP0(&inst[i], NOP);
+ }
+ INSTR_SET_OP1(&inst[n], LOAD_CONST, (int)index);
+ return SUCCESS;
+}
+
+#define VISITED (-1)
+
+// Replace an arbitrary run of SWAPs and NOPs with an optimal one that has the
+// same effect.
+static int
+swaptimize(basicblock *block, int *ix)
+{
+ // NOTE: "./python -m test test_patma" serves as a good, quick stress test
+ // for this function. Make sure to blow away cached *.pyc files first!
+ assert(*ix < block->b_iused);
+ cfg_instr *instructions = &block->b_instr[*ix];
+ // Find the length of the current sequence of SWAPs and NOPs, and record the
+ // maximum depth of the stack manipulations:
+ assert(instructions[0].i_opcode == SWAP);
+ int depth = instructions[0].i_oparg;
+ int len = 0;
+ int more = false;
+ int limit = block->b_iused - *ix;
+ while (++len < limit) {
+ int opcode = instructions[len].i_opcode;
+ if (opcode == SWAP) {
+ depth = Py_MAX(depth, instructions[len].i_oparg);
+ more = true;
+ }
+ else if (opcode != NOP) {
+ break;
+ }
+ }
+ // It's already optimal if there's only one SWAP:
+ if (!more) {
+ return SUCCESS;
+ }
+ // Create an array with elements {0, 1, 2, ..., depth - 1}:
+ int *stack = PyMem_Malloc(depth * sizeof(int));
+ if (stack == NULL) {
+ PyErr_NoMemory();
+ return ERROR;
+ }
+ for (int i = 0; i < depth; i++) {
+ stack[i] = i;
+ }
+ // Simulate the combined effect of these instructions by "running" them on
+ // our "stack":
+ for (int i = 0; i < len; i++) {
+ if (instructions[i].i_opcode == SWAP) {
+ int oparg = instructions[i].i_oparg;
+ int top = stack[0];
+ // SWAPs are 1-indexed:
+ stack[0] = stack[oparg - 1];
+ stack[oparg - 1] = top;
+ }
+ }
+ // Now we can begin! Our approach here is based on a solution to a closely
+ // related problem (https://cs.stackexchange.com/a/13938). It's easiest to
+ // think of this algorithm as determining the steps needed to efficiently
+ // "un-shuffle" our stack. By performing the moves in *reverse* order,
+ // though, we can efficiently *shuffle* it! For this reason, we will be
+ // replacing instructions starting from the *end* of the run. Since the
+ // solution is optimal, we don't need to worry about running out of space:
+ int current = len - 1;
+ for (int i = 0; i < depth; i++) {
+ // Skip items that have already been visited, or just happen to be in
+ // the correct location:
+ if (stack[i] == VISITED || stack[i] == i) {
+ continue;
+ }
+ // Okay, we've found an item that hasn't been visited. It forms a cycle
+ // with other items; traversing the cycle and swapping each item with
+ // the next will put them all in the correct place. The weird
+ // loop-and-a-half is necessary to insert 0 into every cycle, since we
+ // can only swap from that position:
+ int j = i;
+ while (true) {
+ // Skip the actual swap if our item is zero, since swapping the top
+ // item with itself is pointless:
+ if (j) {
+ assert(0 <= current);
+ // SWAPs are 1-indexed:
+ instructions[current].i_opcode = SWAP;
+ instructions[current--].i_oparg = j + 1;
+ }
+ if (stack[j] == VISITED) {
+ // Completed the cycle:
+ assert(j == i);
+ break;
+ }
+ int next_j = stack[j];
+ stack[j] = VISITED;
+ j = next_j;
+ }
+ }
+ // NOP out any unused instructions:
+ while (0 <= current) {
+ INSTR_SET_OP0(&instructions[current--], NOP);
+ }
+ PyMem_Free(stack);
+ *ix += len - 1;
+ return SUCCESS;
+}
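
The cycle walk is the subtle part. Here is a standalone sketch that prints the
SWAPs realizing an invented permutation; it emits the moves in forward
("un-shuffle") order, whereas the code above writes them into the block in
reverse to shuffle:

#include <stdio.h>

#define VISITED (-1)
#define DEPTH 5

int main(void)
{
    int stack[DEPTH] = {2, 0, 1, 4, 3};   /* two cycles: (0 2 1) and (3 4) */
    for (int i = 0; i < DEPTH; i++) {
        if (stack[i] == VISITED || stack[i] == i) {
            continue;                 /* placed already, or a fixed point */
        }
        int j = i;
        while (1) {
            if (j) {                  /* swapping slot 0 with itself is pointless */
                printf("SWAP %d\n", j + 1);   /* SWAPs are 1-indexed */
            }
            if (stack[j] == VISITED) {
                break;                /* completed the cycle */
            }
            int next_j = stack[j];
            stack[j] = VISITED;
            j = next_j;
        }
    }
    return 0;   /* prints SWAP 3, SWAP 2, SWAP 4, SWAP 5, SWAP 4 */
}
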
+
+
+// This list is pretty small, since it's only okay to reorder opcodes that:
+// - can't affect control flow (like jumping or raising exceptions)
+// - can't invoke arbitrary code (besides finalizers)
+// - only touch the TOS (and pop it when finished)
+#define SWAPPABLE(opcode) \
+ ((opcode) == STORE_FAST || (opcode) == POP_TOP)
+
+static int
+next_swappable_instruction(basicblock *block, int i, int lineno)
+{
+ while (++i < block->b_iused) {
+ cfg_instr *instruction = &block->b_instr[i];
+ if (0 <= lineno && instruction->i_loc.lineno != lineno) {
+ // Optimizing across this instruction could cause user-visible
+ // changes in the names bound between line tracing events!
+ return -1;
+ }
+ if (instruction->i_opcode == NOP) {
+ continue;
+ }
+ if (SWAPPABLE(instruction->i_opcode)) {
+ return i;
+ }
+ return -1;
+ }
+ return -1;
+}
+
+// Attempt to apply SWAPs statically by swapping *instructions* rather than
+// stack items. For example, we can replace SWAP(2), POP_TOP, STORE_FAST(42)
+// with the more efficient NOP, STORE_FAST(42), POP_TOP.
+static void
+apply_static_swaps(basicblock *block, int i)
+{
+ // SWAPs are to our left, and potential swaperands are to our right:
+ for (; 0 <= i; i--) {
+ assert(i < block->b_iused);
+ cfg_instr *swap = &block->b_instr[i];
+ if (swap->i_opcode != SWAP) {
+ if (swap->i_opcode == NOP || SWAPPABLE(swap->i_opcode)) {
+ // Nope, but we know how to handle these. Keep looking:
+ continue;
+ }
+ // We can't reason about what this instruction does. Bail:
+ return;
+ }
+ int j = next_swappable_instruction(block, i, -1);
+ if (j < 0) {
+ return;
+ }
+ int k = j;
+ int lineno = block->b_instr[j].i_loc.lineno;
+ for (int count = swap->i_oparg - 1; 0 < count; count--) {
+ k = next_swappable_instruction(block, k, lineno);
+ if (k < 0) {
+ return;
+ }
+ }
+ // Success!
+ INSTR_SET_OP0(swap, NOP);
+ cfg_instr temp = block->b_instr[j];
+ block->b_instr[j] = block->b_instr[k];
+ block->b_instr[k] = temp;
+ }
+}
+
+static int
+optimize_basic_block(PyObject *const_cache, basicblock *bb, PyObject *consts)
+{
+ assert(PyDict_CheckExact(const_cache));
+ assert(PyList_CheckExact(consts));
+ cfg_instr nop;
+ INSTR_SET_OP0(&nop, NOP);
+ cfg_instr *target = &nop;
+ int opcode = 0;
+ int oparg = 0;
+ int nextop = 0;
+ for (int i = 0; i < bb->b_iused; i++) {
+ cfg_instr *inst = &bb->b_instr[i];
+ bool is_copy_of_load_const = (opcode == LOAD_CONST &&
+ inst->i_opcode == COPY &&
+ inst->i_oparg == 1);
+ if (! is_copy_of_load_const) {
+ opcode = inst->i_opcode;
+ oparg = inst->i_oparg;
+ if (HAS_TARGET(opcode)) {
+ assert(inst->i_target->b_iused > 0);
+ target = &inst->i_target->b_instr[0];
+ assert(!IS_ASSEMBLER_OPCODE(target->i_opcode));
+ }
+ else {
+ target = &nop;
+ }
+ }
+ nextop = i+1 < bb->b_iused ? bb->b_instr[i+1].i_opcode : 0;
+ assert(!IS_ASSEMBLER_OPCODE(opcode));
+ switch (opcode) {
+ /* Remove LOAD_CONST const; conditional jump */
+ case LOAD_CONST:
+ {
+ PyObject* cnt;
+ int is_true;
+ int jump_if_true;
+ switch(nextop) {
+ case POP_JUMP_IF_FALSE:
+ case POP_JUMP_IF_TRUE:
+ cnt = get_const_value(opcode, oparg, consts);
+ if (cnt == NULL) {
+ goto error;
+ }
+ is_true = PyObject_IsTrue(cnt);
+ Py_DECREF(cnt);
+ if (is_true == -1) {
+ goto error;
+ }
+ INSTR_SET_OP0(inst, NOP);
+ jump_if_true = nextop == POP_JUMP_IF_TRUE;
+ if (is_true == jump_if_true) {
+ bb->b_instr[i+1].i_opcode = JUMP;
+ }
+ else {
+ INSTR_SET_OP0(&bb->b_instr[i + 1], NOP);
+ }
+ break;
+ case IS_OP:
+ cnt = get_const_value(opcode, oparg, consts);
+ if (cnt == NULL) {
+ goto error;
+ }
+ int jump_op = i+2 < bb->b_iused ? bb->b_instr[i+2].i_opcode : 0;
+ if (Py_IsNone(cnt) && (jump_op == POP_JUMP_IF_FALSE || jump_op == POP_JUMP_IF_TRUE)) {
+ unsigned char nextarg = bb->b_instr[i+1].i_oparg;
+ INSTR_SET_OP0(inst, NOP);
+ INSTR_SET_OP0(&bb->b_instr[i + 1], NOP);
+ bb->b_instr[i+2].i_opcode = nextarg ^ (jump_op == POP_JUMP_IF_FALSE) ?
+ POP_JUMP_IF_NOT_NONE : POP_JUMP_IF_NONE;
+ }
+ Py_DECREF(cnt);
+ break;
+ case RETURN_VALUE:
+ INSTR_SET_OP0(inst, NOP);
+ INSTR_SET_OP1(&bb->b_instr[++i], RETURN_CONST, oparg);
+ break;
+ }
+ break;
+ }
+ /* Try to fold tuples of constants.
+ Skip over BUILD_TUPLE(1) UNPACK_SEQUENCE(1).
+ Replace BUILD_TUPLE(2) UNPACK_SEQUENCE(2) with SWAP(2).
+ Replace BUILD_TUPLE(3) UNPACK_SEQUENCE(3) with SWAP(3). */
+ case BUILD_TUPLE:
+ if (nextop == UNPACK_SEQUENCE && oparg == bb->b_instr[i+1].i_oparg) {
+ switch(oparg) {
+ case 1:
+ INSTR_SET_OP0(inst, NOP);
+ INSTR_SET_OP0(&bb->b_instr[i + 1], NOP);
+ continue;
+ case 2:
+ case 3:
+ INSTR_SET_OP0(inst, NOP);
+ bb->b_instr[i+1].i_opcode = SWAP;
+ continue;
+ }
+ }
+ if (i >= oparg) {
+ if (fold_tuple_on_constants(const_cache, inst-oparg, oparg, consts)) {
+ goto error;
+ }
+ }
+ break;
+ case POP_JUMP_IF_NOT_NONE:
+ case POP_JUMP_IF_NONE:
+ switch (target->i_opcode) {
+ case JUMP:
+ i -= jump_thread(inst, target, inst->i_opcode);
+ }
+ break;
+ case POP_JUMP_IF_FALSE:
+ switch (target->i_opcode) {
+ case JUMP:
+ i -= jump_thread(inst, target, POP_JUMP_IF_FALSE);
+ }
+ break;
+ case POP_JUMP_IF_TRUE:
+ switch (target->i_opcode) {
+ case JUMP:
+ i -= jump_thread(inst, target, POP_JUMP_IF_TRUE);
+ }
+ break;
+ case JUMP:
+ switch (target->i_opcode) {
+ case JUMP:
+ i -= jump_thread(inst, target, JUMP);
+ }
+ break;
+ case FOR_ITER:
+ if (target->i_opcode == JUMP) {
+ /* This will not work now because the jump (at target) could
+ * be forward or backward and FOR_ITER only jumps forward. We
+ * can re-enable this if ever we implement a backward version
+ * of FOR_ITER.
+ */
+ /*
+ i -= jump_thread(inst, target, FOR_ITER);
+ */
+ }
+ break;
+ case SWAP:
+ if (oparg == 1) {
+ INSTR_SET_OP0(inst, NOP);
+ break;
+ }
+ if (swaptimize(bb, &i) < 0) {
+ goto error;
+ }
+ apply_static_swaps(bb, i);
+ break;
+ case KW_NAMES:
+ break;
+ case PUSH_NULL:
+ if (nextop == LOAD_GLOBAL && (inst[1].i_oparg & 1) == 0) {
+ INSTR_SET_OP0(inst, NOP);
+ inst[1].i_oparg |= 1;
+ }
+ break;
+ default:
+ /* All HAS_CONST opcodes should be handled with LOAD_CONST */
+ assert (!HAS_CONST(inst->i_opcode));
+ }
+ }
+ return SUCCESS;
+error:
+ return ERROR;
+}
+
+
+/* Perform optimizations on a control flow graph.
+ The consts object should still be in list form to allow new constants
+ to be appended.
+
+ Code transformations that reduce code size initially fill the gaps with
+ NOPs. Later those NOPs are removed.
+*/
+static int
+optimize_cfg(cfg_builder *g, PyObject *consts, PyObject *const_cache)
+{
+ assert(PyDict_CheckExact(const_cache));
+ RETURN_IF_ERROR(check_cfg(g));
+ eliminate_empty_basic_blocks(g);
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ RETURN_IF_ERROR(inline_small_exit_blocks(b));
+ }
+ assert(no_empty_basic_blocks(g));
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ RETURN_IF_ERROR(optimize_basic_block(const_cache, b, consts));
+ assert(b->b_predecessors == 0);
+ }
+ RETURN_IF_ERROR(remove_redundant_nops_and_pairs(g->g_entryblock));
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ RETURN_IF_ERROR(inline_small_exit_blocks(b));
+ }
+ RETURN_IF_ERROR(mark_reachable(g->g_entryblock));
+
+ /* Delete unreachable instructions */
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ if (b->b_predecessors == 0) {
+ b->b_iused = 0;
+ }
+ }
+ for (basicblock *b = g->g_entryblock; b != NULL; b = b->b_next) {
+ remove_redundant_nops(b);
+ }
+ eliminate_empty_basic_blocks(g);
+ assert(no_redundant_nops(g));
+ RETURN_IF_ERROR(remove_redundant_jumps(g));
+ return SUCCESS;
+}
+
+// helper functions for add_checks_for_loads_of_unknown_variables
+static inline void
+maybe_push(basicblock *b, uint64_t unsafe_mask, basicblock ***sp)
+{
+ // Push b if the unsafe mask is giving us any new information.
+ // To avoid overflowing the stack, only allow each block once.
+ // Use b->b_visited=1 to mean that b is currently on the stack.
+ uint64_t both = b->b_unsafe_locals_mask | unsafe_mask;
+ if (b->b_unsafe_locals_mask != both) {
+ b->b_unsafe_locals_mask = both;
+ // More work left to do.
+ if (!b->b_visited) {
+ // not on the stack, so push it.
+ *(*sp)++ = b;
+ b->b_visited = 1;
+ }
+ }
+}
+
+static void
+scan_block_for_locals(basicblock *b, basicblock ***sp)
+{
+ // bit i is set if local i is potentially uninitialized
+ uint64_t unsafe_mask = b->b_unsafe_locals_mask;
+ for (int i = 0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ assert(instr->i_opcode != EXTENDED_ARG);
+ assert(!IS_SUPERINSTRUCTION_OPCODE(instr->i_opcode));
+ if (instr->i_except != NULL) {
+ maybe_push(instr->i_except, unsafe_mask, sp);
+ }
+ if (instr->i_oparg >= 64) {
+ continue;
+ }
+ assert(instr->i_oparg >= 0);
+ uint64_t bit = (uint64_t)1 << instr->i_oparg;
+ switch (instr->i_opcode) {
+ case DELETE_FAST:
+ unsafe_mask |= bit;
+ break;
+ case STORE_FAST:
+ unsafe_mask &= ~bit;
+ break;
+ case LOAD_FAST_CHECK:
+ // If this doesn't raise, then the local is defined.
+ unsafe_mask &= ~bit;
+ break;
+ case LOAD_FAST:
+ if (unsafe_mask & bit) {
+ instr->i_opcode = LOAD_FAST_CHECK;
+ }
+ unsafe_mask &= ~bit;
+ break;
+ }
+ }
+ if (b->b_next && BB_HAS_FALLTHROUGH(b)) {
+ maybe_push(b->b_next, unsafe_mask, sp);
+ }
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(b);
+ if (last && is_jump(last)) {
+ assert(last->i_target != NULL);
+ maybe_push(last->i_target, unsafe_mask, sp);
+ }
+}
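
A standalone sketch of the bitmask bookkeeping, using invented opcode values
and an invented instruction sequence (bit i of the mask is set while local i
may be uninitialized):

#include <stdint.h>
#include <stdio.h>

enum { STORE_FAST, DELETE_FAST, LOAD_FAST };

int main(void)
{
    /* locals 3 and 5 start out possibly uninitialized */
    uint64_t unsafe_mask = ((uint64_t)1 << 3) | ((uint64_t)1 << 5);
    struct { int opcode, oparg; } code[] = {
        {STORE_FAST, 3},    /* defines local 3 */
        {LOAD_FAST, 3},     /* safe: stays LOAD_FAST */
        {DELETE_FAST, 3},   /* local 3 is unsafe again */
        {LOAD_FAST, 3},     /* unsafe: becomes LOAD_FAST_CHECK */
        {LOAD_FAST, 5},     /* unsafe: becomes LOAD_FAST_CHECK */
    };
    for (int i = 0; i < 5; i++) {
        uint64_t bit = (uint64_t)1 << code[i].oparg;
        switch (code[i].opcode) {
        case DELETE_FAST:
            unsafe_mask |= bit;
            break;
        case STORE_FAST:
            unsafe_mask &= ~bit;
            break;
        case LOAD_FAST:
            printf("local %d: %s\n", code[i].oparg,
                   (unsafe_mask & bit) ? "LOAD_FAST_CHECK" : "LOAD_FAST");
            unsafe_mask &= ~bit;   /* if it didn't raise, it is defined now */
            break;
        }
    }
    return 0;
}
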
+
+static int
+fast_scan_many_locals(basicblock *entryblock, int nlocals)
+{
+ assert(nlocals > 64);
+ Py_ssize_t *states = PyMem_Calloc(nlocals - 64, sizeof(Py_ssize_t));
+ if (states == NULL) {
+ PyErr_NoMemory();
+ return ERROR;
+ }
+ Py_ssize_t blocknum = 0;
+ // state[i - 64] == blocknum if local i is guaranteed to
+ // be initialized, i.e., if it has had a previous LOAD_FAST or
+ // STORE_FAST within that basicblock (not followed by DELETE_FAST).
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ blocknum++;
+ for (int i = 0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ assert(instr->i_opcode != EXTENDED_ARG);
+ assert(!IS_SUPERINSTRUCTION_OPCODE(instr->i_opcode));
+ int arg = instr->i_oparg;
+ if (arg < 64) {
+ continue;
+ }
+ assert(arg >= 0);
+ switch (instr->i_opcode) {
+ case DELETE_FAST:
+ states[arg - 64] = blocknum - 1;
+ break;
+ case STORE_FAST:
+ states[arg - 64] = blocknum;
+ break;
+ case LOAD_FAST:
+ if (states[arg - 64] != blocknum) {
+ instr->i_opcode = LOAD_FAST_CHECK;
+ }
+ states[arg - 64] = blocknum;
+ break;
+ }
+ }
+ }
+ PyMem_Free(states);
+ return SUCCESS;
+}
+
+static int
+remove_unused_consts(basicblock *entryblock, PyObject *consts)
+{
+ assert(PyList_CheckExact(consts));
+ Py_ssize_t nconsts = PyList_GET_SIZE(consts);
+ if (nconsts == 0) {
+ return SUCCESS; /* nothing to do */
+ }
+
+ Py_ssize_t *index_map = NULL;
+ Py_ssize_t *reverse_index_map = NULL;
+ int err = ERROR;
+
+ index_map = PyMem_Malloc(nconsts * sizeof(Py_ssize_t));
+ if (index_map == NULL) {
+ goto end;
+ }
+ for (Py_ssize_t i = 1; i < nconsts; i++) {
+ index_map[i] = -1;
+ }
+ // The first constant may be the docstring; always keep it.
+ index_map[0] = 0;
+
+ /* mark used consts */
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ for (int i = 0; i < b->b_iused; i++) {
+ if (HAS_CONST(b->b_instr[i].i_opcode)) {
+ int index = b->b_instr[i].i_oparg;
+ index_map[index] = index;
+ }
+ }
+ }
+ /* now index_map[i] == i if consts[i] is used, -1 otherwise */
+ /* condense consts */
+ Py_ssize_t n_used_consts = 0;
+ for (Py_ssize_t i = 0; i < nconsts; i++) {
+ if (index_map[i] != -1) {
+ assert(index_map[i] == i);
+ index_map[n_used_consts++] = index_map[i];
+ }
+ }
+ if (n_used_consts == nconsts) {
+ /* nothing to do */
+ err = SUCCESS;
+ goto end;
+ }
+
+ /* move all used consts to the beginning of the consts list */
+ assert(n_used_consts < nconsts);
+ for (Py_ssize_t i = 0; i < n_used_consts; i++) {
+ Py_ssize_t old_index = index_map[i];
+ assert(i <= old_index && old_index < nconsts);
+ if (i != old_index) {
+ PyObject *value = PyList_GET_ITEM(consts, index_map[i]);
+ assert(value != NULL);
+ PyList_SetItem(consts, i, Py_NewRef(value));
+ }
+ }
+
+ /* truncate the consts list at its new size */
+ if (PyList_SetSlice(consts, n_used_consts, nconsts, NULL) < 0) {
+ goto end;
+ }
+ /* adjust const indices in the bytecode */
+ reverse_index_map = PyMem_Malloc(nconsts * sizeof(Py_ssize_t));
+ if (reverse_index_map == NULL) {
+ goto end;
+ }
+ for (Py_ssize_t i = 0; i < nconsts; i++) {
+ reverse_index_map[i] = -1;
+ }
+ for (Py_ssize_t i = 0; i < n_used_consts; i++) {
+ assert(index_map[i] != -1);
+ assert(reverse_index_map[index_map[i]] == -1);
+ reverse_index_map[index_map[i]] = i;
+ }
+
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ for (int i = 0; i < b->b_iused; i++) {
+ if (HAS_CONST(b->b_instr[i].i_opcode)) {
+ int index = b->b_instr[i].i_oparg;
+ assert(reverse_index_map[index] >= 0);
+ assert(reverse_index_map[index] < n_used_consts);
+ b->b_instr[i].i_oparg = (int)reverse_index_map[index];
+ }
+ }
+ }
+
+ err = SUCCESS;
+end:
+ PyMem_Free(index_map);
+ PyMem_Free(reverse_index_map);
+ return err;
+}
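
The result is visible through co_consts. A sketch, assuming a 3.12 build where the optimizer has already folded the dead branch by the time this pass runs:

    def h():
        if 0:                     # branch removed by optimize_cfg...
            unused = "never", 12345
        return 1

    # ...so its constants are dropped here; only the docstring slot
    # (None) and 1 should remain:
    print(h.__code__.co_consts)   # expected: (None, 1)
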
+
+
+
+static int
+add_checks_for_loads_of_uninitialized_variables(basicblock *entryblock,
+ int nlocals,
+ int nparams)
+{
+ if (nlocals == 0) {
+ return SUCCESS;
+ }
+ if (nlocals > 64) {
+ // To avoid O(nlocals**2) compilation, locals beyond the first
+ // 64 are only analyzed one basicblock at a time: initialization
+ // info is not passed between basicblocks.
+ if (fast_scan_many_locals(entryblock, nlocals) < 0) {
+ return ERROR;
+ }
+ nlocals = 64;
+ }
+ basicblock **stack = make_cfg_traversal_stack(entryblock);
+ if (stack == NULL) {
+ return ERROR;
+ }
+ basicblock **sp = stack;
+
+ // First origin of being uninitialized:
+ // The non-parameter locals in the entry block.
+ uint64_t start_mask = 0;
+ for (int i = nparams; i < nlocals; i++) {
+ start_mask |= (uint64_t)1 << i;
+ }
+ maybe_push(entryblock, start_mask, &sp);
+
+ // Second origin of being uninitialized:
+ // There could be DELETE_FAST somewhere, so
+ // be sure to scan each basicblock at least once.
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ scan_block_for_locals(b, &sp);
+ }
+ // Now propagate the uncertainty from the origins we found: Use
+ // LOAD_FAST_CHECK for any LOAD_FAST where the local could be undefined.
+ while (sp > stack) {
+ basicblock *b = *--sp;
+ // mark as no longer on stack
+ b->b_visited = 0;
+ scan_block_for_locals(b, &sp);
+ }
+ PyMem_Free(stack);
+ return SUCCESS;
+}
+
+
+static int
+mark_warm(basicblock *entryblock) {
+ basicblock **stack = make_cfg_traversal_stack(entryblock);
+ if (stack == NULL) {
+ return ERROR;
+ }
+ basicblock **sp = stack;
+
+ *sp++ = entryblock;
+ entryblock->b_visited = 1;
+ while (sp > stack) {
+ basicblock *b = *(--sp);
+ assert(!b->b_except_handler);
+ b->b_warm = 1;
+ basicblock *next = b->b_next;
+ if (next && BB_HAS_FALLTHROUGH(b) && !next->b_visited) {
+ *sp++ = next;
+ next->b_visited = 1;
+ }
+ for (int i=0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ if (is_jump(instr) && !instr->i_target->b_visited) {
+ *sp++ = instr->i_target;
+ instr->i_target->b_visited = 1;
+ }
+ }
+ }
+ PyMem_Free(stack);
+ return SUCCESS;
+}
+
+static int
+mark_cold(basicblock *entryblock) {
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ assert(!b->b_cold && !b->b_warm);
+ }
+ if (mark_warm(entryblock) < 0) {
+ return ERROR;
+ }
+
+ basicblock **stack = make_cfg_traversal_stack(entryblock);
+ if (stack == NULL) {
+ return ERROR;
+ }
+
+ basicblock **sp = stack;
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ if (b->b_except_handler) {
+ assert(!b->b_warm);
+ *sp++ = b;
+ b->b_visited = 1;
+ }
+ }
+
+ while (sp > stack) {
+ basicblock *b = *(--sp);
+ b->b_cold = 1;
+ basicblock *next = b->b_next;
+ if (next && BB_HAS_FALLTHROUGH(b)) {
+ if (!next->b_warm && !next->b_visited) {
+ *sp++ = next;
+ next->b_visited = 1;
+ }
+ }
+ for (int i = 0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ if (is_jump(instr)) {
+ assert(i == b->b_iused - 1);
+ basicblock *target = b->b_instr[i].i_target;
+ if (!target->b_warm && !target->b_visited) {
+ *sp++ = target;
+ target->b_visited = 1;
+ }
+ }
+ }
+ }
+ PyMem_Free(stack);
+ return SUCCESS;
+}
+
+
+static int
+push_cold_blocks_to_end(cfg_builder *g, int code_flags) {
+ basicblock *entryblock = g->g_entryblock;
+ if (entryblock->b_next == NULL) {
+ /* single basicblock, no need to reorder */
+ return SUCCESS;
+ }
+ RETURN_IF_ERROR(mark_cold(entryblock));
+
+ /* If we have a cold block with fallthrough to a warm block, add */
+ /* an explicit jump instead of fallthrough */
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ if (b->b_cold && BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_next->b_warm) {
+ basicblock *explicit_jump = cfg_builder_new_block(g);
+ if (explicit_jump == NULL) {
+ return ERROR;
+ }
+ basicblock_addop(explicit_jump, JUMP, b->b_next->b_label.id, NO_LOCATION);
+ explicit_jump->b_cold = 1;
+ explicit_jump->b_next = b->b_next;
+ b->b_next = explicit_jump;
+
+ /* set target */
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(explicit_jump);
+ last->i_target = explicit_jump->b_next;
+ }
+ }
+
+ assert(!entryblock->b_cold); /* First block can't be cold */
+ basicblock *cold_blocks = NULL;
+ basicblock *cold_blocks_tail = NULL;
+
+ basicblock *b = entryblock;
+ while (b->b_next) {
+ assert(!b->b_cold);
+ while (b->b_next && !b->b_next->b_cold) {
+ b = b->b_next;
+ }
+ if (b->b_next == NULL) {
+ /* no more cold blocks */
+ break;
+ }
+
+ /* b->b_next is the beginning of a cold streak */
+ assert(!b->b_cold && b->b_next->b_cold);
+
+ basicblock *b_end = b->b_next;
+ while (b_end->b_next && b_end->b_next->b_cold) {
+ b_end = b_end->b_next;
+ }
+
+ /* b_end is the end of the cold streak */
+ assert(b_end && b_end->b_cold);
+ assert(b_end->b_next == NULL || !b_end->b_next->b_cold);
+
+ if (cold_blocks == NULL) {
+ cold_blocks = b->b_next;
+ }
+ else {
+ cold_blocks_tail->b_next = b->b_next;
+ }
+ cold_blocks_tail = b_end;
+ b->b_next = b_end->b_next;
+ b_end->b_next = NULL;
+ }
+ assert(b != NULL && b->b_next == NULL);
+ b->b_next = cold_blocks;
+
+ if (cold_blocks != NULL) {
+ RETURN_IF_ERROR(remove_redundant_jumps(g));
+ }
+ return SUCCESS;
+}
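
In practice the cold blocks are exception handlers and whatever is reachable only from them. A quick way to see the reordering, assuming a 3.12 build:

    import dis

    def k(seq):
        try:
            return seq[0]
        except IndexError:
            return None

    # The except handler is an exception-table target, so it is marked
    # cold and pushed past the hot path: its instructions appear at the
    # end of the disassembly, after the main return.
    dis.dis(k)
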
+
+int
+_PyCfg_OptimizeCodeUnit(cfg_builder *g, PyObject *consts, PyObject *const_cache,
+ int code_flags, int nlocals, int nparams)
+{
+ assert(cfg_builder_check(g));
+ /** Preprocessing **/
+ /* Map labels to targets and mark exception handlers */
+ RETURN_IF_ERROR(translate_jump_labels_to_targets(g->g_entryblock));
+ RETURN_IF_ERROR(mark_except_handlers(g->g_entryblock));
+ RETURN_IF_ERROR(label_exception_targets(g->g_entryblock));
+
+ /** Optimization **/
+ RETURN_IF_ERROR(optimize_cfg(g, consts, const_cache));
+ RETURN_IF_ERROR(remove_unused_consts(g->g_entryblock, consts));
+ RETURN_IF_ERROR(
+ add_checks_for_loads_of_uninitialized_variables(
+ g->g_entryblock, nlocals, nparams));
+
+ RETURN_IF_ERROR(push_cold_blocks_to_end(g, code_flags));
+ return SUCCESS;
+}
+
+void
+_PyCfg_ConvertExceptionHandlersToNops(basicblock *entryblock)
+{
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ for (int i = 0; i < b->b_iused; i++) {
+ cfg_instr *instr = &b->b_instr[i];
+ if (is_block_push(instr) || instr->i_opcode == POP_BLOCK) {
+ INSTR_SET_OP0(instr, NOP);
+ }
+ }
+ }
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ remove_redundant_nops(b);
+ }
+}
+
+static inline bool
+is_exit_without_lineno(basicblock *b) {
+ if (!basicblock_exits_scope(b)) {
+ return false;
+ }
+ for (int i = 0; i < b->b_iused; i++) {
+ if (b->b_instr[i].i_loc.lineno >= 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/* PEP 626 mandates that the f_lineno of a frame is correct
+ * after a frame terminates. It would be prohibitively expensive
+ * to continuously update the f_lineno field at runtime,
+ * so we make sure that all exiting instructions (raises and returns)
+ * have a valid line number, allowing us to compute f_lineno lazily.
+ * We can do this by duplicating the exit blocks without line number
+ * so that none have more than one predecessor. We can then safely
+ * copy the line number from the sole predecessor block.
+ */
+static int
+duplicate_exits_without_lineno(cfg_builder *g)
+{
+ assert(no_empty_basic_blocks(g));
+ /* Copy all exit blocks without line number that are targets of a jump.
+ */
+ basicblock *entryblock = g->g_entryblock;
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(b);
+ assert(last != NULL);
+ if (is_jump(last)) {
+ basicblock *target = last->i_target;
+ if (is_exit_without_lineno(target) && target->b_predecessors > 1) {
+ basicblock *new_target = copy_basicblock(g, target);
+ if (new_target == NULL) {
+ return ERROR;
+ }
+ new_target->b_instr[0].i_loc = last->i_loc;
+ last->i_target = new_target;
+ target->b_predecessors--;
+ new_target->b_predecessors = 1;
+ new_target->b_next = target->b_next;
+ target->b_next = new_target;
+ }
+ }
+ }
+
+ /* Any remaining reachable exit blocks without line number can only be reached by
+ * fall through, and thus can only have a single predecessor */
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ if (BB_HAS_FALLTHROUGH(b) && b->b_next && b->b_iused > 0) {
+ if (is_exit_without_lineno(b->b_next)) {
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(b);
+ assert(last != NULL);
+ b->b_next->b_instr[0].i_loc = last->i_loc;
+ }
+ }
+ }
+ return SUCCESS;
+}
+
+
+/* If an instruction has no line number, but its predecessor in the BB does,
+ * then copy the line number. If a successor block has no line number, and only
+ * one predecessor, then inherit the line number.
+ * This ensures that all exit blocks (with one predecessor) receive a line number.
+ * Also reduces the size of the line number table,
+ * but has no impact on the generated line number events.
+ */
+static void
+propagate_line_numbers(basicblock *entryblock) {
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(b);
+ if (last == NULL) {
+ continue;
+ }
+
+ location prev_location = NO_LOCATION;
+ for (int i = 0; i < b->b_iused; i++) {
+ if (b->b_instr[i].i_loc.lineno < 0) {
+ b->b_instr[i].i_loc = prev_location;
+ }
+ else {
+ prev_location = b->b_instr[i].i_loc;
+ }
+ }
+ if (BB_HAS_FALLTHROUGH(b) && b->b_next->b_predecessors == 1) {
+ assert(b->b_next->b_iused);
+ if (b->b_next->b_instr[0].i_loc.lineno < 0) {
+ b->b_next->b_instr[0].i_loc = prev_location;
+ }
+ }
+ if (is_jump(last)) {
+ basicblock *target = last->i_target;
+ if (target->b_predecessors == 1) {
+ if (target->b_instr[0].i_loc.lineno < 0) {
+ target->b_instr[0].i_loc = prev_location;
+ }
+ }
+ }
+ }
+}
+
+/* Make sure that all returns have a line number, even if early passes
+ * have failed to propagate a correct line number.
+ * The resulting line number may not be correct according to PEP 626,
+ * but should be "good enough", and no worse than in older versions. */
+static void
+guarantee_lineno_for_exits(basicblock *entryblock, int firstlineno) {
+ int lineno = firstlineno;
+ assert(lineno > 0);
+ for (basicblock *b = entryblock; b != NULL; b = b->b_next) {
+ cfg_instr *last = _PyCfg_BasicblockLastInstr(b);
+ if (last == NULL) {
+ continue;
+ }
+ if (last->i_loc.lineno < 0) {
+ if (last->i_opcode == RETURN_VALUE) {
+ for (int i = 0; i < b->b_iused; i++) {
+ assert(b->b_instr[i].i_loc.lineno < 0);
+
+ b->b_instr[i].i_loc.lineno = lineno;
+ }
+ }
+ }
+ else {
+ lineno = last->i_loc.lineno;
+ }
+ }
+}
+
+int
+_PyCfg_ResolveLineNumbers(cfg_builder *g, int firstlineno)
+{
+ RETURN_IF_ERROR(duplicate_exits_without_lineno(g));
+ propagate_line_numbers(g->g_entryblock);
+ guarantee_lineno_for_exits(g->g_entryblock, firstlineno);
+ return SUCCESS;
+}
+
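Taken together these line-number passes guarantee that every exit carries a usable location. One way to observe that, assuming a 3.12 build:

    import dis

    def m(flag):
        if flag:
            raise ValueError

    # Every exiting instruction ends up with a line number, including
    # the implicit "return None" the compiler appends:
    for ins in dis.get_instructions(m):
        print(ins.opname, ins.positions.lineno)
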
diff --git a/Python/opcode_metadata.h b/Python/opcode_metadata.h
index 08032519f383..5c984eb4ae12 100644
--- a/Python/opcode_metadata.h
+++ b/Python/opcode_metadata.h
@@ -3,7 +3,7 @@
// Python/bytecodes.c
// Do not edit!
-#ifndef NEED_OPCODE_TABLES
+#ifndef NEED_OPCODE_METADATA
extern int _PyOpcode_num_popped(int opcode, int oparg, bool jump);
#else
int
@@ -349,7 +349,7 @@ _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
}
#endif
-#ifndef NEED_OPCODE_TABLES
+#ifndef NEED_OPCODE_METADATA
extern int _PyOpcode_num_pushed(int opcode, int oparg, bool jump);
#else
int
@@ -701,7 +701,7 @@ struct opcode_metadata {
enum InstructionFormat instr_format;
};
-#ifndef NEED_OPCODE_TABLES
+#ifndef NEED_OPCODE_METADATA
extern const struct opcode_metadata _PyOpcode_opcode_metadata[256];
#else
const struct opcode_metadata _PyOpcode_opcode_metadata[256] = {
diff --git a/Tools/build/generate_opcode_h.py b/Tools/build/generate_opcode_h.py
index 9b2112f7f5f3..614b17df7409 100644
--- a/Tools/build/generate_opcode_h.py
+++ b/Tools/build/generate_opcode_h.py
@@ -60,7 +60,7 @@ def write_int_array_from_ops(name, ops, out):
bits = 0
for op in ops:
bits |= 1<<op
- out.write(f"static const uint32_t {name}[9] = {{\n")
+ out.write(f"const uint32_t {name}[9] = {{\n")
for i in range(9):
out.write(f" {bits & UINT32_MASK}U,\n")
bits >>= 32
@@ -130,6 +130,8 @@ def main(opcode_py, outfile='Include/opcode.h', internaloutfile='Include/interna
for name, op in specialized_opmap.items():
fobj.write(DEFINE.format(name, op))
+ iobj.write("\nextern const uint32_t _PyOpcode_RelativeJump[9];\n")
+ iobj.write("\nextern const uint32_t _PyOpcode_Jump[9];\n")
iobj.write("\nextern const uint8_t _PyOpcode_Caches[256];\n")
iobj.write("\nextern const uint8_t _PyOpcode_Deopt[256];\n")
iobj.write("\n#ifdef NEED_OPCODE_TABLES\n")
diff --git a/Tools/cases_generator/generate_cases.py b/Tools/cases_generator/generate_cases.py
index a0bba65545d4..62ddeac0265a 100644
--- a/Tools/cases_generator/generate_cases.py
+++ b/Tools/cases_generator/generate_cases.py
@@ -930,7 +930,7 @@ def write_function(
direction: str, data: list[tuple[AnyInstruction, str]]
) -> None:
self.out.emit("")
- self.out.emit("#ifndef NEED_OPCODE_TABLES")
+ self.out.emit("#ifndef NEED_OPCODE_METADATA")
self.out.emit(f"extern int _PyOpcode_num_{direction}(int opcode, int oparg, bool jump);")
self.out.emit("#else")
self.out.emit("int")
@@ -999,7 +999,7 @@ def write_metadata(self) -> None:
self.out.emit("")
# Write metadata array declaration
- self.out.emit("#ifndef NEED_OPCODE_TABLES")
+ self.out.emit("#ifndef NEED_OPCODE_METADATA")
self.out.emit("extern const struct opcode_metadata _PyOpcode_opcode_metadata[256];")
self.out.emit("#else")
self.out.emit("const struct opcode_metadata _PyOpcode_opcode_metadata[256] = {")
https://github.com/python/cpython/commit/b0422e140df8fdc83c51cc64a3ed542618…
commit: b0422e140df8fdc83c51cc64a3ed5426188de7f1
branch: main
author: James De Bias <81095953+DBJim(a)users.noreply.github.com>
committer: merwok <merwok(a)netwok.org>
date: 2023-03-31T11:02:47-04:00
summary:
gh-102871: Remove obsolete browsers from webbrowser (#102872)
files:
A Misc/NEWS.d/next/Library/2023-03-21-15-17-07.gh-issue-102871.U9mchn.rst
M Doc/library/webbrowser.rst
M Doc/whatsnew/3.12.rst
M Lib/test/test_webbrowser.py
M Lib/webbrowser.py
diff --git a/Doc/library/webbrowser.rst b/Doc/library/webbrowser.rst
index 734b6321e5a7..61db80420936 100644
--- a/Doc/library/webbrowser.rst
+++ b/Doc/library/webbrowser.rst
@@ -115,13 +115,7 @@ for the controller classes, all defined in this module.
+------------------------+-----------------------------------------+-------+
| ``'firefox'`` | :class:`Mozilla('mozilla')` | |
+------------------------+-----------------------------------------+-------+
-| ``'netscape'`` | :class:`Mozilla('netscape')` | |
-+------------------------+-----------------------------------------+-------+
-| ``'galeon'`` | :class:`Galeon('galeon')` | |
-+------------------------+-----------------------------------------+-------+
-| ``'epiphany'`` | :class:`Galeon('epiphany')` | |
-+------------------------+-----------------------------------------+-------+
-| ``'skipstone'`` | :class:`BackgroundBrowser('skipstone')` | |
+| ``'epiphany'`` | :class:`Epiphany('epiphany')` | |
+------------------------+-----------------------------------------+-------+
| ``'kfmclient'`` | :class:`Konqueror()` | \(1) |
+------------------------+-----------------------------------------+-------+
@@ -129,12 +123,8 @@ for the controller classes, all defined in this module.
+------------------------+-----------------------------------------+-------+
| ``'kfm'`` | :class:`Konqueror()` | \(1) |
+------------------------+-----------------------------------------+-------+
-| ``'mosaic'`` | :class:`BackgroundBrowser('mosaic')` | |
-+------------------------+-----------------------------------------+-------+
| ``'opera'`` | :class:`Opera()` | |
+------------------------+-----------------------------------------+-------+
-| ``'grail'`` | :class:`Grail()` | |
-+------------------------+-----------------------------------------+-------+
| ``'links'`` | :class:`GenericBrowser('links')` | |
+------------------------+-----------------------------------------+-------+
| ``'elinks'`` | :class:`Elinks('elinks')` | |
@@ -176,6 +166,11 @@ Notes:
.. versionadded:: 3.3
Support for Chrome/Chromium has been added.
+.. versionchanged:: 3.12
+ Support for several obsolete browsers has been removed.
+ Removed browsers include Grail, Mosaic, Netscape, Galeon,
+ Skipstone, Iceape, and Firefox versions 35 and below.
+
.. deprecated-removed:: 3.11 3.13
:class:`MacOSX` is deprecated, use :class:`MacOSXOSAScript` instead.
diff --git a/Doc/whatsnew/3.12.rst b/Doc/whatsnew/3.12.rst
index 00a51ab3c137..bd9be531fdd7 100644
--- a/Doc/whatsnew/3.12.rst
+++ b/Doc/whatsnew/3.12.rst
@@ -777,6 +777,10 @@ Removed
*context* parameter instead.
(Contributed by Victor Stinner in :gh:`94172`.)
+* Remove support for obsolete browsers from :mod:`webbrowser`.
+ Removed browsers include: Grail, Mosaic, Netscape, Galeon, Skipstone,
+ Iceape, Firebird, and Firefox versions 35 and below (:gh:`102871`).
+
Porting to Python 3.12
======================
diff --git a/Lib/test/test_webbrowser.py b/Lib/test/test_webbrowser.py
index 9d608d63a01e..147a113c7fd7 100644
--- a/Lib/test/test_webbrowser.py
+++ b/Lib/test/test_webbrowser.py
@@ -11,7 +11,7 @@
if not support.has_subprocess_support:
raise unittest.SkipTest("test webserver requires subprocess")
-URL = 'http://www.example.com'
+URL = 'https://www.example.com'
CMD_NAME = 'test'
@@ -120,34 +120,9 @@ def test_open_new_tab(self):
arguments=['-new-tab', URL])
-class NetscapeCommandTest(CommandTestMixin, unittest.TestCase):
+class EpiphanyCommandTest(CommandTestMixin, unittest.TestCase):
- browser_class = webbrowser.Netscape
-
- def test_open(self):
- self._test('open',
- options=['-raise', '-remote'],
- arguments=['openURL({})'.format(URL)])
-
- def test_open_with_autoraise_false(self):
- self._test('open', kw=dict(autoraise=False),
- options=['-noraise', '-remote'],
- arguments=['openURL({})'.format(URL)])
-
- def test_open_new(self):
- self._test('open_new',
- options=['-raise', '-remote'],
- arguments=['openURL({},new-window)'.format(URL)])
-
- def test_open_new_tab(self):
- self._test('open_new_tab',
- options=['-raise', '-remote'],
- arguments=['openURL({},new-tab)'.format(URL)])
-
-
-class GaleonCommandTest(CommandTestMixin, unittest.TestCase):
-
- browser_class = webbrowser.Galeon
+ browser_class = webbrowser.Epiphany
def test_open(self):
self._test('open',
diff --git a/Lib/webbrowser.py b/Lib/webbrowser.py
index 4336597e68f6..d98c5997d2f4 100755
--- a/Lib/webbrowser.py
+++ b/Lib/webbrowser.py
@@ -292,19 +292,8 @@ class Mozilla(UnixBrowser):
background = True
-class Netscape(UnixBrowser):
- """Launcher class for Netscape browser."""
-
- raise_opts = ["-noraise", "-raise"]
- remote_args = ['-remote', 'openURL(%s%action)']
- remote_action = ""
- remote_action_newwin = ",new-window"
- remote_action_newtab = ",new-tab"
- background = True
-
-
-class Galeon(UnixBrowser):
- """Launcher class for Galeon/Epiphany browsers."""
+class Epiphany(UnixBrowser):
+ """Launcher class for Epiphany browser."""
raise_opts = ["-noraise", ""]
remote_args = ['%action', '%s']
@@ -402,44 +391,6 @@ def open(self, url, new=0, autoraise=True):
return (p.poll() is None)
-class Grail(BaseBrowser):
- # There should be a way to maintain a connection to Grail, but the
- # Grail remote control protocol doesn't really allow that at this
- # point. It probably never will!
- def _find_grail_rc(self):
- import glob
- import pwd
- import socket
- import tempfile
- tempdir = os.path.join(tempfile.gettempdir(),
- ".grail-unix")
- user = pwd.getpwuid(os.getuid())[0]
- filename = os.path.join(glob.escape(tempdir), glob.escape(user) + "-*")
- maybes = glob.glob(filename)
- if not maybes:
- return None
- s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- for fn in maybes:
- # need to PING each one until we find one that's live
- try:
- s.connect(fn)
- except OSError:
- # no good; attempt to clean it out, but don't fail:
- try:
- os.unlink(fn)
- except OSError:
- pass
- else:
- return s
-
- def _remote(self, action):
- s = self._find_grail_rc()
- if not s:
- return 0
- s.send(action)
- s.close()
- return 1
-
def open(self, url, new=0, autoraise=True):
sys.audit("webbrowser.open", url)
if new:
@@ -475,35 +426,25 @@ def register_X_browsers():
if "KDE_FULL_SESSION" in os.environ and shutil.which("kfmclient"):
register("kfmclient", Konqueror, Konqueror("kfmclient"))
+ # Common symbolic link for the default X11 browser
if shutil.which("x-www-browser"):
register("x-www-browser", None, BackgroundBrowser("x-www-browser"))
# The Mozilla browsers
- for browser in ("firefox", "iceweasel", "iceape", "seamonkey"):
+ for browser in ("firefox", "iceweasel", "seamonkey", "mozilla-firefox",
+ "mozilla"):
if shutil.which(browser):
register(browser, None, Mozilla(browser))
- # The Netscape and old Mozilla browsers
- for browser in ("mozilla-firefox",
- "mozilla-firebird", "firebird",
- "mozilla", "netscape"):
- if shutil.which(browser):
- register(browser, None, Netscape(browser))
-
# Konqueror/kfm, the KDE browser.
if shutil.which("kfm"):
register("kfm", Konqueror, Konqueror("kfm"))
elif shutil.which("konqueror"):
register("konqueror", Konqueror, Konqueror("konqueror"))
- # Gnome's Galeon and Epiphany
- for browser in ("galeon", "epiphany"):
- if shutil.which(browser):
- register(browser, None, Galeon(browser))
-
- # Skipstone, another Gtk/Mozilla based browser
- if shutil.which("skipstone"):
- register("skipstone", None, BackgroundBrowser("skipstone"))
+ # Gnome's Epiphany
+ if shutil.which("epiphany"):
+ register("epiphany", None, Epiphany("epiphany"))
# Google Chrome/Chromium browsers
for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"):
@@ -514,13 +455,6 @@ def register_X_browsers():
if shutil.which("opera"):
register("opera", None, Opera("opera"))
- # Next, Mosaic -- old but still in use.
- if shutil.which("mosaic"):
- register("mosaic", None, BackgroundBrowser("mosaic"))
-
- # Grail, the Python browser. Does anybody still use it?
- if shutil.which("grail"):
- register("grail", Grail, None)
def register_standard_browsers():
global _tryorder
@@ -549,7 +483,7 @@ def register_standard_browsers():
# location in 32-bit Windows
edge32 = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"),
"Microsoft\\Edge\\Application\\msedge.exe")
- for browser in ("firefox", "firebird", "seamonkey", "mozilla",
+ for browser in ("firefox", "seamonkey", "mozilla", "chrome",
"opera", edge64, edge32):
if shutil.which(browser):
register(browser, None, BackgroundBrowser(browser))
@@ -570,14 +504,15 @@ def register_standard_browsers():
# Also try console browsers
if os.environ.get("TERM"):
+ # Common symbolic link for the default text-based browser
if shutil.which("www-browser"):
register("www-browser", None, GenericBrowser("www-browser"))
- # The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
+ # The Links/elinks browsers <http://links.twibright.com/>
if shutil.which("links"):
register("links", None, GenericBrowser("links"))
if shutil.which("elinks"):
register("elinks", None, Elinks("elinks"))
- # The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
+ # The Lynx browser <https://lynx.invisible-island.net/>, <http://lynx.browser.org/>
if shutil.which("lynx"):
register("lynx", None, GenericBrowser("lynx"))
# The w3m browser <http://w3m.sourceforge.net/>
@@ -727,7 +662,7 @@ def main():
for o, a in opts:
if o == '-n': new_win = 1
elif o == '-t': new_win = 2
- elif o == '-h' or o == '--help':
+ elif o == '-h' or o == '--help':
print(usage, file=sys.stderr)
sys.exit()
if len(args) != 1:
diff --git a/Misc/NEWS.d/next/Library/2023-03-21-15-17-07.gh-issue-102871.U9mchn.rst b/Misc/NEWS.d/next/Library/2023-03-21-15-17-07.gh-issue-102871.U9mchn.rst
new file mode 100644
index 000000000000..3ef0e74d21ca
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-03-21-15-17-07.gh-issue-102871.U9mchn.rst
@@ -0,0 +1,3 @@
+Remove support for obsolete browsers from :mod:`webbrowser`.
+Removed browsers include Grail, Mosaic, Netscape, Galeon, Skipstone,
+Iceape, Firebird, and Firefox versions 35 and below.
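
The module-level API is unchanged by this cleanup; only the set of auto-registered launchers shrank. A browser that is no longer auto-detected can still be registered by hand, e.g. (a generic sketch, not from the patch):

    import webbrowser

    # Launch the named binary in the background if it is not
    # auto-registered on this platform:
    webbrowser.register("epiphany", None,
                        webbrowser.BackgroundBrowser("epiphany"))
    webbrowser.open_new_tab("https://www.example.com")
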
https://github.com/python/cpython/commit/048d6243d45b9381dd4fcaad28ae9c5d44…
commit: 048d6243d45b9381dd4fcaad28ae9c5d443b8b4b
branch: main
author: Furkan Onder <furkanonder(a)protonmail.com>
committer: terryjreedy <tjreedy(a)udel.edu>
date: 2023-03-31T09:54:17-04:00
summary:
GH-84783: Mention Author for GH-101264 (make slices hashable) (#103146)
Will Bradshaw contributed the original patch on bpo-40603.
---------
Co-authored-by: Terry Jan Reedy <tjreedy(a)udel.edu>
files:
M Doc/whatsnew/3.12.rst
M Misc/NEWS.d/3.12.0a6.rst
diff --git a/Doc/whatsnew/3.12.rst b/Doc/whatsnew/3.12.rst
index e5bcfdecd9a4..00a51ab3c137 100644
--- a/Doc/whatsnew/3.12.rst
+++ b/Doc/whatsnew/3.12.rst
@@ -190,7 +190,7 @@ Other Language Changes
(Contributed by Nikita Sobolev in :gh:`100581`.)
* :class:`slice` objects are now hashable, allowing them to be used as dict keys and
- set items. (Contributed by Furkan Onder in :gh:`101264`.)
+ set items. (Contributed by Will Bradshaw and Furkan Onder in :gh:`101264`.)
New Modules
===========
diff --git a/Misc/NEWS.d/3.12.0a6.rst b/Misc/NEWS.d/3.12.0a6.rst
index 2bcb4c8c854d..f6beb5b7ec3d 100644
--- a/Misc/NEWS.d/3.12.0a6.rst
+++ b/Misc/NEWS.d/3.12.0a6.rst
@@ -189,7 +189,7 @@ not just glibc. This fixes support for musl.
.. nonce: _P5sMa
.. section: Core and Builtins
-Make the slice object hashable.
+Make the slice object hashable. Patch by Will Bradshaw and Furkan Onder.
..
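
Since 3.12 a slice can be used anywhere hashability is required, for example (editor's sketch):

    # Slices can now key a dict, e.g. to memoize computed windows:
    windows = {}
    windows[slice(0, 10)] = "first ten"
    print(windows[slice(0, 10)])                      # first ten
    print(hash(slice(0, 10)) == hash(slice(0, 10)))   # True
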
https://github.com/python/cpython/commit/4664a7cf689946f0c9854cadee7c6aa9c2…
commit: 4664a7cf689946f0c9854cadee7c6aa9c276a8cf
branch: 3.11
author: Miss Islington (bot) <31488909+miss-islington(a)users.noreply.github.com>
committer: miss-islington <31488909+miss-islington(a)users.noreply.github.com>
date: 2023-03-31T05:52:52-07:00
summary:
Quote literal tokens in standard format specifier grammar (GH-102902)
Reported by Damian Dureck: https://mail.python.org/archives/list/docs@python.org/thread/UZTWBJIXC3MBKT…
(cherry picked from commit f6405a46627e1f74c279f712c8776a165b0ba9fd)
Co-authored-by: Petr Viktorin <encukou(a)gmail.com>
files:
M Doc/library/string.rst
diff --git a/Doc/library/string.rst b/Doc/library/string.rst
index 3b96813e6838..5ada82732818 100644
--- a/Doc/library/string.rst
+++ b/Doc/library/string.rst
@@ -310,7 +310,7 @@ non-empty format specification typically modifies the result.
The general form of a *standard format specifier* is:
.. productionlist:: format-spec
- format_spec: [[`fill`]`align`][`sign`][z][#][0][`width`][`grouping_option`][.`precision`][`type`]
+ format_spec: [[`fill`]`align`][`sign`]["z"]["#"]["0"][`width`][`grouping_option`]["." `precision`][`type`]
fill: <any character>
align: "<" | ">" | "=" | "^"
sign: "+" | "-" | " "
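
The quoting is purely a documentation fix; behaviour is unchanged. Each quoted token stands for a literal character in the spec, e.g.:

    # fill "*", align ">", width 12, "." precision 3, type "f":
    print(format(3.14159, "*>12.3f"))   # *******3.142
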