Python-checkins
Threads by month
- ----- 2024 -----
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2009 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2008 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2007 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2006 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2005 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2004 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2003 -----
- December
- November
- October
- September
- August
April 2019
- 3 participants
- 318 discussions
https://github.com/python/cpython/commit/4d29983185bc12ca685a1eb3873bacb8a7…
commit: 4d29983185bc12ca685a1eb3873bacb8a7b67416
branch: master
author: Victor Stinner <vstinner(a)redhat.com>
committer: GitHub <noreply(a)github.com>
date: 2019-04-26T04:08:53+02:00
summary:
bpo-36725: regrtest: add TestResult type (GH-12960)
* Add TestResult and MultiprocessResult types to ensure that results
always have the same fields.
* runtest() now handles KeyboardInterrupt
* accumulate_result() and format_test_result() now take a TestResult
* cleanup_test_droppings() is now called by runtest() and marks the
test as ENV_CHANGED if the test leaks the support.TESTFN file.
* runtest() now includes code "around" the test in the test timing
* Add print_warning() in test.libregrtest.utils to standardize how
libregrtest logs warnings to ease parsing the test output.
* support.unload() is now called with abstest rather than test_name
* Rename 'test' variable/parameter to 'test_name'
* dash_R(): remove the unused the_module parameter
* Remove unused imports
files:
M Lib/test/libregrtest/main.py
M Lib/test/libregrtest/refleak.py
M Lib/test/libregrtest/runtest.py
M Lib/test/libregrtest/runtest_mp.py
M Lib/test/libregrtest/save_env.py
M Lib/test/libregrtest/utils.py
M Lib/test/libregrtest/win_utils.py
M Lib/test/test_regrtest.py
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index d20e1746781f..ef1336a7e233 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -105,26 +105,30 @@ def __init__(self):
# used by --junit-xml
self.testsuite_xml = None
- def accumulate_result(self, test, result):
- ok, test_time, xml_data = result
+ def accumulate_result(self, result):
+ test_name = result.test_name
+ ok = result.result
+
if ok not in (CHILD_ERROR, INTERRUPTED):
- self.test_times.append((test_time, test))
+ self.test_times.append((result.test_time, test_name))
+
if ok == PASSED:
- self.good.append(test)
+ self.good.append(test_name)
elif ok in (FAILED, CHILD_ERROR):
- self.bad.append(test)
+ self.bad.append(test_name)
elif ok == ENV_CHANGED:
- self.environment_changed.append(test)
+ self.environment_changed.append(test_name)
elif ok == SKIPPED:
- self.skipped.append(test)
+ self.skipped.append(test_name)
elif ok == RESOURCE_DENIED:
- self.skipped.append(test)
- self.resource_denieds.append(test)
+ self.skipped.append(test_name)
+ self.resource_denieds.append(test_name)
elif ok == TEST_DID_NOT_RUN:
- self.run_no_tests.append(test)
+ self.run_no_tests.append(test_name)
elif ok != INTERRUPTED:
raise ValueError("invalid test result: %r" % ok)
+ xml_data = result.xml_data
if xml_data:
import xml.etree.ElementTree as ET
for e in xml_data:
@@ -134,7 +138,7 @@ def accumulate_result(self, test, result):
print(xml_data, file=sys.__stderr__)
raise
- def display_progress(self, test_index, test):
+ def display_progress(self, test_index, text):
if self.ns.quiet:
return
@@ -143,7 +147,7 @@ def display_progress(self, test_index, test):
fails = len(self.bad) + len(self.environment_changed)
if fails and not self.ns.pgo:
line = f"{line}/{fails}"
- line = f"[{line}] {test}"
+ line = f"[{line}] {text}"
# add the system load prefix: "load avg: 1.80 "
if self.getloadavg:
@@ -275,13 +279,13 @@ def list_cases(self):
support.verbose = False
support.set_match_tests(self.ns.match_tests)
- for test in self.selected:
- abstest = get_abs_module(self.ns, test)
+ for test_name in self.selected:
+ abstest = get_abs_module(self.ns, test_name)
try:
suite = unittest.defaultTestLoader.loadTestsFromName(abstest)
self._list_cases(suite)
except unittest.SkipTest:
- self.skipped.append(test)
+ self.skipped.append(test_name)
if self.skipped:
print(file=sys.stderr)
@@ -298,19 +302,19 @@ def rerun_failed_tests(self):
print()
print("Re-running failed tests in verbose mode")
self.rerun = self.bad[:]
- for test in self.rerun:
- print("Re-running test %r in verbose mode" % test, flush=True)
- try:
- self.ns.verbose = True
- ok = runtest(self.ns, test)
- except KeyboardInterrupt:
- self.interrupted = True
+ for test_name in self.rerun:
+ print("Re-running test %r in verbose mode" % test_name, flush=True)
+ self.ns.verbose = True
+ ok = runtest(self.ns, test_name)
+
+ if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}:
+ self.bad.remove(test_name)
+
+ if ok.result == INTERRUPTED:
# print a newline separate from the ^C
print()
+ self.interrupted = True
break
- else:
- if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}:
- self.bad.remove(test)
else:
if self.bad:
print(count(len(self.bad), 'test'), "failed again:")
@@ -348,8 +352,8 @@ def display_result(self):
self.test_times.sort(reverse=True)
print()
print("10 slowest tests:")
- for time, test in self.test_times[:10]:
- print("- %s: %s" % (test, format_duration(time)))
+ for test_time, test in self.test_times[:10]:
+ print("- %s: %s" % (test, format_duration(test_time)))
if self.bad:
print()
@@ -387,10 +391,10 @@ def run_tests_sequential(self):
print("Run tests sequentially")
previous_test = None
- for test_index, test in enumerate(self.tests, 1):
+ for test_index, test_name in enumerate(self.tests, 1):
start_time = time.monotonic()
- text = test
+ text = test_name
if previous_test:
text = '%s -- %s' % (text, previous_test)
self.display_progress(test_index, text)
@@ -398,22 +402,20 @@ def run_tests_sequential(self):
if self.tracer:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
- cmd = ('result = runtest(self.ns, test); '
- 'self.accumulate_result(test, result)')
+ cmd = ('result = runtest(self.ns, test_name); '
+ 'self.accumulate_result(result)')
ns = dict(locals())
self.tracer.runctx(cmd, globals=globals(), locals=ns)
result = ns['result']
else:
- try:
- result = runtest(self.ns, test)
- except KeyboardInterrupt:
- self.interrupted = True
- self.accumulate_result(test, (INTERRUPTED, None, None))
- break
- else:
- self.accumulate_result(test, result)
-
- previous_test = format_test_result(test, result[0])
+ result = runtest(self.ns, test_name)
+ self.accumulate_result(result)
+
+ if result.result == INTERRUPTED:
+ self.interrupted = True
+ break
+
+ previous_test = format_test_result(result)
test_time = time.monotonic() - start_time
if test_time >= PROGRESS_MIN_TIME:
previous_test = "%s in %s" % (previous_test, format_duration(test_time))
@@ -441,8 +443,8 @@ def run_tests_sequential(self):
def _test_forever(self, tests):
while True:
- for test in tests:
- yield test
+ for test_name in tests:
+ yield test_name
if self.bad:
return
if self.ns.fail_env_changed and self.environment_changed:
diff --git a/Lib/test/libregrtest/refleak.py b/Lib/test/libregrtest/refleak.py
index 235d6bfd3af6..8d221232eb6c 100644
--- a/Lib/test/libregrtest/refleak.py
+++ b/Lib/test/libregrtest/refleak.py
@@ -1,4 +1,3 @@
-import errno
import os
import re
import sys
@@ -18,7 +17,7 @@ def _get_dump(cls):
cls._abc_negative_cache, cls._abc_negative_cache_version)
-def dash_R(ns, the_module, test_name, test_func):
+def dash_R(ns, test_name, test_func):
"""Run a test multiple times, looking for reference leaks.
Returns:
diff --git a/Lib/test/libregrtest/runtest.py b/Lib/test/libregrtest/runtest.py
index 0a9533c8a573..55913b3842d7 100644
--- a/Lib/test/libregrtest/runtest.py
+++ b/Lib/test/libregrtest/runtest.py
@@ -1,4 +1,6 @@
+import collections
import faulthandler
+import functools
import importlib
import io
import os
@@ -9,6 +11,7 @@
from test import support
from test.libregrtest.refleak import dash_R, clear_caches
from test.libregrtest.save_env import saved_test_environment
+from test.libregrtest.utils import print_warning
# Test result constants.
@@ -55,9 +58,17 @@
NOTTESTS = set()
-def format_test_result(test_name, result):
- fmt = _FORMAT_TEST_RESULT.get(result, "%s")
- return fmt % test_name
+# used by --findleaks, store for gc.garbage
+found_garbage = []
+
+
+def format_test_result(result):
+ fmt = _FORMAT_TEST_RESULT.get(result.result, "%s")
+ return fmt % result.test_name
+
+
+def findtestdir(path=None):
+ return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
@@ -73,48 +84,34 @@ def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
return stdtests + sorted(tests)
-def get_abs_module(ns, test):
- if test.startswith('test.') or ns.testdir:
- return test
+def get_abs_module(ns, test_name):
+ if test_name.startswith('test.') or ns.testdir:
+ return test_name
else:
- # Always import it from the test package
- return 'test.' + test
-
+ # Import it from the test package
+ return 'test.' + test_name
-def runtest(ns, test):
- """Run a single test.
-
- ns -- regrtest namespace of options
- test -- the name of the test
- Returns the tuple (result, test_time, xml_data), where result is one
- of the constants:
-
- INTERRUPTED KeyboardInterrupt when run under -j
- RESOURCE_DENIED test skipped because resource denied
- SKIPPED test skipped for some other reason
- ENV_CHANGED test failed because it changed the execution environment
- FAILED test failed
- PASSED test passed
- EMPTY_TEST_SUITE test ran no subtests.
+TestResult = collections.namedtuple('TestResult',
+ 'test_name result test_time xml_data')
- If ns.xmlpath is not None, xml_data is a list containing each
- generated testsuite element.
- """
+def _runtest(ns, test_name):
+ # Handle faulthandler timeout, capture stdout+stderr, XML serialization
+ # and measure time.
output_on_failure = ns.verbose3
use_timeout = (ns.timeout is not None)
if use_timeout:
faulthandler.dump_traceback_later(ns.timeout, exit=True)
+
+ start_time = time.perf_counter()
try:
support.set_match_tests(ns.match_tests)
- # reset the environment_altered flag to detect if a test altered
- # the environment
- support.environment_altered = False
support.junit_xml_list = xml_list = [] if ns.xmlpath else None
if ns.failfast:
support.failfast = True
+
if output_on_failure:
support.verbose = True
@@ -124,8 +121,9 @@ def runtest(ns, test):
try:
sys.stdout = stream
sys.stderr = stream
- result = runtest_inner(ns, test, display_failure=False)
- if result[0] != PASSED:
+ result = _runtest_inner(ns, test_name,
+ display_failure=False)
+ if result != PASSED:
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
@@ -133,98 +131,170 @@ def runtest(ns, test):
sys.stdout = orig_stdout
sys.stderr = orig_stderr
else:
- support.verbose = ns.verbose # Tell tests to be moderately quiet
- result = runtest_inner(ns, test, display_failure=not ns.verbose)
+ # Tell tests to be moderately quiet
+ support.verbose = ns.verbose
+
+ result = _runtest_inner(ns, test_name,
+ display_failure=not ns.verbose)
if xml_list:
import xml.etree.ElementTree as ET
xml_data = [ET.tostring(x).decode('us-ascii') for x in xml_list]
else:
xml_data = None
- return result + (xml_data,)
+
+ test_time = time.perf_counter() - start_time
+
+ return TestResult(test_name, result, test_time, xml_data)
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
- cleanup_test_droppings(test, ns.verbose)
support.junit_xml_list = None
+def runtest(ns, test_name):
+ """Run a single test.
+
+ ns -- regrtest namespace of options
+ test_name -- the name of the test
+
+ Returns the tuple (result, test_time, xml_data), where result is one
+ of the constants:
+
+ INTERRUPTED KeyboardInterrupt
+ RESOURCE_DENIED test skipped because resource denied
+ SKIPPED test skipped for some other reason
+ ENV_CHANGED test failed because it changed the execution environment
+ FAILED test failed
+ PASSED test passed
+ EMPTY_TEST_SUITE test ran no subtests.
+
+ If ns.xmlpath is not None, xml_data is a list containing each
+ generated testsuite element.
+ """
+ try:
+ return _runtest(ns, test_name)
+ except:
+ if not ns.pgo:
+ msg = traceback.format_exc()
+ print(f"test {test_name} crashed -- {msg}",
+ file=sys.stderr, flush=True)
+ return TestResult(test_name, FAILED, 0.0, None)
+
+
def post_test_cleanup():
+ support.gc_collect()
support.reap_children()
-def runtest_inner(ns, test, display_failure=True):
- support.unload(test)
+def _test_module(the_module):
+ loader = unittest.TestLoader()
+ tests = loader.loadTestsFromModule(the_module)
+ for error in loader.errors:
+ print(error, file=sys.stderr)
+ if loader.errors:
+ raise Exception("errors while loading tests")
+ support.run_unittest(tests)
+
+
+def _runtest_inner2(ns, test_name):
+ # Load the test function, run the test function, handle huntrleaks
+ # and findleaks to detect leaks
+
+ abstest = get_abs_module(ns, test_name)
+
+ # remove the module from sys.module to reload it if it was already imported
+ support.unload(abstest)
+
+ the_module = importlib.import_module(abstest)
+
+ # If the test has a test_main, that will run the appropriate
+ # tests. If not, use normal unittest test loading.
+ test_runner = getattr(the_module, "test_main", None)
+ if test_runner is None:
+ test_runner = functools.partial(_test_module, the_module)
+
+ try:
+ if ns.huntrleaks:
+ # Return True if the test leaked references
+ refleak = dash_R(ns, test_name, test_runner)
+ else:
+ test_runner()
+ refleak = False
+ finally:
+ cleanup_test_droppings(test_name, ns.verbose)
+
+ if ns.findleaks:
+ import gc
+ support.gc_collect()
+ if gc.garbage:
+ import gc
+ gc.garbage = [1]
+ print_warning(f"{test_name} created {len(gc.garbage)} "
+ f"uncollectable object(s).")
+ # move the uncollectable objects somewhere,
+ # so we don't see them again
+ found_garbage.extend(gc.garbage)
+ gc.garbage.clear()
+ support.environment_altered = True
+
+ post_test_cleanup()
+
+ return refleak
+
+
+def _runtest_inner(ns, test_name, display_failure=True):
+ # Detect environment changes, handle exceptions.
+
+ # Reset the environment_altered flag to detect if a test altered
+ # the environment
+ support.environment_altered = False
+
+ if ns.pgo:
+ display_failure = False
- test_time = 0.0
- refleak = False # True if the test leaked references.
try:
- abstest = get_abs_module(ns, test)
clear_caches()
- with saved_test_environment(test, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
- start_time = time.perf_counter()
- the_module = importlib.import_module(abstest)
- # If the test has a test_main, that will run the appropriate
- # tests. If not, use normal unittest test loading.
- test_runner = getattr(the_module, "test_main", None)
- if test_runner is None:
- def test_runner():
- loader = unittest.TestLoader()
- tests = loader.loadTestsFromModule(the_module)
- for error in loader.errors:
- print(error, file=sys.stderr)
- if loader.errors:
- raise Exception("errors while loading tests")
- support.run_unittest(tests)
- if ns.huntrleaks:
- refleak = dash_R(ns, the_module, test, test_runner)
- else:
- test_runner()
- test_time = time.perf_counter() - start_time
- post_test_cleanup()
+
+ with saved_test_environment(test_name, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
+ refleak = _runtest_inner2(ns, test_name)
except support.ResourceDenied as msg:
if not ns.quiet and not ns.pgo:
- print(test, "skipped --", msg, flush=True)
- return RESOURCE_DENIED, test_time
+ print(f"{test_name} skipped -- {msg}", flush=True)
+ return RESOURCE_DENIED
except unittest.SkipTest as msg:
if not ns.quiet and not ns.pgo:
- print(test, "skipped --", msg, flush=True)
- return SKIPPED, test_time
- except KeyboardInterrupt:
- raise
- except support.TestFailed as msg:
- if not ns.pgo:
- if display_failure:
- print("test", test, "failed --", msg, file=sys.stderr,
- flush=True)
- else:
- print("test", test, "failed", file=sys.stderr, flush=True)
- return FAILED, test_time
+ print(f"{test_name} skipped -- {msg}", flush=True)
+ return SKIPPED
+ except support.TestFailed as exc:
+ msg = f"test {test_name} failed"
+ if display_failure:
+ msg = f"{msg} -- {exc}"
+ print(msg, file=sys.stderr, flush=True)
+ return FAILED
except support.TestDidNotRun:
- return TEST_DID_NOT_RUN, test_time
+ return TEST_DID_NOT_RUN
+ except KeyboardInterrupt:
+ return INTERRUPTED
except:
- msg = traceback.format_exc()
if not ns.pgo:
- print("test", test, "crashed --", msg, file=sys.stderr,
- flush=True)
- return FAILED, test_time
- else:
- if refleak:
- return FAILED, test_time
- if environment.changed:
- return ENV_CHANGED, test_time
- return PASSED, test_time
+ msg = traceback.format_exc()
+ print(f"test {test_name} crashed -- {msg}",
+ file=sys.stderr, flush=True)
+ return FAILED
+ if refleak:
+ return FAILED
+ if environment.changed:
+ return ENV_CHANGED
+ return PASSED
-def cleanup_test_droppings(testname, verbose):
- import shutil
- import stat
- import gc
+def cleanup_test_droppings(test_name, verbose):
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
- gc.collect()
+ support.gc_collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
@@ -239,23 +309,23 @@ def cleanup_test_droppings(testname, verbose):
continue
if os.path.isdir(name):
+ import shutil
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
- raise SystemError("os.path says %r exists but is neither "
- "directory nor file" % name)
+ raise RuntimeError(f"os.path says {name!r} exists but is neither "
+ f"directory nor file")
if verbose:
- print("%r left behind %s %r" % (testname, kind, name))
+ print_warning("%r left behind %s %r" % (test_name, kind, name))
+ support.environment_altered = True
+
try:
+ import stat
# fix possible permissions problems that might prevent cleanup
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
- except Exception as msg:
- print(("%r left behind %s %r and it couldn't be "
- "removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
-
-
-def findtestdir(path=None):
- return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
+ except Exception as exc:
+ print_warning(f"{test_name} left behind {kind} {name!r} "
+ f"and it couldn't be removed: {exc}")
diff --git a/Lib/test/libregrtest/runtest_mp.py b/Lib/test/libregrtest/runtest_mp.py
index 6190574afdf8..0a95bf622b05 100644
--- a/Lib/test/libregrtest/runtest_mp.py
+++ b/Lib/test/libregrtest/runtest_mp.py
@@ -1,3 +1,4 @@
+import collections
import faulthandler
import json
import os
@@ -5,13 +6,12 @@
import sys
import threading
import time
-import traceback
import types
from test import support
from test.libregrtest.runtest import (
runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME,
- format_test_result)
+ format_test_result, TestResult)
from test.libregrtest.setup import setup_tests
from test.libregrtest.utils import format_duration
@@ -64,15 +64,9 @@ def run_tests_worker(worker_args):
setup_tests(ns)
- try:
- result = runtest(ns, testname)
- except KeyboardInterrupt:
- result = INTERRUPTED, '', None
- except BaseException as e:
- traceback.print_exc()
- result = CHILD_ERROR, str(e)
-
+ result = runtest(ns, testname)
print() # Force a newline (just in case)
+
print(json.dumps(result), flush=True)
sys.exit(0)
@@ -97,45 +91,51 @@ def __next__(self):
return next(self.tests)
+MultiprocessResult = collections.namedtuple('MultiprocessResult',
+ 'result stdout stderr error_msg')
+
class MultiprocessThread(threading.Thread):
def __init__(self, pending, output, ns):
super().__init__()
self.pending = pending
self.output = output
self.ns = ns
- self.current_test = None
+ self.current_test_name = None
self.start_time = None
def _runtest(self):
try:
- test = next(self.pending)
+ test_name = next(self.pending)
except StopIteration:
- self.output.put((None, None, None, None))
+ self.output.put(None)
return True
try:
self.start_time = time.monotonic()
- self.current_test = test
+ self.current_test_name = test_name
- retcode, stdout, stderr = run_test_in_subprocess(test, self.ns)
+ retcode, stdout, stderr = run_test_in_subprocess(test_name, self.ns)
finally:
- self.current_test = None
+ self.current_test_name = None
if retcode != 0:
- result = (CHILD_ERROR, "Exit code %s" % retcode, None)
- self.output.put((test, stdout.rstrip(), stderr.rstrip(),
- result))
+ test_time = time.monotonic() - self.start_time
+ result = TestResult(test_name, CHILD_ERROR, test_time, None)
+ err_msg = "Exit code %s" % retcode
+ mp_result = MultiprocessResult(result, stdout.rstrip(), stderr.rstrip(), err_msg)
+ self.output.put(mp_result)
return False
stdout, _, result = stdout.strip().rpartition("\n")
if not result:
- self.output.put((None, None, None, None))
+ self.output.put(None)
return True
+ # deserialize run_tests_worker() output
result = json.loads(result)
- assert len(result) == 3, f"Invalid result tuple: {result!r}"
- self.output.put((test, stdout.rstrip(), stderr.rstrip(),
- result))
+ result = TestResult(*result)
+ mp_result = MultiprocessResult(result, stdout.rstrip(), stderr.rstrip(), None)
+ self.output.put(mp_result)
return False
def run(self):
@@ -144,7 +144,7 @@ def run(self):
while not stop:
stop = self._runtest()
except BaseException:
- self.output.put((None, None, None, None))
+ self.output.put(None)
raise
@@ -164,12 +164,12 @@ def run_tests_multiprocess(regrtest):
def get_running(workers):
running = []
for worker in workers:
- current_test = worker.current_test
- if not current_test:
+ current_test_name = worker.current_test_name
+ if not current_test_name:
continue
dt = time.monotonic() - worker.start_time
if dt >= PROGRESS_MIN_TIME:
- text = '%s (%s)' % (current_test, format_duration(dt))
+ text = '%s (%s)' % (current_test_name, format_duration(dt))
running.append(text)
return running
@@ -182,40 +182,41 @@ def get_running(workers):
faulthandler.dump_traceback_later(test_timeout, exit=True)
try:
- item = output.get(timeout=get_timeout)
+ mp_result = output.get(timeout=get_timeout)
except queue.Empty:
running = get_running(workers)
if running and not regrtest.ns.pgo:
print('running: %s' % ', '.join(running), flush=True)
continue
- test, stdout, stderr, result = item
- if test is None:
+ if mp_result is None:
finished += 1
continue
- regrtest.accumulate_result(test, result)
+ result = mp_result.result
+ regrtest.accumulate_result(result)
# Display progress
- ok, test_time, xml_data = result
- text = format_test_result(test, ok)
+ ok = result.result
+
+ text = format_test_result(result)
if (ok not in (CHILD_ERROR, INTERRUPTED)
- and test_time >= PROGRESS_MIN_TIME
+ and result.test_time >= PROGRESS_MIN_TIME
and not regrtest.ns.pgo):
- text += ' (%s)' % format_duration(test_time)
+ text += ' (%s)' % format_duration(result.test_time)
elif ok == CHILD_ERROR:
- text = '%s (%s)' % (text, test_time)
+ text = '%s (%s)' % (text, mp_result.error_msg)
running = get_running(workers)
if running and not regrtest.ns.pgo:
text += ' -- running: %s' % ', '.join(running)
regrtest.display_progress(test_index, text)
# Copy stdout and stderr from the child process
- if stdout:
- print(stdout, flush=True)
- if stderr and not regrtest.ns.pgo:
- print(stderr, file=sys.stderr, flush=True)
+ if mp_result.stdout:
+ print(mp_result.stdout, flush=True)
+ if mp_result.stderr and not regrtest.ns.pgo:
+ print(mp_result.stderr, file=sys.stderr, flush=True)
- if result[0] == INTERRUPTED:
+ if result.result == INTERRUPTED:
raise KeyboardInterrupt
test_index += 1
except KeyboardInterrupt:
@@ -229,7 +230,7 @@ def get_running(workers):
# If tests are interrupted, wait until tests complete
wait_start = time.monotonic()
while True:
- running = [worker.current_test for worker in workers]
+ running = [worker.current_test_name for worker in workers]
running = list(filter(bool, running))
if not running:
break
diff --git a/Lib/test/libregrtest/save_env.py b/Lib/test/libregrtest/save_env.py
index 2313b71ec893..31931f2192f9 100644
--- a/Lib/test/libregrtest/save_env.py
+++ b/Lib/test/libregrtest/save_env.py
@@ -9,6 +9,7 @@
import threading
import warnings
from test import support
+from test.libregrtest.utils import print_warning
try:
import _multiprocessing, multiprocessing.process
except ImportError:
@@ -283,8 +284,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
self.changed = True
restore(original)
if not self.quiet and not self.pgo:
- print(f"Warning -- {name} was modified by {self.testname}",
- file=sys.stderr, flush=True)
+ print_warning(f"{name} was modified by {self.testname}")
print(f" Before: {original}\n After: {current} ",
file=sys.stderr, flush=True)
return False
diff --git a/Lib/test/libregrtest/utils.py b/Lib/test/libregrtest/utils.py
index d36bf9196626..fb9971a64f66 100644
--- a/Lib/test/libregrtest/utils.py
+++ b/Lib/test/libregrtest/utils.py
@@ -1,5 +1,6 @@
-import os.path
import math
+import os.path
+import sys
import textwrap
@@ -54,3 +55,7 @@ def printlist(x, width=70, indent=4, file=None):
print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks),
file=file)
+
+
+def print_warning(msg):
+ print(f"Warning -- {msg}", file=sys.stderr, flush=True)
diff --git a/Lib/test/libregrtest/win_utils.py b/Lib/test/libregrtest/win_utils.py
index 2e6492289e15..ca27f3681f9e 100644
--- a/Lib/test/libregrtest/win_utils.py
+++ b/Lib/test/libregrtest/win_utils.py
@@ -1,8 +1,7 @@
-import subprocess
-import sys
-import os
import _winapi
import msvcrt
+import os
+import subprocess
import uuid
from test import support
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index a9febd00eda0..5c65e6dd8520 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -26,8 +26,9 @@
ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
TEST_INTERRUPTED = textwrap.dedent("""
- from signal import SIGINT, raise_signal
+ from signal import SIGINT
try:
+ from signal import raise_signal
raise_signal(SIGINT)
except ImportError:
import os
@@ -108,7 +109,7 @@ def test_quiet(self):
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
- def test_slow(self):
+ def test_slowest(self):
for opt in '-o', '--slowest':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
@@ -780,22 +781,23 @@ def test_slowest(self):
% (self.TESTNAME_REGEX, len(tests)))
self.check_line(output, regex)
- def test_slow_interrupted(self):
+ def test_slowest_interrupted(self):
# Issue #25373: test --slowest with an interrupted test
code = TEST_INTERRUPTED
test = self.create_test("sigint", code=code)
for multiprocessing in (False, True):
- if multiprocessing:
- args = ("--slowest", "-j2", test)
- else:
- args = ("--slowest", test)
- output = self.run_tests(*args, exitcode=130)
- self.check_executed_tests(output, test,
- omitted=test, interrupted=True)
-
- regex = ('10 slowest tests:\n')
- self.check_line(output, regex)
+ with self.subTest(multiprocessing=multiprocessing):
+ if multiprocessing:
+ args = ("--slowest", "-j2", test)
+ else:
+ args = ("--slowest", test)
+ output = self.run_tests(*args, exitcode=130)
+ self.check_executed_tests(output, test,
+ omitted=test, interrupted=True)
+
+ regex = ('10 slowest tests:\n')
+ self.check_line(output, regex)
def test_coverage(self):
# test --coverage
1
0
https://github.com/python/cpython/commit/9db0324712f6982d89620b420f507a6aa5…
commit: 9db0324712f6982d89620b420f507a6aa5da152f
branch: master
author: Victor Stinner <vstinner(a)redhat.com>
committer: GitHub <noreply(a)github.com>
date: 2019-04-26T02:32:01+02:00
summary:
bpo-36710: Add runtime parameter in gcmodule.c (GH-12958)
Add a 'state' or 'runtime' parameter to functions in gcmodule.c to
avoid relying directly on the global variable _PyRuntime.
files:
M Include/internal/pycore_pylifecycle.h
M Modules/gcmodule.c
M Python/import.c
diff --git a/Include/internal/pycore_pylifecycle.h b/Include/internal/pycore_pylifecycle.h
index 69709b10b8f8..24847f4f454e 100644
--- a/Include/internal/pycore_pylifecycle.h
+++ b/Include/internal/pycore_pylifecycle.h
@@ -83,7 +83,7 @@ extern void _PyGILState_Init(
PyThreadState *tstate);
extern void _PyGILState_Fini(_PyRuntimeState *runtime);
-PyAPI_FUNC(void) _PyGC_DumpShutdownStats(void);
+PyAPI_FUNC(void) _PyGC_DumpShutdownStats(_PyRuntimeState *runtime);
PyAPI_FUNC(_PyInitError) _Py_PreInitializeFromCoreConfig(
const _PyCoreConfig *coreconfig);
diff --git a/Modules/gcmodule.c b/Modules/gcmodule.c
index f36c7f5d5e40..be9b73a84460 100644
--- a/Modules/gcmodule.c
+++ b/Modules/gcmodule.c
@@ -123,14 +123,14 @@ static PyObject *gc_str = NULL;
DEBUG_UNCOLLECTABLE | \
DEBUG_SAVEALL
-#define GEN_HEAD(n) (&_PyRuntime.gc.generations[n].head)
+#define GEN_HEAD(state, n) (&(state)->generations[n].head)
void
_PyGC_Initialize(struct _gc_runtime_state *state)
{
state->enabled = 1; /* automatic collection enabled? */
-#define _GEN_HEAD(n) (&state->generations[n].head)
+#define _GEN_HEAD(n) GEN_HEAD(state, n)
struct gc_generation generations[NUM_GENERATIONS] = {
/* PyGC_Head, threshold, count */
{{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)}, 700, 0},
@@ -140,7 +140,7 @@ _PyGC_Initialize(struct _gc_runtime_state *state)
for (int i = 0; i < NUM_GENERATIONS; i++) {
state->generations[i] = generations[i];
};
- state->generation0 = GEN_HEAD(0);
+ state->generation0 = GEN_HEAD(state, 0);
struct gc_generation permanent_generation = {
{(uintptr_t)&state->permanent_generation.head,
(uintptr_t)&state->permanent_generation.head}, 0, 0
@@ -808,21 +808,22 @@ debug_cycle(const char *msg, PyObject *op)
* merged into the old list regardless.
*/
static void
-handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
+handle_legacy_finalizers(struct _gc_runtime_state *state,
+ PyGC_Head *finalizers, PyGC_Head *old)
{
- PyGC_Head *gc = GC_NEXT(finalizers);
-
assert(!PyErr_Occurred());
- if (_PyRuntime.gc.garbage == NULL) {
- _PyRuntime.gc.garbage = PyList_New(0);
- if (_PyRuntime.gc.garbage == NULL)
+
+ PyGC_Head *gc = GC_NEXT(finalizers);
+ if (state->garbage == NULL) {
+ state->garbage = PyList_New(0);
+ if (state->garbage == NULL)
Py_FatalError("gc couldn't create gc.garbage list");
}
for (; gc != finalizers; gc = GC_NEXT(gc)) {
PyObject *op = FROM_GC(gc);
- if ((_PyRuntime.gc.debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
- if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
+ if ((state->debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
+ if (PyList_Append(state->garbage, op) < 0) {
PyErr_Clear();
break;
}
@@ -904,11 +905,11 @@ check_garbage(PyGC_Head *collectable)
* objects may be freed. It is possible I screwed something up here.
*/
static void
-delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
+delete_garbage(struct _gc_runtime_state *state,
+ PyGC_Head *collectable, PyGC_Head *old)
{
- inquiry clear;
-
assert(!PyErr_Occurred());
+
while (!gc_list_is_empty(collectable)) {
PyGC_Head *gc = GC_NEXT(collectable);
PyObject *op = FROM_GC(gc);
@@ -916,13 +917,14 @@ delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
_PyObject_ASSERT_WITH_MSG(op, Py_REFCNT(op) > 0,
"refcount is too small");
- if (_PyRuntime.gc.debug & DEBUG_SAVEALL) {
- assert(_PyRuntime.gc.garbage != NULL);
- if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
+ if (state->debug & DEBUG_SAVEALL) {
+ assert(state->garbage != NULL);
+ if (PyList_Append(state->garbage, op) < 0) {
PyErr_Clear();
}
}
else {
+ inquiry clear;
if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
Py_INCREF(op);
(void) clear(op);
@@ -965,8 +967,8 @@ clear_freelists(void)
/* This is the main function. Read this to understand how the
* collection process works. */
static Py_ssize_t
-collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
- int nofail)
+collect(struct _gc_runtime_state *state, int generation,
+ Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable, int nofail)
{
int i;
Py_ssize_t m = 0; /* # objects collected */
@@ -978,17 +980,15 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
PyGC_Head *gc;
_PyTime_t t1 = 0; /* initialize to prevent a compiler warning */
- struct gc_generation_stats *stats = &_PyRuntime.gc.generation_stats[generation];
-
- if (_PyRuntime.gc.debug & DEBUG_STATS) {
+ if (state->debug & DEBUG_STATS) {
PySys_WriteStderr("gc: collecting generation %d...\n",
generation);
PySys_WriteStderr("gc: objects in each generation:");
for (i = 0; i < NUM_GENERATIONS; i++)
PySys_FormatStderr(" %zd",
- gc_list_size(GEN_HEAD(i)));
+ gc_list_size(GEN_HEAD(state, i)));
PySys_WriteStderr("\ngc: objects in permanent generation: %zd",
- gc_list_size(&_PyRuntime.gc.permanent_generation.head));
+ gc_list_size(&state->permanent_generation.head));
t1 = _PyTime_GetMonotonicClock();
PySys_WriteStderr("\n");
@@ -999,19 +999,19 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
/* update collection and allocation counters */
if (generation+1 < NUM_GENERATIONS)
- _PyRuntime.gc.generations[generation+1].count += 1;
+ state->generations[generation+1].count += 1;
for (i = 0; i <= generation; i++)
- _PyRuntime.gc.generations[i].count = 0;
+ state->generations[i].count = 0;
/* merge younger generations with one we are currently collecting */
for (i = 0; i < generation; i++) {
- gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
+ gc_list_merge(GEN_HEAD(state, i), GEN_HEAD(state, generation));
}
/* handy references */
- young = GEN_HEAD(generation);
+ young = GEN_HEAD(state, generation);
if (generation < NUM_GENERATIONS-1)
- old = GEN_HEAD(generation+1);
+ old = GEN_HEAD(state, generation+1);
else
old = young;
@@ -1039,7 +1039,7 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
/* Move reachable objects to next generation. */
if (young != old) {
if (generation == NUM_GENERATIONS - 2) {
- _PyRuntime.gc.long_lived_pending += gc_list_size(young);
+ state->long_lived_pending += gc_list_size(young);
}
gc_list_merge(young, old);
}
@@ -1047,8 +1047,8 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
/* We only untrack dicts in full collections, to avoid quadratic
dict build-up. See issue #14775. */
untrack_dicts(young);
- _PyRuntime.gc.long_lived_pending = 0;
- _PyRuntime.gc.long_lived_total = gc_list_size(young);
+ state->long_lived_pending = 0;
+ state->long_lived_total = gc_list_size(young);
}
/* All objects in unreachable are trash, but objects reachable from
@@ -1072,7 +1072,7 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
*/
for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
m++;
- if (_PyRuntime.gc.debug & DEBUG_COLLECTABLE) {
+ if (state->debug & DEBUG_COLLECTABLE) {
debug_cycle("collectable", FROM_GC(gc));
}
}
@@ -1094,17 +1094,17 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
* the reference cycles to be broken. It may also cause some objects
* in finalizers to be freed.
*/
- delete_garbage(&unreachable, old);
+ delete_garbage(state, &unreachable, old);
}
/* Collect statistics on uncollectable objects found and print
* debugging information. */
for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) {
n++;
- if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
+ if (state->debug & DEBUG_UNCOLLECTABLE)
debug_cycle("uncollectable", FROM_GC(gc));
}
- if (_PyRuntime.gc.debug & DEBUG_STATS) {
+ if (state->debug & DEBUG_STATS) {
_PyTime_t t2 = _PyTime_GetMonotonicClock();
if (m == 0 && n == 0)
@@ -1121,7 +1121,7 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
* reachable list of garbage. The programmer has to deal with
* this if they insist on creating this type of structure.
*/
- handle_legacy_finalizers(&finalizers, old);
+ handle_legacy_finalizers(state, &finalizers, old);
validate_list(old, 0);
/* Clear free list only during the collection of the highest
@@ -1143,16 +1143,21 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
}
/* Update stats */
- if (n_collected)
+ if (n_collected) {
*n_collected = m;
- if (n_uncollectable)
+ }
+ if (n_uncollectable) {
*n_uncollectable = n;
+ }
+
+ struct gc_generation_stats *stats = &state->generation_stats[generation];
stats->collections++;
stats->collected += m;
stats->uncollectable += n;
- if (PyDTrace_GC_DONE_ENABLED())
+ if (PyDTrace_GC_DONE_ENABLED()) {
PyDTrace_GC_DONE(n+m);
+ }
assert(!PyErr_Occurred());
return n+m;
@@ -1162,19 +1167,21 @@ collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
* is starting or stopping
*/
static void
-invoke_gc_callback(const char *phase, int generation,
- Py_ssize_t collected, Py_ssize_t uncollectable)
+invoke_gc_callback(struct _gc_runtime_state *state, const char *phase,
+ int generation, Py_ssize_t collected,
+ Py_ssize_t uncollectable)
{
- Py_ssize_t i;
- PyObject *info = NULL;
-
assert(!PyErr_Occurred());
+
/* we may get called very early */
- if (_PyRuntime.gc.callbacks == NULL)
+ if (state->callbacks == NULL) {
return;
+ }
+
/* The local variable cannot be rebound, check it for sanity */
- assert(PyList_CheckExact(_PyRuntime.gc.callbacks));
- if (PyList_GET_SIZE(_PyRuntime.gc.callbacks) != 0) {
+ assert(PyList_CheckExact(state->callbacks));
+ PyObject *info = NULL;
+ if (PyList_GET_SIZE(state->callbacks) != 0) {
info = Py_BuildValue("{sisnsn}",
"generation", generation,
"collected", collected,
@@ -1184,8 +1191,8 @@ invoke_gc_callback(const char *phase, int generation,
return;
}
}
- for (i=0; i<PyList_GET_SIZE(_PyRuntime.gc.callbacks); i++) {
- PyObject *r, *cb = PyList_GET_ITEM(_PyRuntime.gc.callbacks, i);
+ for (Py_ssize_t i=0; i<PyList_GET_SIZE(state->callbacks); i++) {
+ PyObject *r, *cb = PyList_GET_ITEM(state->callbacks, i);
Py_INCREF(cb); /* make sure cb doesn't go away */
r = PyObject_CallFunction(cb, "sO", phase, info);
if (r == NULL) {
@@ -1204,36 +1211,34 @@ invoke_gc_callback(const char *phase, int generation,
* progress callbacks.
*/
static Py_ssize_t
-collect_with_callback(int generation)
+collect_with_callback(struct _gc_runtime_state *state, int generation)
{
- Py_ssize_t result, collected, uncollectable;
assert(!PyErr_Occurred());
- invoke_gc_callback("start", generation, 0, 0);
- result = collect(generation, &collected, &uncollectable, 0);
- invoke_gc_callback("stop", generation, collected, uncollectable);
+ Py_ssize_t result, collected, uncollectable;
+ invoke_gc_callback(state, "start", generation, 0, 0);
+ result = collect(state, generation, &collected, &uncollectable, 0);
+ invoke_gc_callback(state, "stop", generation, collected, uncollectable);
assert(!PyErr_Occurred());
return result;
}
static Py_ssize_t
-collect_generations(void)
+collect_generations(struct _gc_runtime_state *state)
{
- int i;
- Py_ssize_t n = 0;
-
/* Find the oldest generation (highest numbered) where the count
* exceeds the threshold. Objects in the that generation and
* generations younger than it will be collected. */
- for (i = NUM_GENERATIONS-1; i >= 0; i--) {
- if (_PyRuntime.gc.generations[i].count > _PyRuntime.gc.generations[i].threshold) {
+ Py_ssize_t n = 0;
+ for (int i = NUM_GENERATIONS-1; i >= 0; i--) {
+ if (state->generations[i].count > state->generations[i].threshold) {
/* Avoid quadratic performance degradation in number
of tracked objects. See comments at the beginning
of this file, and issue #4074.
*/
if (i == NUM_GENERATIONS - 1
- && _PyRuntime.gc.long_lived_pending < _PyRuntime.gc.long_lived_total / 4)
+ && state->long_lived_pending < state->long_lived_total / 4)
continue;
- n = collect_with_callback(i);
+ n = collect_with_callback(state, i);
break;
}
}
@@ -1301,21 +1306,23 @@ static Py_ssize_t
gc_collect_impl(PyObject *module, int generation)
/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
{
- Py_ssize_t n;
if (generation < 0 || generation >= NUM_GENERATIONS) {
PyErr_SetString(PyExc_ValueError, "invalid generation");
return -1;
}
- if (_PyRuntime.gc.collecting)
- n = 0; /* already collecting, don't do anything */
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
+ Py_ssize_t n;
+ if (state->collecting) {
+ /* already collecting, don't do anything */
+ n = 0;
+ }
else {
- _PyRuntime.gc.collecting = 1;
- n = collect_with_callback(generation);
- _PyRuntime.gc.collecting = 0;
+ state->collecting = 1;
+ n = collect_with_callback(state, generation);
+ state->collecting = 0;
}
-
return n;
}
@@ -1366,19 +1373,18 @@ PyDoc_STRVAR(gc_set_thresh__doc__,
"collection.\n");
static PyObject *
-gc_set_thresh(PyObject *self, PyObject *args)
+gc_set_threshold(PyObject *self, PyObject *args)
{
- int i;
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
- &_PyRuntime.gc.generations[0].threshold,
- &_PyRuntime.gc.generations[1].threshold,
- &_PyRuntime.gc.generations[2].threshold))
+ &state->generations[0].threshold,
+ &state->generations[1].threshold,
+ &state->generations[2].threshold))
return NULL;
- for (i = 3; i < NUM_GENERATIONS; i++) {
+ for (int i = 3; i < NUM_GENERATIONS; i++) {
/* generations higher than 2 get the same threshold */
- _PyRuntime.gc.generations[i].threshold = _PyRuntime.gc.generations[2].threshold;
+ state->generations[i].threshold = state->generations[2].threshold;
}
-
Py_RETURN_NONE;
}
@@ -1392,10 +1398,11 @@ static PyObject *
gc_get_threshold_impl(PyObject *module)
/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
{
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
return Py_BuildValue("(iii)",
- _PyRuntime.gc.generations[0].threshold,
- _PyRuntime.gc.generations[1].threshold,
- _PyRuntime.gc.generations[2].threshold);
+ state->generations[0].threshold,
+ state->generations[1].threshold,
+ state->generations[2].threshold);
}
/*[clinic input]
@@ -1408,10 +1415,11 @@ static PyObject *
gc_get_count_impl(PyObject *module)
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
return Py_BuildValue("(iii)",
- _PyRuntime.gc.generations[0].count,
- _PyRuntime.gc.generations[1].count,
- _PyRuntime.gc.generations[2].count);
+ state->generations[0].count,
+ state->generations[1].count,
+ state->generations[2].count);
}
static int
@@ -1454,8 +1462,9 @@ gc_get_referrers(PyObject *self, PyObject *args)
PyObject *result = PyList_New(0);
if (!result) return NULL;
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
for (i = 0; i < NUM_GENERATIONS; i++) {
- if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
+ if (!(gc_referrers_for(args, GEN_HEAD(state, i), result))) {
Py_DECREF(result);
return NULL;
}
@@ -1517,6 +1526,7 @@ gc_get_objects_impl(PyObject *module, Py_ssize_t generation)
{
int i;
PyObject* result;
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
result = PyList_New(0);
if (result == NULL) {
@@ -1539,7 +1549,7 @@ gc_get_objects_impl(PyObject *module, Py_ssize_t generation)
goto error;
}
- if (append_objects(result, GEN_HEAD(generation))) {
+ if (append_objects(result, GEN_HEAD(state, generation))) {
goto error;
}
@@ -1548,7 +1558,7 @@ gc_get_objects_impl(PyObject *module, Py_ssize_t generation)
/* If generation is not passed or None, get all objects from all generations */
for (i = 0; i < NUM_GENERATIONS; i++) {
- if (append_objects(result, GEN_HEAD(i))) {
+ if (append_objects(result, GEN_HEAD(state, i))) {
goto error;
}
}
@@ -1570,16 +1580,16 @@ gc_get_stats_impl(PyObject *module)
/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
{
int i;
- PyObject *result;
struct gc_generation_stats stats[NUM_GENERATIONS], *st;
/* To get consistent values despite allocations while constructing
the result list, we use a snapshot of the running stats. */
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
for (i = 0; i < NUM_GENERATIONS; i++) {
- stats[i] = _PyRuntime.gc.generation_stats[i];
+ stats[i] = state->generation_stats[i];
}
- result = PyList_New(0);
+ PyObject *result = PyList_New(0);
if (result == NULL)
return NULL;
@@ -1646,9 +1656,10 @@ static PyObject *
gc_freeze_impl(PyObject *module)
/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
{
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
for (int i = 0; i < NUM_GENERATIONS; ++i) {
- gc_list_merge(GEN_HEAD(i), &_PyRuntime.gc.permanent_generation.head);
- _PyRuntime.gc.generations[i].count = 0;
+ gc_list_merge(GEN_HEAD(state, i), &state->permanent_generation.head);
+ state->generations[i].count = 0;
}
Py_RETURN_NONE;
}
@@ -1665,7 +1676,8 @@ static PyObject *
gc_unfreeze_impl(PyObject *module)
/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
{
- gc_list_merge(&_PyRuntime.gc.permanent_generation.head, GEN_HEAD(NUM_GENERATIONS-1));
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
+ gc_list_merge(&state->permanent_generation.head, GEN_HEAD(state, NUM_GENERATIONS-1));
Py_RETURN_NONE;
}
@@ -1711,7 +1723,7 @@ static PyMethodDef GcMethods[] = {
GC_SET_DEBUG_METHODDEF
GC_GET_DEBUG_METHODDEF
GC_GET_COUNT_METHODDEF
- {"set_threshold", gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
+ {"set_threshold", gc_set_threshold, METH_VARARGS, gc_set_thresh__doc__},
GC_GET_THRESHOLD_METHODDEF
GC_COLLECT_METHODDEF
GC_GET_OBJECTS_METHODDEF
@@ -1746,25 +1758,27 @@ PyInit_gc(void)
m = PyModule_Create(&gcmodule);
- if (m == NULL)
+ if (m == NULL) {
return NULL;
+ }
- if (_PyRuntime.gc.garbage == NULL) {
- _PyRuntime.gc.garbage = PyList_New(0);
- if (_PyRuntime.gc.garbage == NULL)
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
+ if (state->garbage == NULL) {
+ state->garbage = PyList_New(0);
+ if (state->garbage == NULL)
return NULL;
}
- Py_INCREF(_PyRuntime.gc.garbage);
- if (PyModule_AddObject(m, "garbage", _PyRuntime.gc.garbage) < 0)
+ Py_INCREF(state->garbage);
+ if (PyModule_AddObject(m, "garbage", state->garbage) < 0)
return NULL;
- if (_PyRuntime.gc.callbacks == NULL) {
- _PyRuntime.gc.callbacks = PyList_New(0);
- if (_PyRuntime.gc.callbacks == NULL)
+ if (state->callbacks == NULL) {
+ state->callbacks = PyList_New(0);
+ if (state->callbacks == NULL)
return NULL;
}
- Py_INCREF(_PyRuntime.gc.callbacks);
- if (PyModule_AddObject(m, "callbacks", _PyRuntime.gc.callbacks) < 0)
+ Py_INCREF(state->callbacks);
+ if (PyModule_AddObject(m, "callbacks", state->callbacks) < 0)
return NULL;
#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
@@ -1781,17 +1795,23 @@ PyInit_gc(void)
Py_ssize_t
PyGC_Collect(void)
{
- Py_ssize_t n;
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
+ if (!state->enabled) {
+ return 0;
+ }
- if (_PyRuntime.gc.collecting)
- n = 0; /* already collecting, don't do anything */
+ Py_ssize_t n;
+ if (state->collecting) {
+ /* already collecting, don't do anything */
+ n = 0;
+ }
else {
PyObject *exc, *value, *tb;
- _PyRuntime.gc.collecting = 1;
+ state->collecting = 1;
PyErr_Fetch(&exc, &value, &tb);
- n = collect_with_callback(NUM_GENERATIONS - 1);
+ n = collect_with_callback(state, NUM_GENERATIONS - 1);
PyErr_Restore(exc, value, tb);
- _PyRuntime.gc.collecting = 0;
+ state->collecting = 0;
}
return n;
@@ -1800,41 +1820,42 @@ PyGC_Collect(void)
Py_ssize_t
_PyGC_CollectIfEnabled(void)
{
- if (!_PyRuntime.gc.enabled)
- return 0;
-
return PyGC_Collect();
}
Py_ssize_t
_PyGC_CollectNoFail(void)
{
+ assert(!PyErr_Occurred());
+
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
Py_ssize_t n;
- assert(!PyErr_Occurred());
/* Ideally, this function is only called on interpreter shutdown,
and therefore not recursively. Unfortunately, when there are daemon
threads, a daemon thread can start a cyclic garbage collection
during interpreter shutdown (and then never finish it).
See http://bugs.python.org/issue8713#msg195178 for an example.
*/
- if (_PyRuntime.gc.collecting)
+ if (state->collecting) {
n = 0;
+ }
else {
- _PyRuntime.gc.collecting = 1;
- n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
- _PyRuntime.gc.collecting = 0;
+ state->collecting = 1;
+ n = collect(state, NUM_GENERATIONS - 1, NULL, NULL, 1);
+ state->collecting = 0;
}
return n;
}
void
-_PyGC_DumpShutdownStats(void)
+_PyGC_DumpShutdownStats(_PyRuntimeState *runtime)
{
- if (!(_PyRuntime.gc.debug & DEBUG_SAVEALL)
- && _PyRuntime.gc.garbage != NULL && PyList_GET_SIZE(_PyRuntime.gc.garbage) > 0) {
+ struct _gc_runtime_state *state = &runtime->gc;
+ if (!(state->debug & DEBUG_SAVEALL)
+ && state->garbage != NULL && PyList_GET_SIZE(state->garbage) > 0) {
const char *message;
- if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
+ if (state->debug & DEBUG_UNCOLLECTABLE)
message = "gc: %zd uncollectable objects at " \
"shutdown";
else
@@ -1845,13 +1866,13 @@ _PyGC_DumpShutdownStats(void)
already. */
if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
"gc", NULL, message,
- PyList_GET_SIZE(_PyRuntime.gc.garbage)))
+ PyList_GET_SIZE(state->garbage)))
PyErr_WriteUnraisable(NULL);
- if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE) {
+ if (state->debug & DEBUG_UNCOLLECTABLE) {
PyObject *repr = NULL, *bytes = NULL;
- repr = PyObject_Repr(_PyRuntime.gc.garbage);
+ repr = PyObject_Repr(state->garbage);
if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
- PyErr_WriteUnraisable(_PyRuntime.gc.garbage);
+ PyErr_WriteUnraisable(state->garbage);
else {
PySys_WriteStderr(
" %s\n",
@@ -1867,8 +1888,9 @@ _PyGC_DumpShutdownStats(void)
void
_PyGC_Fini(_PyRuntimeState *runtime)
{
- struct _gc_runtime_state *gc = &runtime->gc;
- Py_CLEAR(gc->callbacks);
+ struct _gc_runtime_state *state = &runtime->gc;
+ Py_CLEAR(state->garbage);
+ Py_CLEAR(state->callbacks);
}
/* for debugging */
@@ -1908,6 +1930,7 @@ PyObject_GC_UnTrack(void *op_raw)
static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
PyObject *op;
PyGC_Head *g;
size_t size;
@@ -1923,15 +1946,15 @@ _PyObject_GC_Alloc(int use_calloc, size_t basicsize)
assert(((uintptr_t)g & 3) == 0); // g must be aligned 4bytes boundary
g->_gc_next = 0;
g->_gc_prev = 0;
- _PyRuntime.gc.generations[0].count++; /* number of allocated GC objects */
- if (_PyRuntime.gc.generations[0].count > _PyRuntime.gc.generations[0].threshold &&
- _PyRuntime.gc.enabled &&
- _PyRuntime.gc.generations[0].threshold &&
- !_PyRuntime.gc.collecting &&
+ state->generations[0].count++; /* number of allocated GC objects */
+ if (state->generations[0].count > state->generations[0].threshold &&
+ state->enabled &&
+ state->generations[0].threshold &&
+ !state->collecting &&
!PyErr_Occurred()) {
- _PyRuntime.gc.collecting = 1;
- collect_generations();
- _PyRuntime.gc.collecting = 0;
+ state->collecting = 1;
+ collect_generations(state);
+ state->collecting = 0;
}
op = FROM_GC(g);
return op;
@@ -2000,8 +2023,9 @@ PyObject_GC_Del(void *op)
if (_PyObject_GC_IS_TRACKED(op)) {
gc_list_remove(g);
}
- if (_PyRuntime.gc.generations[0].count > 0) {
- _PyRuntime.gc.generations[0].count--;
+ struct _gc_runtime_state *state = &_PyRuntime.gc;
+ if (state->generations[0].count > 0) {
+ state->generations[0].count--;
}
PyObject_FREE(g);
}
diff --git a/Python/import.c b/Python/import.c
index c00c3aa640b0..3b2090b963dd 100644
--- a/Python/import.c
+++ b/Python/import.c
@@ -535,7 +535,7 @@ PyImport_Cleanup(void)
_PyGC_CollectNoFail();
/* Dump GC stats before it's too late, since it uses the warnings
machinery. */
- _PyGC_DumpShutdownStats();
+ _PyGC_DumpShutdownStats(&_PyRuntime);
/* Now, if there are any modules left alive, clear their globals to
minimize potential leaks. All C extension modules actually end
1
0
https://github.com/python/cpython/commit/10c8e6af910e3a26e59f913a3c1e4830ca71b1af
commit: 10c8e6af910e3a26e59f913a3c1e4830ca71b1af
branch: master
author: Victor Stinner <vstinner(a)redhat.com>
committer: GitHub <noreply(a)github.com>
date: 2019-04-26T01:53:18+02:00
summary:
bpo-36710: Add runtime variable in pystate.c (GH-12956)
Add 'gilstate', 'runtime' or 'xidregistry' parameter to many
functions in pystate.c to avoid relying on the _PyRuntime global.
files:
M Python/pystate.c
diff --git a/Python/pystate.c b/Python/pystate.c
index a413f9d7ab4c..f964f4951b9c 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -6,11 +6,6 @@
#include "pycore_pymem.h"
#include "pycore_pystate.h"
-#define _PyThreadState_SET(value) \
- _Py_atomic_store_relaxed(&_PyRuntime.gilstate.tstate_current, \
- (uintptr_t)(value))
-
-
/* --------------------------------------------------------------------------
CAUTION
@@ -34,6 +29,18 @@ to avoid the expense of doing their own locking).
extern "C" {
#endif
+#define _PyRuntimeGILState_GetThreadState(gilstate) \
+ ((PyThreadState*)_Py_atomic_load_relaxed(&(gilstate)->tstate_current))
+#define _PyRuntimeGILState_SetThreadState(gilstate, value) \
+ _Py_atomic_store_relaxed(&(gilstate)->tstate_current, \
+ (uintptr_t)(value))
+
+/* Forward declarations */
+static PyThreadState *_PyGILState_GetThisThreadState(struct _gilstate_runtime_state *gilstate);
+static void _PyThreadState_Delete(_PyRuntimeState *runtime, PyThreadState *tstate);
+static PyThreadState *_PyThreadState_Swap(struct _gilstate_runtime_state *gilstate, PyThreadState *newts);
+
+
static _PyInitError
_PyRuntimeState_Init_impl(_PyRuntimeState *runtime)
{
@@ -137,9 +144,10 @@ _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
}
}
-#define HEAD_LOCK() PyThread_acquire_lock(_PyRuntime.interpreters.mutex, \
- WAIT_LOCK)
-#define HEAD_UNLOCK() PyThread_release_lock(_PyRuntime.interpreters.mutex)
+#define HEAD_LOCK(runtime) \
+ PyThread_acquire_lock((runtime)->interpreters.mutex, WAIT_LOCK)
+#define HEAD_UNLOCK(runtime) \
+ PyThread_release_lock((runtime)->interpreters.mutex)
/* Forward declaration */
static void _PyGILState_NoteThreadState(
@@ -148,21 +156,22 @@ static void _PyGILState_NoteThreadState(
_PyInitError
_PyInterpreterState_Enable(_PyRuntimeState *runtime)
{
- runtime->interpreters.next_id = 0;
+ struct pyinterpreters *interpreters = &runtime->interpreters;
+ interpreters->next_id = 0;
/* Py_Finalize() calls _PyRuntimeState_Fini() which clears the mutex.
Create a new mutex if needed. */
- if (runtime->interpreters.mutex == NULL) {
+ if (interpreters->mutex == NULL) {
/* Force default allocator, since _PyRuntimeState_Fini() must
use the same allocator than this function. */
PyMemAllocatorEx old_alloc;
_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
- runtime->interpreters.mutex = PyThread_allocate_lock();
+ interpreters->mutex = PyThread_allocate_lock();
PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
- if (runtime->interpreters.mutex == NULL) {
+ if (interpreters->mutex == NULL) {
return _Py_INIT_ERR("Can't initialize threads for interpreter");
}
}
@@ -173,9 +182,7 @@ _PyInterpreterState_Enable(_PyRuntimeState *runtime)
PyInterpreterState *
PyInterpreterState_New(void)
{
- PyInterpreterState *interp = (PyInterpreterState *)
- PyMem_RawMalloc(sizeof(PyInterpreterState));
-
+ PyInterpreterState *interp = PyMem_RawMalloc(sizeof(PyInterpreterState));
if (interp == NULL) {
return NULL;
}
@@ -193,23 +200,27 @@ PyInterpreterState_New(void)
#endif
#endif
- HEAD_LOCK();
- if (_PyRuntime.interpreters.next_id < 0) {
+ _PyRuntimeState *runtime = &_PyRuntime;
+ struct pyinterpreters *interpreters = &runtime->interpreters;
+
+ HEAD_LOCK(runtime);
+ if (interpreters->next_id < 0) {
/* overflow or Py_Initialize() not called! */
PyErr_SetString(PyExc_RuntimeError,
"failed to get an interpreter ID");
PyMem_RawFree(interp);
interp = NULL;
- } else {
- interp->id = _PyRuntime.interpreters.next_id;
- _PyRuntime.interpreters.next_id += 1;
- interp->next = _PyRuntime.interpreters.head;
- if (_PyRuntime.interpreters.main == NULL) {
- _PyRuntime.interpreters.main = interp;
+ }
+ else {
+ interp->id = interpreters->next_id;
+ interpreters->next_id += 1;
+ interp->next = interpreters->head;
+ if (interpreters->main == NULL) {
+ interpreters->main = interp;
}
- _PyRuntime.interpreters.head = interp;
+ interpreters->head = interp;
}
- HEAD_UNLOCK();
+ HEAD_UNLOCK(runtime);
if (interp == NULL) {
return NULL;
@@ -221,14 +232,14 @@ PyInterpreterState_New(void)
}
-void
-PyInterpreterState_Clear(PyInterpreterState *interp)
+static void
+_PyInterpreterState_Clear(_PyRuntimeState *runtime, PyInterpreterState *interp)
{
- PyThreadState *p;
- HEAD_LOCK();
- for (p = interp->tstate_head; p != NULL; p = p->next)
+ HEAD_LOCK(runtime);
+ for (PyThreadState *p = interp->tstate_head; p != NULL; p = p->next) {
PyThreadState_Clear(p);
- HEAD_UNLOCK();
+ }
+ HEAD_UNLOCK(runtime);
_PyCoreConfig_Clear(&interp->core_config);
Py_CLEAR(interp->codec_search_path);
Py_CLEAR(interp->codec_search_cache);
@@ -251,41 +262,52 @@ PyInterpreterState_Clear(PyInterpreterState *interp)
// objects have been cleaned up at the point.
}
+void
+PyInterpreterState_Clear(PyInterpreterState *interp)
+{
+ _PyInterpreterState_Clear(&_PyRuntime, interp);
+}
+
static void
-zapthreads(PyInterpreterState *interp)
+zapthreads(_PyRuntimeState *runtime, PyInterpreterState *interp)
{
PyThreadState *p;
/* No need to lock the mutex here because this should only happen
when the threads are all really dead (XXX famous last words). */
while ((p = interp->tstate_head) != NULL) {
- PyThreadState_Delete(p);
+ _PyThreadState_Delete(runtime, p);
}
}
-void
-PyInterpreterState_Delete(PyInterpreterState *interp)
+static void
+_PyInterpreterState_Delete(_PyRuntimeState *runtime,
+ PyInterpreterState *interp)
{
+ struct pyinterpreters *interpreters = &runtime->interpreters;
+ zapthreads(runtime, interp);
+ HEAD_LOCK(runtime);
PyInterpreterState **p;
- zapthreads(interp);
- HEAD_LOCK();
- for (p = &_PyRuntime.interpreters.head; ; p = &(*p)->next) {
- if (*p == NULL)
- Py_FatalError(
- "PyInterpreterState_Delete: invalid interp");
- if (*p == interp)
+ for (p = &interpreters->head; ; p = &(*p)->next) {
+ if (*p == NULL) {
+ Py_FatalError("PyInterpreterState_Delete: invalid interp");
+ }
+ if (*p == interp) {
break;
+ }
}
- if (interp->tstate_head != NULL)
+ if (interp->tstate_head != NULL) {
Py_FatalError("PyInterpreterState_Delete: remaining threads");
+ }
*p = interp->next;
- if (_PyRuntime.interpreters.main == interp) {
- _PyRuntime.interpreters.main = NULL;
- if (_PyRuntime.interpreters.head != NULL)
+ if (interpreters->main == interp) {
+ interpreters->main = NULL;
+ if (interpreters->head != NULL) {
Py_FatalError("PyInterpreterState_Delete: remaining subinterpreters");
+ }
}
- HEAD_UNLOCK();
+ HEAD_UNLOCK(runtime);
if (interp->id_mutex != NULL) {
PyThread_free_lock(interp->id_mutex);
}
@@ -293,6 +315,13 @@ PyInterpreterState_Delete(PyInterpreterState *interp)
}
+void
+PyInterpreterState_Delete(PyInterpreterState *interp)
+{
+ _PyInterpreterState_Delete(&_PyRuntime, interp);
+}
+
+
/*
* Delete all interpreter states except the main interpreter. If there
* is a current interpreter state, it *must* be the main interpreter.
@@ -300,14 +329,15 @@ PyInterpreterState_Delete(PyInterpreterState *interp)
void
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
{
+ struct _gilstate_runtime_state *gilstate = &runtime->gilstate;
struct pyinterpreters *interpreters = &runtime->interpreters;
- PyThreadState *tstate = PyThreadState_Swap(NULL);
+ PyThreadState *tstate = _PyThreadState_Swap(gilstate, NULL);
if (tstate != NULL && tstate->interp != interpreters->main) {
Py_FatalError("PyInterpreterState_DeleteExceptMain: not main interpreter");
}
- HEAD_LOCK();
+ HEAD_LOCK(runtime);
PyInterpreterState *interp = interpreters->head;
interpreters->head = NULL;
while (interp != NULL) {
@@ -318,8 +348,8 @@ _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
continue;
}
- PyInterpreterState_Clear(interp); // XXX must activate?
- zapthreads(interp);
+ _PyInterpreterState_Clear(runtime, interp); // XXX must activate?
+ zapthreads(runtime, interp);
if (interp->id_mutex != NULL) {
PyThread_free_lock(interp->id_mutex);
}
@@ -327,12 +357,12 @@ _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
interp = interp->next;
PyMem_RawFree(prev_interp);
}
- HEAD_UNLOCK();
+ HEAD_UNLOCK(runtime);
if (interpreters->head == NULL) {
Py_FatalError("PyInterpreterState_DeleteExceptMain: missing main");
}
- PyThreadState_Swap(tstate);
+ _PyThreadState_Swap(gilstate, tstate);
}
@@ -363,9 +393,9 @@ PyInterpreterState_GetID(PyInterpreterState *interp)
static PyInterpreterState *
-interp_look_up_id(PY_INT64_T requested_id)
+interp_look_up_id(_PyRuntimeState *runtime, PY_INT64_T requested_id)
{
- PyInterpreterState *interp = PyInterpreterState_Head();
+ PyInterpreterState *interp = runtime->interpreters.head;
while (interp != NULL) {
PY_INT64_T id = PyInterpreterState_GetID(interp);
if (id < 0) {
@@ -384,9 +414,10 @@ _PyInterpreterState_LookUpID(PY_INT64_T requested_id)
{
PyInterpreterState *interp = NULL;
if (requested_id >= 0) {
- HEAD_LOCK();
- interp = interp_look_up_id(requested_id);
- HEAD_UNLOCK();
+ _PyRuntimeState *runtime = &_PyRuntime;
+ HEAD_LOCK(runtime);
+ interp = interp_look_up_id(runtime, requested_id);
+ HEAD_UNLOCK(runtime);
}
if (interp == NULL && !PyErr_Occurred()) {
PyErr_Format(PyExc_RuntimeError,
@@ -431,6 +462,7 @@ _PyInterpreterState_IDDecref(PyInterpreterState *interp)
if (interp->id_mutex == NULL) {
return;
}
+ struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
PyThread_acquire_lock(interp->id_mutex, WAIT_LOCK);
assert(interp->id_refcount != 0);
interp->id_refcount -= 1;
@@ -441,9 +473,9 @@ _PyInterpreterState_IDDecref(PyInterpreterState *interp)
// XXX Using the "head" thread isn't strictly correct.
PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
// XXX Possible GILState issues?
- PyThreadState *save_tstate = PyThreadState_Swap(tstate);
+ PyThreadState *save_tstate = _PyThreadState_Swap(gilstate, tstate);
Py_EndInterpreter(tstate);
- PyThreadState_Swap(save_tstate);
+ _PyThreadState_Swap(gilstate, save_tstate);
}
}
@@ -498,6 +530,7 @@ threadstate_getframe(PyThreadState *self)
static PyThreadState *
new_threadstate(PyInterpreterState *interp, int init)
{
+ _PyRuntimeState *runtime = &_PyRuntime;
PyThreadState *tstate = (PyThreadState *)PyMem_RawMalloc(sizeof(PyThreadState));
if (tstate == NULL) {
return NULL;
@@ -556,16 +589,16 @@ new_threadstate(PyInterpreterState *interp, int init)
tstate->id = ++interp->tstate_next_unique_id;
if (init) {
- _PyThreadState_Init(&_PyRuntime, tstate);
+ _PyThreadState_Init(runtime, tstate);
}
- HEAD_LOCK();
+ HEAD_LOCK(runtime);
tstate->prev = NULL;
tstate->next = interp->tstate_head;
if (tstate->next)
tstate->next->prev = tstate;
interp->tstate_head = tstate;
- HEAD_UNLOCK();
+ HEAD_UNLOCK(runtime);
return tstate;
}
@@ -750,22 +783,23 @@ PyThreadState_Clear(PyThreadState *tstate)
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
static void
-tstate_delete_common(PyThreadState *tstate)
+tstate_delete_common(_PyRuntimeState *runtime, PyThreadState *tstate)
{
- PyInterpreterState *interp;
- if (tstate == NULL)
+ if (tstate == NULL) {
Py_FatalError("PyThreadState_Delete: NULL tstate");
- interp = tstate->interp;
- if (interp == NULL)
+ }
+ PyInterpreterState *interp = tstate->interp;
+ if (interp == NULL) {
Py_FatalError("PyThreadState_Delete: NULL interp");
- HEAD_LOCK();
+ }
+ HEAD_LOCK(runtime);
if (tstate->prev)
tstate->prev->next = tstate->next;
else
interp->tstate_head = tstate->next;
if (tstate->next)
tstate->next->prev = tstate->prev;
- HEAD_UNLOCK();
+ HEAD_UNLOCK(runtime);
if (tstate->on_delete != NULL) {
tstate->on_delete(tstate->on_delete_data);
}
@@ -773,37 +807,53 @@ tstate_delete_common(PyThreadState *tstate)
}
-void
-PyThreadState_Delete(PyThreadState *tstate)
+static void
+_PyThreadState_Delete(_PyRuntimeState *runtime, PyThreadState *tstate)
{
- if (tstate == _PyThreadState_GET())
+ struct _gilstate_runtime_state *gilstate = &runtime->gilstate;
+ if (tstate == _PyRuntimeGILState_GetThreadState(gilstate)) {
Py_FatalError("PyThreadState_Delete: tstate is still current");
- if (_PyRuntime.gilstate.autoInterpreterState &&
- PyThread_tss_get(&_PyRuntime.gilstate.autoTSSkey) == tstate)
+ }
+ if (gilstate->autoInterpreterState &&
+ PyThread_tss_get(&gilstate->autoTSSkey) == tstate)
{
- PyThread_tss_set(&_PyRuntime.gilstate.autoTSSkey, NULL);
+ PyThread_tss_set(&gilstate->autoTSSkey, NULL);
}
- tstate_delete_common(tstate);
+ tstate_delete_common(runtime, tstate);
}
void
-PyThreadState_DeleteCurrent()
+PyThreadState_Delete(PyThreadState *tstate)
{
- PyThreadState *tstate = _PyThreadState_GET();
+ return _PyThreadState_Delete(&_PyRuntime, tstate);
+}
+
+
+static void
+_PyThreadState_DeleteCurrent(_PyRuntimeState *runtime)
+{
+ struct _gilstate_runtime_state *gilstate = &runtime->gilstate;
+ PyThreadState *tstate = _PyRuntimeGILState_GetThreadState(gilstate);
if (tstate == NULL)
Py_FatalError(
"PyThreadState_DeleteCurrent: no current tstate");
- tstate_delete_common(tstate);
- if (_PyRuntime.gilstate.autoInterpreterState &&
- PyThread_tss_get(&_PyRuntime.gilstate.autoTSSkey) == tstate)
+ tstate_delete_common(runtime, tstate);
+ if (gilstate->autoInterpreterState &&
+ PyThread_tss_get(&gilstate->autoTSSkey) == tstate)
{
- PyThread_tss_set(&_PyRuntime.gilstate.autoTSSkey, NULL);
+ PyThread_tss_set(&gilstate->autoTSSkey, NULL);
}
- _PyThreadState_SET(NULL);
+ _PyRuntimeGILState_SetThreadState(gilstate, NULL);
PyEval_ReleaseLock();
}
+void
+PyThreadState_DeleteCurrent()
+{
+ _PyThreadState_DeleteCurrent(&_PyRuntime);
+}
+
/*
* Delete all thread states except the one passed as argument.
@@ -815,9 +865,10 @@ PyThreadState_DeleteCurrent()
void
_PyThreadState_DeleteExcept(PyThreadState *tstate)
{
+ _PyRuntimeState *runtime = &_PyRuntime;
PyInterpreterState *interp = tstate->interp;
PyThreadState *p, *next, *garbage;
- HEAD_LOCK();
+ HEAD_LOCK(runtime);
/* Remove all thread states, except tstate, from the linked list of
thread states. This will allow calling PyThreadState_Clear()
without holding the lock. */
@@ -830,7 +881,7 @@ _PyThreadState_DeleteExcept(PyThreadState *tstate)
tstate->next->prev = tstate->prev;
tstate->prev = tstate->next = NULL;
interp->tstate_head = tstate;
- HEAD_UNLOCK();
+ HEAD_UNLOCK(runtime);
/* Clear and deallocate all stale thread states. Even if this
executes Python code, we should be safe since it executes
in the current thread, not one of the stale threads. */
@@ -860,12 +911,12 @@ PyThreadState_Get(void)
}
-PyThreadState *
-PyThreadState_Swap(PyThreadState *newts)
+static PyThreadState *
+_PyThreadState_Swap(struct _gilstate_runtime_state *gilstate, PyThreadState *newts)
{
- PyThreadState *oldts = _PyThreadState_GET();
+ PyThreadState *oldts = _PyRuntimeGILState_GetThreadState(gilstate);
- _PyThreadState_SET(newts);
+ _PyRuntimeGILState_SetThreadState(gilstate, newts);
/* It should not be possible for more than one thread state
to be used for a thread. Check this the best we can in debug
builds.
@@ -876,7 +927,7 @@ PyThreadState_Swap(PyThreadState *newts)
to it, we need to ensure errno doesn't change.
*/
int err = errno;
- PyThreadState *check = PyGILState_GetThisThreadState();
+ PyThreadState *check = _PyGILState_GetThisThreadState(gilstate);
if (check && check->interp == newts->interp && check != newts)
Py_FatalError("Invalid thread state for this thread");
errno = err;
@@ -885,6 +936,12 @@ PyThreadState_Swap(PyThreadState *newts)
return oldts;
}
+PyThreadState *
+PyThreadState_Swap(PyThreadState *newts)
+{
+ return _PyThreadState_Swap(&_PyRuntime.gilstate, newts);
+}
+
/* An extension mechanism to store arbitrary additional per-thread state.
PyThreadState_GetDict() returns a dictionary that can be used to hold such
state; the caller should pick a unique key and store its state there. If
@@ -928,7 +985,8 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
* list of thread states we're traversing, so to prevent that we lock
* head_mutex for the duration.
*/
- HEAD_LOCK();
+ _PyRuntimeState *runtime = &_PyRuntime;
+ HEAD_LOCK(runtime);
for (p = interp->tstate_head; p != NULL; p = p->next) {
if (p->thread_id == id) {
/* Tricky: we need to decref the current value
@@ -941,13 +999,13 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
PyObject *old_exc = p->async_exc;
Py_XINCREF(exc);
p->async_exc = exc;
- HEAD_UNLOCK();
+ HEAD_UNLOCK(runtime);
Py_XDECREF(old_exc);
_PyEval_SignalAsyncExc();
return 1;
}
}
- HEAD_UNLOCK();
+ HEAD_UNLOCK(runtime);
return 0;
}
@@ -1003,8 +1061,9 @@ _PyThread_CurrentFrames(void)
* Because these lists can mutate even when the GIL is held, we
* need to grab head_mutex for the duration.
*/
- HEAD_LOCK();
- for (i = _PyRuntime.interpreters.head; i != NULL; i = i->next) {
+ _PyRuntimeState *runtime = &_PyRuntime;
+ HEAD_LOCK(runtime);
+ for (i = runtime->interpreters.head; i != NULL; i = i->next) {
PyThreadState *t;
for (t = i->tstate_head; t != NULL; t = t->next) {
PyObject *id;
@@ -1021,11 +1080,11 @@ _PyThread_CurrentFrames(void)
goto Fail;
}
}
- HEAD_UNLOCK();
+ HEAD_UNLOCK(runtime);
return result;
Fail:
- HEAD_UNLOCK();
+ HEAD_UNLOCK(runtime);
Py_DECREF(result);
return NULL;
}
@@ -1044,8 +1103,9 @@ static int
PyThreadState_IsCurrent(PyThreadState *tstate)
{
/* Must be the tstate for this thread */
- assert(PyGILState_GetThisThreadState()==tstate);
- return tstate == _PyThreadState_GET();
+ struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
+ assert(_PyGILState_GetThisThreadState(gilstate) == tstate);
+ return tstate == _PyRuntimeGILState_GetThreadState(gilstate);
}
/* Internal initialization/finalization functions called by
@@ -1093,7 +1153,7 @@ void
_PyGILState_Reinit(_PyRuntimeState *runtime)
{
struct _gilstate_runtime_state *gilstate = &runtime->gilstate;
- PyThreadState *tstate = PyGILState_GetThisThreadState();
+ PyThreadState *tstate = _PyGILState_GetThisThreadState(gilstate);
PyThread_tss_delete(&gilstate->autoTSSkey);
if (PyThread_tss_create(&gilstate->autoTSSkey) != 0) {
@@ -1147,36 +1207,45 @@ _PyGILState_NoteThreadState(struct _gilstate_runtime_state *gilstate, PyThreadSt
}
/* The public functions */
+static PyThreadState *
+_PyGILState_GetThisThreadState(struct _gilstate_runtime_state *gilstate)
+{
+ if (gilstate->autoInterpreterState == NULL)
+ return NULL;
+ return (PyThreadState *)PyThread_tss_get(&gilstate->autoTSSkey);
+}
+
PyThreadState *
PyGILState_GetThisThreadState(void)
{
- if (_PyRuntime.gilstate.autoInterpreterState == NULL)
- return NULL;
- return (PyThreadState *)PyThread_tss_get(&_PyRuntime.gilstate.autoTSSkey);
+ return _PyGILState_GetThisThreadState(&_PyRuntime.gilstate);
}
int
PyGILState_Check(void)
{
- PyThreadState *tstate;
- if (!_PyGILState_check_enabled)
+ if (!_PyGILState_check_enabled) {
return 1;
+ }
- if (!PyThread_tss_is_created(&_PyRuntime.gilstate.autoTSSkey)) {
+ struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
+ if (!PyThread_tss_is_created(&gilstate->autoTSSkey)) {
return 1;
}
- tstate = _PyThreadState_GET();
- if (tstate == NULL)
+ PyThreadState *tstate = _PyRuntimeGILState_GetThreadState(gilstate);
+ if (tstate == NULL) {
return 0;
+ }
- return (tstate == PyGILState_GetThisThreadState());
+ return (tstate == _PyGILState_GetThisThreadState(gilstate));
}
PyGILState_STATE
PyGILState_Ensure(void)
{
+ struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
int current;
PyThreadState *tcur;
int need_init_threads = 0;
@@ -1187,14 +1256,14 @@ PyGILState_Ensure(void)
called Py_Initialize() and usually PyEval_InitThreads().
*/
/* Py_Initialize() hasn't been called! */
- assert(_PyRuntime.gilstate.autoInterpreterState);
+ assert(gilstate->autoInterpreterState);
- tcur = (PyThreadState *)PyThread_tss_get(&_PyRuntime.gilstate.autoTSSkey);
+ tcur = (PyThreadState *)PyThread_tss_get(&gilstate->autoTSSkey);
if (tcur == NULL) {
need_init_threads = 1;
/* Create a new thread state for this thread */
- tcur = PyThreadState_New(_PyRuntime.gilstate.autoInterpreterState);
+ tcur = PyThreadState_New(gilstate->autoInterpreterState);
if (tcur == NULL)
Py_FatalError("Couldn't create thread-state for new thread");
/* This is our thread state! We'll need to delete it in the
@@ -1230,18 +1299,21 @@ PyGILState_Ensure(void)
void
PyGILState_Release(PyGILState_STATE oldstate)
{
- PyThreadState *tcur = (PyThreadState *)PyThread_tss_get(
- &_PyRuntime.gilstate.autoTSSkey);
- if (tcur == NULL)
+ _PyRuntimeState *runtime = &_PyRuntime;
+ PyThreadState *tcur = PyThread_tss_get(&runtime->gilstate.autoTSSkey);
+ if (tcur == NULL) {
Py_FatalError("auto-releasing thread-state, "
"but no thread-state for this thread");
+ }
+
/* We must hold the GIL and have our thread state current */
/* XXX - remove the check - the assert should be fine,
but while this is very new (April 2003), the extra check
by release-only users can't hurt.
*/
- if (! PyThreadState_IsCurrent(tcur))
+ if (!PyThreadState_IsCurrent(tcur)) {
Py_FatalError("This thread state must be current when releasing");
+ }
assert(PyThreadState_IsCurrent(tcur));
--tcur->gilstate_counter;
assert(tcur->gilstate_counter >= 0); /* illegal counter value */
@@ -1258,7 +1330,7 @@ PyGILState_Release(PyGILState_STATE oldstate)
* races; see bugs 225673 and 1061968 (that nasty bug has a
* habit of coming back).
*/
- PyThreadState_DeleteCurrent();
+ _PyThreadState_DeleteCurrent(runtime);
}
/* Release the lock if necessary */
else if (oldstate == PyGILState_UNLOCKED)
@@ -1363,7 +1435,8 @@ _release_xidata(void *arg)
}
static void
-_call_in_interpreter(PyInterpreterState *interp,
+_call_in_interpreter(struct _gilstate_runtime_state *gilstate,
+ PyInterpreterState *interp,
void (*func)(void *), void *arg)
{
/* We would use Py_AddPendingCall() if it weren't specific to the
@@ -1371,18 +1444,18 @@ _call_in_interpreter(PyInterpreterState *interp,
* naive approach.
*/
PyThreadState *save_tstate = NULL;
- if (interp != _PyInterpreterState_Get()) {
+ if (interp != _PyRuntimeGILState_GetThreadState(gilstate)->interp) {
// XXX Using the "head" thread isn't strictly correct.
PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
// XXX Possible GILState issues?
- save_tstate = PyThreadState_Swap(tstate);
+ save_tstate = _PyThreadState_Swap(gilstate, tstate);
}
func(arg);
// Switch back.
if (save_tstate != NULL) {
- PyThreadState_Swap(save_tstate);
+ _PyThreadState_Swap(gilstate, save_tstate);
}
}
@@ -1405,7 +1478,8 @@ _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
}
// "Release" the data and/or the object.
- _call_in_interpreter(interp, _release_xidata, data);
+ struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
+ _call_in_interpreter(gilstate, interp, _release_xidata, data);
}
PyObject *
@@ -1421,7 +1495,8 @@ _PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *data)
crossinterpdatafunc. It would be simpler and more efficient. */
static int
-_register_xidata(PyTypeObject *cls, crossinterpdatafunc getdata)
+_register_xidata(struct _xidregistry *xidregistry, PyTypeObject *cls,
+ crossinterpdatafunc getdata)
{
// Note that we effectively replace already registered classes
// rather than failing.
@@ -1430,12 +1505,12 @@ _register_xidata(PyTypeObject *cls, crossinterpdatafunc getdata)
return -1;
newhead->cls = cls;
newhead->getdata = getdata;
- newhead->next = _PyRuntime.xidregistry.head;
- _PyRuntime.xidregistry.head = newhead;
+ newhead->next = xidregistry->head;
+ xidregistry->head = newhead;
return 0;
}
-static void _register_builtins_for_crossinterpreter_data(void);
+static void _register_builtins_for_crossinterpreter_data(struct _xidregistry *xidregistry);
int
_PyCrossInterpreterData_RegisterClass(PyTypeObject *cls,
@@ -1453,12 +1528,13 @@ _PyCrossInterpreterData_RegisterClass(PyTypeObject *cls,
// Make sure the class isn't ever deallocated.
Py_INCREF((PyObject *)cls);
- PyThread_acquire_lock(_PyRuntime.xidregistry.mutex, WAIT_LOCK);
- if (_PyRuntime.xidregistry.head == NULL) {
- _register_builtins_for_crossinterpreter_data();
+ struct _xidregistry *xidregistry = &_PyRuntime.xidregistry ;
+ PyThread_acquire_lock(xidregistry->mutex, WAIT_LOCK);
+ if (xidregistry->head == NULL) {
+ _register_builtins_for_crossinterpreter_data(xidregistry);
}
- int res = _register_xidata(cls, getdata);
- PyThread_release_lock(_PyRuntime.xidregistry.mutex);
+ int res = _register_xidata(xidregistry, cls, getdata);
+ PyThread_release_lock(xidregistry->mutex);
return res;
}
@@ -1469,13 +1545,14 @@ _PyCrossInterpreterData_RegisterClass(PyTypeObject *cls,
crossinterpdatafunc
_PyCrossInterpreterData_Lookup(PyObject *obj)
{
+ struct _xidregistry *xidregistry = &_PyRuntime.xidregistry ;
PyObject *cls = PyObject_Type(obj);
crossinterpdatafunc getdata = NULL;
- PyThread_acquire_lock(_PyRuntime.xidregistry.mutex, WAIT_LOCK);
- struct _xidregitem *cur = _PyRuntime.xidregistry.head;
+ PyThread_acquire_lock(xidregistry->mutex, WAIT_LOCK);
+ struct _xidregitem *cur = xidregistry->head;
if (cur == NULL) {
- _register_builtins_for_crossinterpreter_data();
- cur = _PyRuntime.xidregistry.head;
+ _register_builtins_for_crossinterpreter_data(xidregistry);
+ cur = xidregistry->head;
}
for(; cur != NULL; cur = cur->next) {
if (cur->cls == (PyTypeObject *)cls) {
@@ -1484,7 +1561,7 @@ _PyCrossInterpreterData_Lookup(PyObject *obj)
}
}
Py_DECREF(cls);
- PyThread_release_lock(_PyRuntime.xidregistry.mutex);
+ PyThread_release_lock(xidregistry->mutex);
return getdata;
}
@@ -1591,25 +1668,25 @@ _none_shared(PyObject *obj, _PyCrossInterpreterData *data)
}
static void
-_register_builtins_for_crossinterpreter_data(void)
+_register_builtins_for_crossinterpreter_data(struct _xidregistry *xidregistry)
{
// None
- if (_register_xidata((PyTypeObject *)PyObject_Type(Py_None), _none_shared) != 0) {
+ if (_register_xidata(xidregistry, (PyTypeObject *)PyObject_Type(Py_None), _none_shared) != 0) {
Py_FatalError("could not register None for cross-interpreter sharing");
}
// int
- if (_register_xidata(&PyLong_Type, _long_shared) != 0) {
+ if (_register_xidata(xidregistry, &PyLong_Type, _long_shared) != 0) {
Py_FatalError("could not register int for cross-interpreter sharing");
}
// bytes
- if (_register_xidata(&PyBytes_Type, _bytes_shared) != 0) {
+ if (_register_xidata(xidregistry, &PyBytes_Type, _bytes_shared) != 0) {
Py_FatalError("could not register bytes for cross-interpreter sharing");
}
// str
- if (_register_xidata(&PyUnicode_Type, _str_shared) != 0) {
+ if (_register_xidata(xidregistry, &PyUnicode_Type, _str_shared) != 0) {
Py_FatalError("could not register str for cross-interpreter sharing");
}
}
1
0
bpo-36722: Debug build loads libraries built in release mode (GH-12952)
by Victor Stinner 25 Apr '19
by Victor Stinner 25 Apr '19
25 Apr '19
https://github.com/python/cpython/commit/5422e3cfb7ffc50b147b4662d6f596cd61533754
commit: 5422e3cfb7ffc50b147b4662d6f596cd61533754
branch: master
author: Victor Stinner <vstinner(a)redhat.com>
committer: GitHub <noreply(a)github.com>
date: 2019-04-26T01:40:00+02:00
summary:
bpo-36722: Debug build loads libraries built in release mode (GH-12952)
In debug build, import now also looks for C extensions compiled in
release mode and for C extensions compiled in the stable ABI.
files:
A Misc/NEWS.d/next/Core and Builtins/2019-04-25-21-02-40.bpo-36722.8NApVM.rst
M Python/dynload_shlib.c
M configure
M configure.ac
M pyconfig.h.in
diff --git a/Misc/NEWS.d/next/Core and Builtins/2019-04-25-21-02-40.bpo-36722.8NApVM.rst b/Misc/NEWS.d/next/Core and Builtins/2019-04-25-21-02-40.bpo-36722.8NApVM.rst
new file mode 100644
index 000000000000..210a7e052592
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2019-04-25-21-02-40.bpo-36722.8NApVM.rst
@@ -0,0 +1,2 @@
+In debug build, import now also looks for C extensions compiled in release
+mode and for C extensions compiled in the stable ABI.
diff --git a/Python/dynload_shlib.c b/Python/dynload_shlib.c
index e5bddaab6caa..c51f97abd286 100644
--- a/Python/dynload_shlib.c
+++ b/Python/dynload_shlib.c
@@ -38,9 +38,10 @@ const char *_PyImport_DynLoadFiletab[] = {
".dll",
#else /* !__CYGWIN__ */
"." SOABI ".so",
-#ifndef Py_DEBUG
+#ifdef ALT_SOABI
+ "." ALT_SOABI ".so",
+#endif
".abi" PYTHON_ABI_STRING ".so",
-#endif /* ! Py_DEBUG */
".so",
#endif /* __CYGWIN__ */
NULL,
diff --git a/configure b/configure
index b02d17c053c6..b2775cf04000 100755
--- a/configure
+++ b/configure
@@ -632,6 +632,7 @@ THREADHEADERS
LIBPL
PY_ENABLE_SHARED
EXT_SUFFIX
+ALT_SOABI
SOABI
LIBC
LIBM
@@ -15127,6 +15128,17 @@ SOABI='cpython-'`echo $VERSION | tr -d .`${ABIFLAGS}${PLATFORM_TRIPLET:+-$PLATFO
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $SOABI" >&5
$as_echo "$SOABI" >&6; }
+if test "$Py_DEBUG" = 'true'; then
+ # Similar to SOABI but remove "d" flag from ABIFLAGS
+
+ ALT_SOABI='cpython-'`echo $VERSION | tr -d .``echo $ABIFLAGS | tr -d d`${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET}
+
+cat >>confdefs.h <<_ACEOF
+#define ALT_SOABI "${ALT_SOABI}"
+_ACEOF
+
+fi
+
case $ac_sys_system in
Linux*|GNU*|Darwin|VxWorks)
diff --git a/configure.ac b/configure.ac
index 65d3f8e69129..312758788e4d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4627,6 +4627,14 @@ AC_MSG_CHECKING(SOABI)
SOABI='cpython-'`echo $VERSION | tr -d .`${ABIFLAGS}${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET}
AC_MSG_RESULT($SOABI)
+if test "$Py_DEBUG" = 'true'; then
+ # Similar to SOABI but remove "d" flag from ABIFLAGS
+ AC_SUBST(ALT_SOABI)
+ ALT_SOABI='cpython-'`echo $VERSION | tr -d .``echo $ABIFLAGS | tr -d d`${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET}
+ AC_DEFINE_UNQUOTED(ALT_SOABI, "${ALT_SOABI}",
+ [Alternative SOABI used in debug build to load C extensions built in release mode])
+fi
+
AC_SUBST(EXT_SUFFIX)
case $ac_sys_system in
Linux*|GNU*|Darwin|VxWorks)
diff --git a/pyconfig.h.in b/pyconfig.h.in
index 562c0271133b..4b7796147274 100644
--- a/pyconfig.h.in
+++ b/pyconfig.h.in
@@ -12,6 +12,10 @@
support for AIX C++ shared extension modules. */
#undef AIX_GENUINE_CPLUSPLUS
+/* Alternative SOABI used in debug build to load C extensions built in release
+ mode */
+#undef ALT_SOABI
+
/* The Android API level. */
#undef ANDROID_API_LEVEL
1
0
https://github.com/python/cpython/commit/62dfd7d6fe11bfa0cd1d7376382c8e7b1275e38c
commit: 62dfd7d6fe11bfa0cd1d7376382c8e7b1275e38c
branch: master
author: Paul Monson <paulmon(a)users.noreply.github.com>
committer: Steve Dower <steve.dower(a)python.org>
date: 2019-04-25T18:36:45Z
summary:
bpo-35920: Windows 10 ARM32 platform support (GH-11774)
files:
A Misc/NEWS.d/next/Windows/2019-04-22-16-59-20.bpo-35920.VSfGOI.rst
M Doc/library/platform.rst
M Lib/distutils/_msvccompiler.py
M Lib/distutils/spawn.py
M Lib/distutils/sysconfig.py
M Lib/distutils/util.py
M Lib/platform.py
M Lib/sysconfig.py
M Lib/test/test_codecs.py
M Lib/test/test_mimetypes.py
M Lib/test/test_os.py
M Lib/test/test_startfile.py
M Lib/test/test_sundry.py
M Lib/test/test_winreg.py
M PC/bdist_wininst/bdist_wininst.vcxproj
M PCbuild/build.bat
M PCbuild/pcbuild.sln
diff --git a/Doc/library/platform.rst b/Doc/library/platform.rst
index 60c6089ad3cc..e07f9d613a0d 100644
--- a/Doc/library/platform.rst
+++ b/Doc/library/platform.rst
@@ -216,6 +216,21 @@ Windows Platform
later (support for this was added in Python 2.6). It obviously
only runs on Win32 compatible platforms.
+.. function:: win32_edition()
+
+ Returns a string representing the current Windows edition. Possible
+ values include but are not limited to ``'Enterprise'``, ``'IoTUAP'``,
+ ``'ServerStandard'``, and ``'nanoserver'``.
+
+ .. versionadded:: 3.8
+
+.. function:: win32_is_iot()
+
+ Returns True if the windows edition returned by win32_edition is recognized
+ as an IoT edition.
+
+ .. versionadded:: 3.8
+
Mac OS Platform
---------------
diff --git a/Lib/distutils/_msvccompiler.py b/Lib/distutils/_msvccompiler.py
index 58b20a210247..c7ac3f049ebf 100644
--- a/Lib/distutils/_msvccompiler.py
+++ b/Lib/distutils/_msvccompiler.py
@@ -89,13 +89,24 @@ def _find_vc2017():
return None, None
+PLAT_SPEC_TO_RUNTIME = {
+ 'x86' : 'x86',
+ 'x86_amd64' : 'x64',
+ 'x86_arm' : 'arm',
+}
+
def _find_vcvarsall(plat_spec):
_, best_dir = _find_vc2017()
vcruntime = None
- vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
+
+ if plat_spec in PLAT_SPEC_TO_RUNTIME:
+ vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec]
+ else:
+ vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
+
if best_dir:
vcredist = os.path.join(best_dir, "..", "..", "redist", "MSVC", "**",
- "Microsoft.VC141.CRT", "vcruntime140.dll")
+ vcruntime_plat, "Microsoft.VC141.CRT", "vcruntime140.dll")
try:
import glob
vcruntime = glob.glob(vcredist, recursive=True)[-1]
@@ -178,6 +189,7 @@ def _find_exe(exe, paths=None):
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'x86_amd64',
+ 'win-arm32' : 'x86_arm',
}
# A set containing the DLLs that are guaranteed to be available for
diff --git a/Lib/distutils/spawn.py b/Lib/distutils/spawn.py
index d3a12c283397..ceb94945dc8b 100644
--- a/Lib/distutils/spawn.py
+++ b/Lib/distutils/spawn.py
@@ -81,7 +81,6 @@ def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0):
"command %r failed with exit status %d" % (cmd, rc))
if sys.platform == 'darwin':
- from distutils import sysconfig
_cfg_target = None
_cfg_target_split = None
@@ -95,6 +94,7 @@ def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
if sys.platform == 'darwin':
global _cfg_target, _cfg_target_split
if _cfg_target is None:
+ from distutils import sysconfig
_cfg_target = sysconfig.get_config_var(
'MACOSX_DEPLOYMENT_TARGET') or ''
if _cfg_target:
diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py
index 570a612d1b10..b51629eb94f8 100644
--- a/Lib/distutils/sysconfig.py
+++ b/Lib/distutils/sysconfig.py
@@ -15,6 +15,7 @@
import sys
from .errors import DistutilsPlatformError
+from .util import get_platform, get_host_platform
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
diff --git a/Lib/distutils/util.py b/Lib/distutils/util.py
index 15cd2ad9a9af..50550e189341 100644
--- a/Lib/distutils/util.py
+++ b/Lib/distutils/util.py
@@ -15,7 +15,7 @@
from distutils import log
from distutils.errors import DistutilsByteCompileError
-def get_platform ():
+def get_host_platform():
"""Return a string that identifies the current platform. This is used mainly to
distinguish platform-specific build directories and platform-specific built
distributions. Typically includes the OS name and version and the
@@ -38,6 +38,8 @@ def get_platform ():
if os.name == 'nt':
if 'amd64' in sys.version.lower():
return 'win-amd64'
+ if '(arm)' in sys.version.lower():
+ return 'win-arm32'
return sys.platform
# Set for cross builds explicitly
@@ -90,8 +92,16 @@ def get_platform ():
return "%s-%s-%s" % (osname, release, machine)
-# get_platform ()
-
+def get_platform():
+ if os.name == 'nt':
+ TARGET_TO_PLAT = {
+ 'x86' : 'win32',
+ 'x64' : 'win-amd64',
+ 'arm' : 'win-arm32',
+ }
+ return TARGET_TO_PLAT.get(os.environ.get('VSCMD_ARG_TGT_ARCH')) or get_host_platform()
+ else:
+ return get_host_platform()
def convert_path (pathname):
"""Return 'pathname' as a name that will work on the native filesystem,
diff --git a/Lib/platform.py b/Lib/platform.py
index 21defd1095d2..9f7bd95980a1 100755
--- a/Lib/platform.py
+++ b/Lib/platform.py
@@ -334,6 +334,27 @@ def _syscmd_ver(system='', release='', version='',
(6, None): "post2012ServerR2",
}
+def win32_is_iot():
+ return win32_edition() in ('IoTUAP', 'NanoServer', 'WindowsCoreHeadless', 'IoTEdgeOS')
+
+def win32_edition():
+ try:
+ try:
+ import winreg
+ except ImportError:
+ import _winreg as winreg
+ except ImportError:
+ pass
+ else:
+ try:
+ cvkey = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion'
+ with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, cvkey) as key:
+ return winreg.QueryValueEx(key, 'EditionId')[0]
+ except OSError:
+ pass
+
+ return None
+
def win32_ver(release='', version='', csd='', ptype=''):
try:
from sys import getwindowsversion
diff --git a/Lib/sysconfig.py b/Lib/sysconfig.py
index cc8c7962b1bc..8446c8deb242 100644
--- a/Lib/sysconfig.py
+++ b/Lib/sysconfig.py
@@ -626,6 +626,8 @@ def get_platform():
if os.name == 'nt':
if 'amd64' in sys.version.lower():
return 'win-amd64'
+ if '(arm)' in sys.version.lower():
+ return 'win-arm32'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index 05843c54bd5f..027a84e275e3 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -27,6 +27,26 @@ def check(input, expect):
self.assertEqual(coder(input), (expect, len(input)))
return check
+# On small versions of Windows like Windows IoT or Windows Nano Server not all codepages are present
+def is_code_page_present(cp):
+ from ctypes import POINTER, WINFUNCTYPE, windll, WinError, Structure, WinDLL
+ from ctypes.wintypes import BOOL, UINT, BYTE, WCHAR, UINT, DWORD
+
+ MAX_LEADBYTES = 12 # 5 ranges, 2 bytes ea., 0 term.
+ MAX_DEFAULTCHAR = 2 # single or double byte
+ MAX_PATH = 260
+ class CPINFOEXW(ctypes.Structure):
+ _fields_ = [("MaxCharSize", UINT),
+ ("DefaultChar", BYTE*MAX_DEFAULTCHAR),
+ ("LeadByte", BYTE*MAX_LEADBYTES),
+ ("UnicodeDefaultChar", WCHAR),
+ ("CodePage", UINT),
+ ("CodePageName", WCHAR*MAX_PATH)]
+
+ prototype = WINFUNCTYPE(BOOL, UINT, DWORD, POINTER(CPINFOEXW))
+ GetCPInfoEx = prototype(("GetCPInfoExW", WinDLL("kernel32")))
+ info = CPINFOEXW()
+ return GetCPInfoEx(cp, 0, info)
class Queue(object):
"""
@@ -3078,9 +3098,19 @@ def test_multibyte_encoding(self):
def test_code_page_decode_flags(self):
# Issue #36312: For some code pages (e.g. UTF-7) flags for
# MultiByteToWideChar() must be set to 0.
+ if support.verbose:
+ sys.stdout.write('\n')
for cp in (50220, 50221, 50222, 50225, 50227, 50229,
*range(57002, 57011+1), 65000):
- self.assertEqual(codecs.code_page_decode(cp, b'abc'), ('abc', 3))
+ # On small versions of Windows like Windows IoT
+ # not all codepages are present.
+ # A missing codepage causes an OSError exception
+ # so check for the codepage before decoding
+ if is_code_page_present(cp):
+ self.assertEqual(codecs.code_page_decode(cp, b'abc'), ('abc', 3), f'cp{cp}')
+ else:
+ if support.verbose:
+ print(f" skipping cp={cp}")
self.assertEqual(codecs.code_page_decode(42, b'abc'),
('\uf061\uf062\uf063', 3))
diff --git a/Lib/test/test_mimetypes.py b/Lib/test/test_mimetypes.py
index 554d3d5cead5..c4b2fe2047a7 100644
--- a/Lib/test/test_mimetypes.py
+++ b/Lib/test/test_mimetypes.py
@@ -6,6 +6,7 @@
import unittest
from test import support
+from platform import win32_edition
# Tell it we don't know about external files:
mimetypes.knownfiles = []
@@ -116,6 +117,8 @@ def tearDown(self):
mimetypes.types_map.clear()
mimetypes.types_map.update(self.original_types_map)
+ @unittest.skipIf(win32_edition() in ('NanoServer', 'WindowsCoreHeadless', 'IoTEdgeOS'),
+ "MIME types registry keys unavailable")
def test_registry_parsing(self):
# the original, minimum contents of the MIME database in the
# Windows registry is undocumented AFAIK.
diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py
index bbadb81069b9..a2021b1eba06 100644
--- a/Lib/test/test_os.py
+++ b/Lib/test/test_os.py
@@ -28,6 +28,7 @@
import uuid
import warnings
from test import support
+from platform import win32_is_iot
try:
import resource
@@ -2439,7 +2440,7 @@ def test_bad_fd(self):
# Return None when an fd doesn't actually exist.
self.assertIsNone(os.device_encoding(123456))
- @unittest.skipUnless(os.isatty(0) and (sys.platform.startswith('win') or
+ @unittest.skipUnless(os.isatty(0) and not win32_is_iot() and (sys.platform.startswith('win') or
(hasattr(locale, 'nl_langinfo') and hasattr(locale, 'CODESET'))),
'test requires a tty and either Windows or nl_langinfo(CODESET)')
def test_device_encoding(self):
diff --git a/Lib/test/test_startfile.py b/Lib/test/test_startfile.py
index f59252e97ad0..1a26a8025e62 100644
--- a/Lib/test/test_startfile.py
+++ b/Lib/test/test_startfile.py
@@ -10,6 +10,7 @@
import unittest
from test import support
import os
+import platform
import sys
from os import path
@@ -20,6 +21,7 @@ class TestCase(unittest.TestCase):
def test_nonexisting(self):
self.assertRaises(OSError, startfile, "nonexisting.vbs")
+ @unittest.skipIf(platform.win32_is_iot(), "starting files is not supported on Windows IoT Core or nanoserver")
def test_empty(self):
# We need to make sure the child process starts in a directory
# we're not about to delete. If we're running under -j, that
diff --git a/Lib/test/test_sundry.py b/Lib/test/test_sundry.py
index 6e36a6123daa..2accad1aeebd 100644
--- a/Lib/test/test_sundry.py
+++ b/Lib/test/test_sundry.py
@@ -1,5 +1,6 @@
"""Do a minimal test of all the modules that aren't otherwise tested."""
import importlib
+import platform
import sys
from test import support
import unittest
@@ -25,7 +26,7 @@ def test_untested_modules_can_be_imported(self):
import distutils.unixccompiler
import distutils.command.bdist_dumb
- if sys.platform.startswith('win'):
+ if sys.platform.startswith('win') and not platform.win32_is_iot():
import distutils.command.bdist_msi
import distutils.command.bdist
import distutils.command.bdist_rpm
diff --git a/Lib/test/test_winreg.py b/Lib/test/test_winreg.py
index 11d054e16cdb..dc2b46e42521 100644
--- a/Lib/test/test_winreg.py
+++ b/Lib/test/test_winreg.py
@@ -5,7 +5,7 @@
import unittest
from test import support
import threading
-from platform import machine
+from platform import machine, win32_edition
# Do this first so test will be skipped if module doesn't exist
support.import_module('winreg', required_on=['win'])
@@ -399,6 +399,7 @@ def test_named_arguments(self):
DeleteKeyEx(key=HKEY_CURRENT_USER, sub_key=test_key_name,
access=KEY_ALL_ACCESS, reserved=0)
+ @unittest.skipIf(win32_edition() in ('WindowsCoreHeadless', 'IoTEdgeOS'), "APIs not available on WindowsCoreHeadless")
def test_reflection_functions(self):
# Test that we can call the query, enable, and disable functions
# on a key which isn't on the reflection list with no consequences.
diff --git a/Misc/NEWS.d/next/Windows/2019-04-22-16-59-20.bpo-35920.VSfGOI.rst b/Misc/NEWS.d/next/Windows/2019-04-22-16-59-20.bpo-35920.VSfGOI.rst
new file mode 100644
index 000000000000..455e82450eb2
--- /dev/null
+++ b/Misc/NEWS.d/next/Windows/2019-04-22-16-59-20.bpo-35920.VSfGOI.rst
@@ -0,0 +1,3 @@
+Added platform.win32_edition() and platform.win32_is_iot(). Added support
+for cross-compiling packages for Windows ARM32. Skip tests that are not
+expected to work on Windows IoT Core ARM32.
diff --git a/PC/bdist_wininst/bdist_wininst.vcxproj b/PC/bdist_wininst/bdist_wininst.vcxproj
index 70bfb9c93379..d2f1bb75e30d 100644
--- a/PC/bdist_wininst/bdist_wininst.vcxproj
+++ b/PC/bdist_wininst/bdist_wininst.vcxproj
@@ -1,6 +1,10 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|ARM">
+ <Configuration>Debug</Configuration>
+ <Platform>ARM</Platform>
+ </ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
@@ -9,6 +13,10 @@
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
+ <ProjectConfiguration Include="PGInstrument|ARM">
+ <Configuration>PGInstrument</Configuration>
+ <Platform>ARM</Platform>
+ </ProjectConfiguration>
<ProjectConfiguration Include="PGInstrument|Win32">
<Configuration>PGInstrument</Configuration>
<Platform>Win32</Platform>
@@ -17,6 +25,10 @@
<Configuration>PGInstrument</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
+ <ProjectConfiguration Include="PGUpdate|ARM">
+ <Configuration>PGUpdate</Configuration>
+ <Platform>ARM</Platform>
+ </ProjectConfiguration>
<ProjectConfiguration Include="PGUpdate|Win32">
<Configuration>PGUpdate</Configuration>
<Platform>Win32</Platform>
@@ -25,6 +37,10 @@
<Configuration>PGUpdate</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
+ <ProjectConfiguration Include="Release|ARM">
+ <Configuration>Release</Configuration>
+ <Platform>ARM</Platform>
+ </ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
diff --git a/PCbuild/build.bat b/PCbuild/build.bat
index 759aa5221b42..cd0c07abbf35 100644
--- a/PCbuild/build.bat
+++ b/PCbuild/build.bat
@@ -41,7 +41,7 @@ echo.
echo.Available arguments:
echo. -c Release ^| Debug ^| PGInstrument ^| PGUpdate
echo. Set the configuration (default: Release)
-echo. -p x64 ^| Win32
+echo. -p x64 ^| Win32 ^| ARM
echo. Set the platform (default: Win32)
echo. -t Build ^| Rebuild ^| Clean ^| CleanAll
echo. Set the target manually
diff --git a/PCbuild/pcbuild.sln b/PCbuild/pcbuild.sln
index 951dc932a8e5..66be9ac7a4ad 100644
--- a/PCbuild/pcbuild.sln
+++ b/PCbuild/pcbuild.sln
@@ -597,16 +597,16 @@ Global
{D06B6426-4762-44CC-8BAD-D79052507F2F}.Release|Win32.Build.0 = Release|Win32
{D06B6426-4762-44CC-8BAD-D79052507F2F}.Release|x64.ActiveCfg = Release|x64
{D06B6426-4762-44CC-8BAD-D79052507F2F}.Release|x64.Build.0 = Release|x64
- {EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.Debug|ARM.ActiveCfg = Debug|Win32
+ {EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.Debug|ARM.ActiveCfg = Debug|ARM
{EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.Debug|Win32.ActiveCfg = Debug|Win32
{EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.Debug|x64.ActiveCfg = Release|x64
- {EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.PGInstrument|ARM.ActiveCfg = PGInstrument|Win32
+ {EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.PGInstrument|ARM.ActiveCfg = PGInstrument|ARM
{EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.PGInstrument|Win32.ActiveCfg = Release|Win32
{EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.PGInstrument|x64.ActiveCfg = Release|x64
- {EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.PGUpdate|ARM.ActiveCfg = PGUpdate|Win32
+ {EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.PGUpdate|ARM.ActiveCfg = PGUpdate|ARM
{EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.PGUpdate|Win32.ActiveCfg = Release|Win32
{EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.PGUpdate|x64.ActiveCfg = Release|x64
- {EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.Release|ARM.ActiveCfg = Release|Win32
+ {EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.Release|ARM.ActiveCfg = Release|ARM
{EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.Release|Win32.ActiveCfg = Release|Win32
{EB1C19C1-1F18-421E-9735-CAEE69DC6A3C}.Release|x64.ActiveCfg = Release|x64
{447F05A8-F581-4CAC-A466-5AC7936E207E}.Debug|ARM.ActiveCfg = Debug|ARM
@@ -896,6 +896,7 @@ Global
{0F6EE4A4-C75F-4578-B4B3-2D64F4B9B782}.PGUpdate|x64.ActiveCfg = PGUpdate|x64
{0F6EE4A4-C75F-4578-B4B3-2D64F4B9B782}.PGUpdate|x64.Build.0 = PGUpdate|x64
{0F6EE4A4-C75F-4578-B4B3-2D64F4B9B782}.Release|ARM.ActiveCfg = Release|ARM
+ {0F6EE4A4-C75F-4578-B4B3-2D64F4B9B782}.Release|ARM.Build.0 = Release|ARM
{0F6EE4A4-C75F-4578-B4B3-2D64F4B9B782}.Release|Win32.ActiveCfg = Release|Win32
{0F6EE4A4-C75F-4578-B4B3-2D64F4B9B782}.Release|Win32.Build.0 = Release|Win32
{0F6EE4A4-C75F-4578-B4B3-2D64F4B9B782}.Release|x64.ActiveCfg = Release|x64
1
0
25 Apr '19
https://github.com/python/cpython/commit/8c3ecc6bacc8d0cd534f2b5b53ed962dd1…
commit: 8c3ecc6bacc8d0cd534f2b5b53ed962dd1368c7b
branch: master
author: Victor Stinner <vstinner(a)redhat.com>
committer: GitHub <noreply(a)github.com>
date: 2019-04-25T20:13:10+02:00
summary:
bpo-21536: C extensions are no longer linked to libpython (GH-12946)
On Unix, C extensions are no longer linked to libpython.
It is now possible to load a C extension built using a shared library
Python with a statically linked Python.
When Python is embedded, libpython must not be loaded with
RTLD_LOCAL, but RTLD_GLOBAL instead. Previously, using RTLD_LOCAL, it
was already not possible to load C extensions which were not linked
to libpython, like C extensions of the standard library built by the
"*shared*" section of Modules/Setup.
distutils, python-config and python-config.py have been modified.
files:
A Misc/NEWS.d/next/Build/2019-04-25-01-51-52.bpo-21536.ACQkiC.rst
M Doc/distutils/apiref.rst
M Doc/whatsnew/3.8.rst
M Lib/distutils/command/build_ext.py
M Makefile.pre.in
M Misc/python-config.in
M Misc/python-config.sh.in
diff --git a/Doc/distutils/apiref.rst b/Doc/distutils/apiref.rst
index a825efc1a672..c3cdfc8a0a8e 100644
--- a/Doc/distutils/apiref.rst
+++ b/Doc/distutils/apiref.rst
@@ -277,6 +277,10 @@ the full reference.
| | simply skip the extension. | |
+------------------------+--------------------------------+---------------------------+
+ .. versionchanged:: 3.8
+
+ On Unix, C extensions are no longer linked to libpython.
+
.. class:: Distribution
diff --git a/Doc/whatsnew/3.8.rst b/Doc/whatsnew/3.8.rst
index 344656b9288f..2270334a281b 100644
--- a/Doc/whatsnew/3.8.rst
+++ b/Doc/whatsnew/3.8.rst
@@ -851,16 +851,19 @@ Changes in the Python API
Changes in the C API
--------------------
+* On Unix, C extensions are no longer linked to libpython. When Python is
+ embedded, ``libpython`` must not be loaded with ``RTLD_LOCAL``, but
+ ``RTLD_GLOBAL`` instead. Previously, using ``RTLD_LOCAL``, it was already not
+ possible to load C extensions which were not linked to ``libpython``, like C
+ extensions of the standard library built by the ``*shared*`` section of
+ ``Modules/Setup``.
+
* Use of ``#`` variants of formats in parsing or building value (e.g.
:c:func:`PyArg_ParseTuple`, :c:func:`Py_BuildValue`, :c:func:`PyObject_CallFunction`,
etc.) without ``PY_SSIZE_T_CLEAN`` defined raises ``DeprecationWarning`` now.
It will be removed in 3.10 or 4.0. Read :ref:`arg-parsing` for detail.
(Contributed by Inada Naoki in :issue:`36381`.)
-
-Changes in the C API
---------------------------
-
* Instances of heap-allocated types (such as those created with
:c:func:`PyType_FromSpec`) hold a reference to their type object.
Increasing the reference count of these type objects has been moved from
diff --git a/Lib/distutils/command/build_ext.py b/Lib/distutils/command/build_ext.py
index 0428466b00c9..1672d02acf1f 100644
--- a/Lib/distutils/command/build_ext.py
+++ b/Lib/distutils/command/build_ext.py
@@ -714,20 +714,5 @@ def get_libraries(self, ext):
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
- else:
- return ext.libraries
- elif sys.platform == 'darwin':
- # Don't use the default code below
- return ext.libraries
- elif sys.platform[:3] == 'aix':
- # Don't use the default code below
- return ext.libraries
- else:
- from distutils import sysconfig
- if sysconfig.get_config_var('Py_ENABLE_SHARED'):
- pythonlib = 'python{}.{}{}'.format(
- sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
- sysconfig.get_config_var('ABIFLAGS'))
- return ext.libraries + [pythonlib]
- else:
- return ext.libraries
+
+ return ext.libraries
diff --git a/Makefile.pre.in b/Makefile.pre.in
index cd7098cac72b..68ac7723556e 100644
--- a/Makefile.pre.in
+++ b/Makefile.pre.in
@@ -1460,7 +1460,7 @@ libinstall: build_all $(srcdir)/Modules/xxmodule.c
-PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
$(PYTHON_FOR_BUILD) -m lib2to3.pgen2.driver $(DESTDIR)$(LIBDEST)/lib2to3/PatternGrammar.txt
-python-config: $(srcdir)/Misc/python-config.in Misc/python-config.sh
+python-config: $(srcdir)/Misc/python-config.in $(srcdir)/Misc/python-config.sh
@ # Substitution happens here, as the completely-expanded BINDIR
@ # is not available in configure
sed -e "s,@EXENAME@,$(BINDIR)/python$(LDVERSION)$(EXE)," < $(srcdir)/Misc/python-config.in >python-config.py
diff --git a/Misc/NEWS.d/next/Build/2019-04-25-01-51-52.bpo-21536.ACQkiC.rst b/Misc/NEWS.d/next/Build/2019-04-25-01-51-52.bpo-21536.ACQkiC.rst
new file mode 100644
index 000000000000..5e1e717b1ea8
--- /dev/null
+++ b/Misc/NEWS.d/next/Build/2019-04-25-01-51-52.bpo-21536.ACQkiC.rst
@@ -0,0 +1,12 @@
+On Unix, C extensions are no longer linked to libpython.
+
+It is now possible to load a C extension built using a shared library Python
+with a statically linked Python.
+
+When Python is embedded, ``libpython`` must not be loaded with ``RTLD_LOCAL``,
+but ``RTLD_GLOBAL`` instead. Previously, using ``RTLD_LOCAL``, it was already
+not possible to load C extensions which were not linked to ``libpython``, like
+C extensions of the standard library built by the ``*shared*`` section of
+``Modules/Setup``.
+
+distutils, python-config and python-config.py have been modified.
diff --git a/Misc/python-config.in b/Misc/python-config.in
index 714415222798..31ad55822e55 100644
--- a/Misc/python-config.in
+++ b/Misc/python-config.in
@@ -47,9 +47,7 @@ for opt in opt_flags:
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
- libs = ['-lpython' + pyver + sys.abiflags]
- libs += getvar('LIBS').split()
- libs += getvar('SYSLIBS').split()
+ libs = getvar('LIBS').split() + getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
diff --git a/Misc/python-config.sh.in b/Misc/python-config.sh.in
index a3c479ce571f..ac1a467678e4 100644
--- a/Misc/python-config.sh.in
+++ b/Misc/python-config.sh.in
@@ -41,7 +41,7 @@ LIBM="@LIBM@"
LIBC="@LIBC@"
SYSLIBS="$LIBM $LIBC"
ABIFLAGS="@ABIFLAGS@"
-LIBS="-lpython${VERSION}${ABIFLAGS} @LIBS@ $SYSLIBS"
+LIBS="@LIBS@ $SYSLIBS"
BASECFLAGS="@BASECFLAGS@"
LDLIBRARY="@LDLIBRARY@"
OPT="@OPT@"
1
0
25 Apr '19
https://github.com/python/cpython/commit/d7befad328ad1a6d1f812be2bf154c1cd1…
commit: d7befad328ad1a6d1f812be2bf154c1cd1e01fbc
branch: master
author: Victor Stinner <vstinner(a)redhat.com>
committer: GitHub <noreply(a)github.com>
date: 2019-04-25T14:30:16+02:00
summary:
bpo-35537: Document posix_spawn() change in subprocess (GH-11668)
Document that subprocess.Popen no longer raises an exception on errors
such as a missing program on very specific platforms when
os.posix_spawn() is used.
files:
M Doc/library/subprocess.rst
M Doc/whatsnew/3.8.rst
diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst
index ca0813c7830a..3280c95cacbb 100644
--- a/Doc/library/subprocess.rst
+++ b/Doc/library/subprocess.rst
@@ -567,6 +567,13 @@ functions.
Popen destructor now emits a :exc:`ResourceWarning` warning if the child
process is still running.
+ .. versionchanged:: 3.8
+ Popen can use :func:`os.posix_spawn` in some cases for better
+ performance. On Windows Subsystem for Linux and QEMU User Emulation,
+ Popen constructor using :func:`os.posix_spawn` no longer raise an
+ exception on errors like missing program, but the child process fails
+ with a non-zero :attr:`~Popen.returncode`.
+
Exceptions
^^^^^^^^^^
diff --git a/Doc/whatsnew/3.8.rst b/Doc/whatsnew/3.8.rst
index ae8163a1b156..344656b9288f 100644
--- a/Doc/whatsnew/3.8.rst
+++ b/Doc/whatsnew/3.8.rst
@@ -729,6 +729,12 @@ Changes in Python behavior
Changes in the Python API
-------------------------
+* :class:`subprocess.Popen` can now use :func:`os.posix_spawn` in some cases
+ for better performance. On Windows Subsystem for Linux and QEMU User
+ Emulation, Popen constructor using :func:`os.posix_spawn` no longer raise an
+ exception on errors like missing program, but the child process fails with a
+ non-zero :attr:`~Popen.returncode`.
+
* The :meth:`imap.IMAP4.logout` method no longer ignores silently arbitrary
exceptions.
1
0
bpo-28552: Fix distutils.sysconfig for empty sys.executable (GH-12875) (GH-12949)
by Victor Stinner 25 Apr '19
by Victor Stinner 25 Apr '19
25 Apr '19
https://github.com/python/cpython/commit/f4edd39017a211d4544570a1e2ac2110ef…
commit: f4edd39017a211d4544570a1e2ac2110ef8e51b4
branch: 2.7
author: Victor Stinner <vstinner(a)redhat.com>
committer: GitHub <noreply(a)github.com>
date: 2019-04-25T13:16:02+02:00
summary:
bpo-28552: Fix distutils.sysconfig for empty sys.executable (GH-12875) (GH-12949)
bpo-28552, bpo-7774: Fix distutils.sysconfig if sys.executable is
None or an empty string: use os.getcwd() to initialize project_base.
Also fix the distutils build command: don't use sys.executable if
it evaluates as false (None or an empty string).
files:
A Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst
M Lib/distutils/command/build.py
M Lib/distutils/sysconfig.py
diff --git a/Lib/distutils/command/build.py b/Lib/distutils/command/build.py
index f84bf359dc60..2360091a23e8 100644
--- a/Lib/distutils/command/build.py
+++ b/Lib/distutils/command/build.py
@@ -114,7 +114,7 @@ def finalize_options(self):
self.build_scripts = os.path.join(self.build_base,
'scripts-' + sys.version[0:3])
- if self.executable is None:
+ if self.executable is None and sys.executable:
self.executable = os.path.normpath(sys.executable)
def run(self):
diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py
index 8f49dac6b12b..1a4b79264417 100644
--- a/Lib/distutils/sysconfig.py
+++ b/Lib/distutils/sysconfig.py
@@ -25,7 +25,12 @@
# Path to the base directory of the project. On Windows the binary may
# live in project/PCBuild9. If we're dealing with an x64 Windows build,
# it'll live in project/PCbuild/amd64.
-project_base = os.path.dirname(os.path.abspath(sys.executable))
+if sys.executable:
+ project_base = os.path.dirname(os.path.abspath(sys.executable))
+else:
+ # sys.executable can be empty if argv[0] has been changed and Python is
+ # unable to retrieve the real program name
+ project_base = os.getcwd()
if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
# PC/VS7.1
@@ -79,7 +84,12 @@ def get_python_inc(plat_specific=0, prefix=None):
if os.name == "posix":
if python_build:
- buildir = os.path.dirname(sys.executable)
+ if sys.executable:
+ buildir = os.path.dirname(sys.executable)
+ else:
+ # sys.executable can be empty if argv[0] has been changed
+ # and Python is unable to retrieve the real program name
+ buildir = os.getcwd()
if plat_specific:
# python.h is located in the buildir
inc_dir = buildir
diff --git a/Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst b/Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst
new file mode 100644
index 000000000000..2aa30c98c452
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst
@@ -0,0 +1,4 @@
+Fix :mod:`distutils.sysconfig` if :data:`sys.executable` is ``None`` or an
+empty string: use :func:`os.getcwd` to initialize ``project_base``. Fix
+also the distutils build command: don't use :data:`sys.executable` if it is
+``None`` or an empty string.
1
0
bpo-28552: Fix distutils.sysconfig for empty sys.executable (GH-12875) (GH-12948)
by Victor Stinner 25 Apr '19
by Victor Stinner 25 Apr '19
25 Apr '19
https://github.com/python/cpython/commit/3076a3e0d1c54a2a6cc54c84521cd0f640…
commit: 3076a3e0d1c54a2a6cc54c84521cd0f640d7cffb
branch: 3.7
author: Miss Islington (bot) <31488909+miss-islington(a)users.noreply.github.com>
committer: Victor Stinner <vstinner(a)redhat.com>
date: 2019-04-25T13:15:47+02:00
summary:
bpo-28552: Fix distutils.sysconfig for empty sys.executable (GH-12875) (GH-12948)
bpo-28552, bpo-7774: Fix distutils.sysconfig if sys.executable is
None or an empty string: use os.getcwd() to initialize project_base.
Also fix the distutils build command: don't use sys.executable if
it evaluates as false (None or an empty string).
(cherry picked from commit 0ef8c157e9195df0115c54ba875a5efb92ac22fb)
Co-authored-by: Victor Stinner <vstinner(a)redhat.com>
files:
A Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst
M Lib/distutils/command/build.py
M Lib/distutils/sysconfig.py
diff --git a/Lib/distutils/command/build.py b/Lib/distutils/command/build.py
index c6f52e61e1bc..a86df0bc7f92 100644
--- a/Lib/distutils/command/build.py
+++ b/Lib/distutils/command/build.py
@@ -116,7 +116,7 @@ def finalize_options(self):
self.build_scripts = os.path.join(self.build_base,
'scripts-%d.%d' % sys.version_info[:2])
- if self.executable is None:
+ if self.executable is None and sys.executable:
self.executable = os.path.normpath(sys.executable)
if isinstance(self.parallel, str):
diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py
index f803a1d13ac8..0a034ee09bda 100644
--- a/Lib/distutils/sysconfig.py
+++ b/Lib/distutils/sysconfig.py
@@ -28,7 +28,12 @@
if "_PYTHON_PROJECT_BASE" in os.environ:
project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
else:
- project_base = os.path.dirname(os.path.abspath(sys.executable))
+ if sys.executable:
+ project_base = os.path.dirname(os.path.abspath(sys.executable))
+ else:
+ # sys.executable can be empty if argv[0] has been changed and Python is
+ # unable to retrieve the real program name
+ project_base = os.getcwd()
# python_build: (Boolean) if true, we're either building Python or
diff --git a/Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst b/Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst
new file mode 100644
index 000000000000..2aa30c98c452
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst
@@ -0,0 +1,4 @@
+Fix :mod:`distutils.sysconfig` if :data:`sys.executable` is ``None`` or an
+empty string: use :func:`os.getcwd` to initialize ``project_base``. Fix
+also the distutils build command: don't use :data:`sys.executable` if it is
+``None`` or an empty string.
1
0
25 Apr '19
https://github.com/python/cpython/commit/0ef8c157e9195df0115c54ba875a5efb92…
commit: 0ef8c157e9195df0115c54ba875a5efb92ac22fb
branch: master
author: Victor Stinner <vstinner(a)redhat.com>
committer: GitHub <noreply(a)github.com>
date: 2019-04-25T11:59:34+02:00
summary:
bpo-28552: Fix distutils.sysconfig for empty sys.executable (GH-12875)
bpo-28552, bpo-7774: Fix distutils.sysconfig if sys.executable is
None or an empty string: use os.getcwd() to initialize project_base.
Also fix the distutils build command: don't use sys.executable if
it evaluates as false (None or an empty string).
files:
A Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst
M Lib/distutils/command/build.py
M Lib/distutils/sysconfig.py
diff --git a/Lib/distutils/command/build.py b/Lib/distutils/command/build.py
index c6f52e61e1bc..a86df0bc7f92 100644
--- a/Lib/distutils/command/build.py
+++ b/Lib/distutils/command/build.py
@@ -116,7 +116,7 @@ def finalize_options(self):
self.build_scripts = os.path.join(self.build_base,
'scripts-%d.%d' % sys.version_info[:2])
- if self.executable is None:
+ if self.executable is None and sys.executable:
self.executable = os.path.normpath(sys.executable)
if isinstance(self.parallel, str):
diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py
index a3494670db18..570a612d1b10 100644
--- a/Lib/distutils/sysconfig.py
+++ b/Lib/distutils/sysconfig.py
@@ -28,7 +28,12 @@
if "_PYTHON_PROJECT_BASE" in os.environ:
project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
else:
- project_base = os.path.dirname(os.path.abspath(sys.executable))
+ if sys.executable:
+ project_base = os.path.dirname(os.path.abspath(sys.executable))
+ else:
+ # sys.executable can be empty if argv[0] has been changed and Python is
+ # unable to retrieve the real program name
+ project_base = os.getcwd()
# python_build: (Boolean) if true, we're either building Python or
diff --git a/Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst b/Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst
new file mode 100644
index 000000000000..2aa30c98c452
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2019-04-18-16-10-29.bpo-28552.MW1TLt.rst
@@ -0,0 +1,4 @@
+Fix :mod:`distutils.sysconfig` if :data:`sys.executable` is ``None`` or an
+empty string: use :func:`os.getcwd` to initialize ``project_base``. Fix
+also the distutils build command: don't use :data:`sys.executable` if it is
+``None`` or an empty string.
1
0