[Python-checkins] [3.7] bpo-34279: Synchronize regrtest with master (GH-10800)

Victor Stinner webhook-mailer at python.org
Thu Nov 29 15:15:05 EST 2018


https://github.com/python/cpython/commit/8a73cac618a050f4e74eb38ff43e48d9957a6dec
commit: 8a73cac618a050f4e74eb38ff43e48d9957a6dec
branch: 3.7
author: Victor Stinner <vstinner at redhat.com>
committer: GitHub <noreply at github.com>
date: 2018-11-29T21:14:59+01:00
summary:

[3.7] bpo-34279: Synchronize regrtest with master (GH-10800)

* bpo-34605, libregrtest: Rename --slaveargs to --worker-args (GH-9099)

Also rename run_tests_slave() to run_tests_worker().

(cherry picked from commit 012f5b968a738b15ae9b40c499a1c0778b0615a9)

* bpo-34279, regrtest: Issue a warning if no tests have been executed (GH-10150)

(cherry picked from commit 9724348b43a9005a449ba532ccd3c6726f031097)

* test_regrtest: remove unused threading import
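
For context, the user-visible effect of the bpo-34279 part: filtering out
every test (for example with -m/--match) no longer reports plain success.
A hypothetical session on this branch (output abridged and reconstructed
from the strings added in the diff below, not captured from a real run):

    $ ./python -m test test_os -m nosuchtest
    Run tests sequentially
    0:00:00 [1/1] test_os run no tests

    1 test run no tests:
        test_os

    Tests result: NO TEST RUN

The exit code stays 0 in this case, as the new tests below assert; only the
reported result state changes.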

files:
A Misc/NEWS.d/next/Tests/2018-10-27-13-41-55.bpo-34279.v0Xqxe.rst
M Lib/test/libregrtest/cmdline.py
M Lib/test/libregrtest/main.py
M Lib/test/libregrtest/runtest.py
M Lib/test/libregrtest/runtest_mp.py
M Lib/test/support/__init__.py
M Lib/test/test_regrtest.py

diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py
index c08491fc0462..09270323611f 100644
--- a/Lib/test/libregrtest/cmdline.py
+++ b/Lib/test/libregrtest/cmdline.py
@@ -170,7 +170,7 @@ def _create_parser():
     group.add_argument('--wait', action='store_true',
                        help='wait for user input, e.g., allow a debugger '
                             'to be attached')
-    group.add_argument('--slaveargs', metavar='ARGS')
+    group.add_argument('--worker-args', metavar='ARGS')
     group.add_argument('-S', '--start', metavar='START',
                        help='the name of the test at which to start.' +
                             more_details)
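
Note that argparse maps the dashed option name to an underscored attribute,
which is why the rest of the patch reads ns.worker_args. A minimal
standalone illustration (plain stdlib, nothing regrtest-specific):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--worker-args', metavar='ARGS')
    ns = parser.parse_args(['--worker-args', '[[], {}]'])
    print(ns.worker_args)   # prints: [[], {}]
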
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 1438966d4a50..8d44caf2999a 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -14,7 +14,7 @@
 from test.libregrtest.runtest import (
     findtests, runtest, get_abs_module,
     STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
-    INTERRUPTED, CHILD_ERROR,
+    INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN,
     PROGRESS_MIN_TIME, format_test_result)
 from test.libregrtest.setup import setup_tests
 from test.libregrtest.utils import removepy, count, format_duration, printlist
@@ -79,6 +79,7 @@ def __init__(self):
         self.resource_denieds = []
         self.environment_changed = []
         self.rerun = []
+        self.run_no_tests = []
         self.first_result = None
         self.interrupted = False
 
@@ -118,6 +119,8 @@ def accumulate_result(self, test, result):
         elif ok == RESOURCE_DENIED:
             self.skipped.append(test)
             self.resource_denieds.append(test)
+        elif ok == TEST_DID_NOT_RUN:
+            self.run_no_tests.append(test)
         elif ok != INTERRUPTED:
             raise ValueError("invalid test result: %r" % ok)
 
@@ -368,6 +371,11 @@ def display_result(self):
             print("%s:" % count(len(self.rerun), "re-run test"))
             printlist(self.rerun)
 
+        if self.run_no_tests:
+            print()
+            print(count(len(self.run_no_tests), "test"), "run no tests:")
+            printlist(self.run_no_tests)
+
     def run_tests_sequential(self):
         if self.ns.trace:
             import trace
@@ -458,6 +466,9 @@ def get_tests_result(self):
             result.append("FAILURE")
         elif self.ns.fail_env_changed and self.environment_changed:
             result.append("ENV CHANGED")
+        elif not any((self.good, self.bad, self.skipped, self.interrupted,
+            self.environment_changed)):
+            result.append("NO TEST RUN")
 
         if self.interrupted:
             result.append("INTERRUPTED")
@@ -580,9 +591,9 @@ def _main(self, tests, kwargs):
                 print(msg, file=sys.stderr, flush=True)
                 sys.exit(2)
 
-        if self.ns.slaveargs is not None:
-            from test.libregrtest.runtest_mp import run_tests_slave
-            run_tests_slave(self.ns.slaveargs)
+        if self.ns.worker_args is not None:
+            from test.libregrtest.runtest_mp import run_tests_worker
+            run_tests_worker(self.ns.worker_args)
 
         if self.ns.wait:
             input("Press any key to continue...")
diff --git a/Lib/test/libregrtest/runtest.py b/Lib/test/libregrtest/runtest.py
index 4f41080d37b9..466b522db9ed 100644
--- a/Lib/test/libregrtest/runtest.py
+++ b/Lib/test/libregrtest/runtest.py
@@ -19,6 +19,7 @@
 RESOURCE_DENIED = -3
 INTERRUPTED = -4
 CHILD_ERROR = -5   # error in a child process
+TEST_DID_NOT_RUN = -6   # the test file ran no tests
 
 _FORMAT_TEST_RESULT = {
     PASSED: '%s passed',
@@ -28,6 +29,7 @@
     RESOURCE_DENIED: '%s skipped (resource denied)',
     INTERRUPTED: '%s interrupted',
     CHILD_ERROR: '%s crashed',
+    TEST_DID_NOT_RUN: '%s run no tests',
 }
 
 # Minimum duration of a test to display its duration or to mention that
@@ -94,6 +96,7 @@ def runtest(ns, test):
         ENV_CHANGED      test failed because it changed the execution environment
         FAILED           test failed
         PASSED           test passed
+        TEST_DID_NOT_RUN test ran no tests
 
     If ns.xmlpath is not None, xml_data is a list containing each
     generated testsuite element.
@@ -197,6 +200,8 @@ def test_runner():
             else:
                 print("test", test, "failed", file=sys.stderr, flush=True)
         return FAILED, test_time
+    except support.TestDidNotRun:
+        return TEST_DID_NOT_RUN, test_time
     except:
         msg = traceback.format_exc()
         if not ns.pgo:
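
The new state renders through the _FORMAT_TEST_RESULT table added above. A
quick way to see the exact message, using only names visible in this hunk
(requires an interpreter that includes this change):

    from test.libregrtest.runtest import _FORMAT_TEST_RESULT, TEST_DID_NOT_RUN

    # '%s run no tests' % 'test_os' -> 'test_os run no tests'
    print(_FORMAT_TEST_RESULT[TEST_DID_NOT_RUN] % 'test_os')
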
diff --git a/Lib/test/libregrtest/runtest_mp.py b/Lib/test/libregrtest/runtest_mp.py
index 779c429bff52..6190574afdf8 100644
--- a/Lib/test/libregrtest/runtest_mp.py
+++ b/Lib/test/libregrtest/runtest_mp.py
@@ -24,23 +24,23 @@
 
 
 def run_test_in_subprocess(testname, ns):
-    """Run the given test in a subprocess with --slaveargs.
+    """Run the given test in a subprocess with --worker-args.
 
     ns is the option Namespace parsed from command-line arguments. regrtest
-    is invoked in a subprocess with the --slaveargs argument; when the
+    is invoked in a subprocess with the --worker-args argument; when the
     subprocess exits, its return code, stdout and stderr are returned as a
     3-tuple.
     """
     from subprocess import Popen, PIPE
 
     ns_dict = vars(ns)
-    slaveargs = (ns_dict, testname)
-    slaveargs = json.dumps(slaveargs)
+    worker_args = (ns_dict, testname)
+    worker_args = json.dumps(worker_args)
 
     cmd = [sys.executable, *support.args_from_interpreter_flags(),
            '-u',    # Unbuffered stdout and stderr
            '-m', 'test.regrtest',
-           '--slaveargs', slaveargs]
+           '--worker-args', worker_args]
     if ns.pgo:
         cmd += ['--pgo']
 
@@ -58,8 +58,8 @@ def run_test_in_subprocess(testname, ns):
     return retcode, stdout, stderr
 
 
-def run_tests_slave(slaveargs):
-    ns_dict, testname = json.loads(slaveargs)
+def run_tests_worker(worker_args):
+    ns_dict, testname = json.loads(worker_args)
     ns = types.SimpleNamespace(**ns_dict)
 
     setup_tests(ns)
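
The worker protocol itself is unchanged by the rename: the parent serializes
(ns_dict, testname) to JSON and the child rebuilds the namespace. A
self-contained sketch of that round trip (toy namespace; the real one
carries all regrtest options):

    import json
    import types

    ns = types.SimpleNamespace(verbose=0, pgo=False)
    worker_args = json.dumps((vars(ns), 'test_os'))

    # In the child process, run_tests_worker() reverses this:
    ns_dict, testname = json.loads(worker_args)
    child_ns = types.SimpleNamespace(**ns_dict)
    assert child_ns.verbose == 0 and testname == 'test_os'
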
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index ce455e0dc461..2768e1147946 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -71,7 +71,7 @@
     # globals
     "PIPE_MAX_SIZE", "verbose", "max_memuse", "use_resources", "failfast",
     # exceptions
-    "Error", "TestFailed", "ResourceDenied",
+    "Error", "TestFailed", "TestDidNotRun", "ResourceDenied",
     # imports
     "import_module", "import_fresh_module", "CleanImport",
     # modules
@@ -117,6 +117,9 @@ class Error(Exception):
 class TestFailed(Error):
     """Test failed."""
 
+class TestDidNotRun(Error):
+    """Test did not run any subtests."""
+
 class ResourceDenied(unittest.SkipTest):
     """Test skipped because it requested a disallowed resource.
 
@@ -1890,6 +1893,8 @@ def _run_suite(suite):
     if junit_xml_list is not None:
         junit_xml_list.append(result.get_xml_element())
 
+    if not result.testsRun:
+        raise TestDidNotRun
     if not result.wasSuccessful():
         if len(result.errors) == 1 and not result.failures:
             err = result.errors[0][1]
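
With the _run_suite() change, a test class that collects zero tests now
raises the new exception instead of passing silently. A minimal sketch
(again requires an interpreter with this patch applied):

    import unittest
    from test import support

    class NoTests(unittest.TestCase):
        pass   # no test_* methods, so the suite is empty

    try:
        support.run_unittest(NoTests)
    except support.TestDidNotRun:
        print('suite ran no tests')
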
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index af332ad15d92..db9bd6dfc043 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -15,7 +15,6 @@
 import sysconfig
 import tempfile
 import textwrap
-import threading
 import unittest
 from test import libregrtest
 from test import support
@@ -67,10 +66,10 @@ def test_wait(self):
         ns = libregrtest._parse_args(['--wait'])
         self.assertTrue(ns.wait)
 
-    def test_slaveargs(self):
-        ns = libregrtest._parse_args(['--slaveargs', '[[], {}]'])
-        self.assertEqual(ns.slaveargs, '[[], {}]')
-        self.checkError(['--slaveargs'], 'expected one argument')
+    def test_worker_args(self):
+        ns = libregrtest._parse_args(['--worker-args', '[[], {}]'])
+        self.assertEqual(ns.worker_args, '[[], {}]')
+        self.checkError(['--worker-args'], 'expected one argument')
 
     def test_start(self):
         for opt in '-S', '--start':
@@ -352,11 +351,20 @@ def setUp(self):
         self.tmptestdir = tempfile.mkdtemp()
         self.addCleanup(support.rmtree, self.tmptestdir)
 
-    def create_test(self, name=None, code=''):
+    def create_test(self, name=None, code=None):
         if not name:
             name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
             BaseTestCase.TEST_UNIQUE_ID += 1
 
+        if code is None:
+            code = textwrap.dedent("""
+                    import unittest
+
+                    class Tests(unittest.TestCase):
+                        def test_empty_test(self):
+                            pass
+                """)
+
         # test_regrtest cannot be run twice in parallel because
         # of setUp() and create_test()
         name = self.TESTNAME_PREFIX + name
@@ -391,7 +399,7 @@ def parse_executed_tests(self, output):
 
     def check_executed_tests(self, output, tests, skipped=(), failed=(),
                              env_changed=(), omitted=(),
-                             rerun=(),
+                             rerun=(), no_test_ran=(),
                              randomize=False, interrupted=False,
                              fail_env_changed=False):
         if isinstance(tests, str):
@@ -406,6 +414,8 @@ def check_executed_tests(self, output, tests, skipped=(), failed=(),
             omitted = [omitted]
         if isinstance(rerun, str):
             rerun = [rerun]
+        if isinstance(no_test_ran, str):
+            no_test_ran = [no_test_ran]
 
         executed = self.parse_executed_tests(output)
         if randomize:
@@ -448,8 +458,12 @@ def list_regex(line_format, tests):
                 regex = "Re-running test %r in verbose mode" % name
                 self.check_line(output, regex)
 
+        if no_test_ran:
+            regex = list_regex('%s test%s run no tests', no_test_ran)
+            self.check_line(output, regex)
+
         good = (len(tests) - len(skipped) - len(failed)
-                - len(omitted) - len(env_changed))
+                - len(omitted) - len(env_changed) - len(no_test_ran))
         if good:
             regex = r'%s test%s OK\.$' % (good, plural(good))
             if not skipped and not failed and good > 1:
@@ -466,12 +480,16 @@ def list_regex(line_format, tests):
             result.append('ENV CHANGED')
         if interrupted:
             result.append('INTERRUPTED')
-        if not result:
+        if not any((good, result, failed, interrupted, skipped,
+                    env_changed, fail_env_changed)):
+            result.append("NO TEST RUN")
+        elif not result:
             result.append('SUCCESS')
         result = ', '.join(result)
         if rerun:
             self.check_line(output, 'Tests result: %s' % result)
             result = 'FAILURE then %s' % result
+
         self.check_line(output, 'Tests result: %s' % result)
 
     def parse_random_seed(self, output):
@@ -650,7 +668,14 @@ def test_resources(self):
         # test -u command line option
         tests = {}
         for resource in ('audio', 'network'):
-            code = 'from test import support\nsupport.requires(%r)' % resource
+            code = textwrap.dedent("""
+                        from test import support; support.requires(%r)
+                        import unittest
+                        class PassingTest(unittest.TestCase):
+                            def test_pass(self):
+                                pass
+                    """ % resource)
+
             tests[resource] = self.create_test(resource, code)
         test_names = sorted(tests.values())
 
@@ -979,6 +1004,57 @@ def test_bug(self):
         output = self.run_tests("-w", testname, exitcode=2)
         self.check_executed_tests(output, [testname],
                                   failed=testname, rerun=testname)
+
+    def test_no_tests_ran(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_bug(self):
+                    pass
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
+        self.check_executed_tests(output, [testname], no_test_ran=testname)
+
+    def test_no_tests_ran_multiple_tests_nonexistent(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_bug(self):
+                    pass
+        """)
+        testname = self.create_test(code=code)
+        testname2 = self.create_test(code=code)
+
+        output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
+        self.check_executed_tests(output, [testname, testname2],
+                                  no_test_ran=[testname, testname2])
+
+    def test_no_test_ran_some_test_exist_some_not(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_bug(self):
+                    pass
+        """)
+        testname = self.create_test(code=code)
+        other_code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_other_bug(self):
+                    pass
+        """)
+        testname2 = self.create_test(code=other_code)
+
+        output = self.run_tests(testname, testname2, "-m", "nosuchtest",
+                                "-m", "test_other_bug", exitcode=0)
+        self.check_executed_tests(output, [testname, testname2],
+                                  no_test_ran=[testname])
 
 
 class TestUtils(unittest.TestCase):
diff --git a/Misc/NEWS.d/next/Tests/2018-10-27-13-41-55.bpo-34279.v0Xqxe.rst b/Misc/NEWS.d/next/Tests/2018-10-27-13-41-55.bpo-34279.v0Xqxe.rst
new file mode 100644
index 000000000000..a82fa6b304ac
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2018-10-27-13-41-55.bpo-34279.v0Xqxe.rst
@@ -0,0 +1,3 @@
+regrtest now issues a warning when no tests have been executed in a
+particular test file. Also, a new final result state is reported if no test
+has been executed across all test files. Patch by Pablo Galindo.


