[Python-checkins] bpo-34279, regrtest: Issue a warning if no tests have been executed (GH-10801)

Victor Stinner webhook-mailer at python.org
Thu Nov 29 15:14:48 EST 2018


https://github.com/python/cpython/commit/36003003f26d0c30fc15ec4dc3b0d7697dff908e
commit: 36003003f26d0c30fc15ec4dc3b0d7697dff908e
branch: 2.7
author: Victor Stinner <vstinner at redhat.com>
committer: GitHub <noreply at github.com>
date: 2018-11-29T21:14:42+01:00
summary:

bpo-34279, regrtest: Issue a warning if no tests have been executed (GH-10801)

Co-Authored-By: Pablo Galindo <Pablogsal at gmail.com>

files:
A Misc/NEWS.d/next/Tests/2018-10-27-13-41-55.bpo-34279.v0Xqxe.rst
M Lib/test/regrtest.py
M Lib/test/support/__init__.py
M Lib/test/test_regrtest.py

diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py
index 6b49dafd9368..70c51226e923 100755
--- a/Lib/test/regrtest.py
+++ b/Lib/test/regrtest.py
@@ -241,6 +241,7 @@
 RESOURCE_DENIED = -3
 INTERRUPTED = -4
 CHILD_ERROR = -5   # error in a child process
+TEST_DID_NOT_RUN = -6   # the test did not run any tests
 
 # Minimum duration of a test to display its duration or to mention that
 # the test is running in background
@@ -300,6 +301,7 @@ def format_duration(seconds):
     RESOURCE_DENIED: '%s skipped (resource denied)',
     INTERRUPTED: '%s interrupted',
     CHILD_ERROR: '%s crashed',
+    TEST_DID_NOT_RUN: '%s run no tests',
 }
 
 
@@ -548,6 +550,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
     resource_denieds = []
     environment_changed = []
     rerun = []
+    run_no_tests = []
     first_result = None
     interrupted = False
 
@@ -644,6 +647,8 @@ def accumulate_result(test, result):
         elif ok == RESOURCE_DENIED:
             skipped.append(test)
             resource_denieds.append(test)
+        elif ok == TEST_DID_NOT_RUN:
+            run_no_tests.append(test)
         elif ok != INTERRUPTED:
             raise ValueError("invalid test result: %r" % ok)
 
@@ -925,6 +930,8 @@ def get_tests_result():
             result.append("FAILURE")
         elif fail_env_changed and environment_changed:
             result.append("ENV CHANGED")
+        elif not any((good, bad, skipped, interrupted, environment_changed)):
+            result.append("NO TEST RUN")
 
         if interrupted:
             result.append("INTERRUPTED")
@@ -994,10 +1001,15 @@ def display_result():
                 print "expected to get skipped on", plat + "."
 
         if rerun:
-            print
+            print("")
             print("%s:" % count(len(rerun), "re-run test"))
             printlist(rerun)
 
+        if run_no_tests:
+            print("")
+            print("%s run no tests:" % count(len(run_no_tests), "test"))
+            printlist(run_no_tests)
+
 
     display_result()
 
@@ -1109,6 +1121,7 @@ def runtest(test, verbose, quiet,
         ENV_CHANGED      test failed because it changed the execution environment
         FAILED           test failed
         PASSED           test passed
+        TEST_DID_NOT_RUN test did not run any tests.
     """
 
     support.verbose = verbose  # Tell tests to be moderately quiet
@@ -1344,6 +1357,8 @@ def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False, testdir=Non
             print >>sys.stderr, "test", test, "failed --", msg
         sys.stderr.flush()
         return FAILED, test_time
+    except support.TestDidNotRun:
+        return TEST_DID_NOT_RUN, test_time
     except:
         type, value = sys.exc_info()[:2]
         if not pgo:
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index c2cc009b9f48..23b7065174ee 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -29,7 +29,7 @@
 except ImportError:
     thread = None
 
-__all__ = ["Error", "TestFailed", "ResourceDenied", "import_module",
+__all__ = ["Error", "TestFailed", "TestDidNotRun", "ResourceDenied", "import_module",
            "verbose", "use_resources", "max_memuse", "record_original_stdout",
            "get_original_stdout", "unload", "unlink", "rmtree", "forget",
            "is_resource_enabled", "requires", "requires_mac_ver",
@@ -53,6 +53,9 @@ class Error(Exception):
 class TestFailed(Error):
     """Test failed."""
 
+class TestDidNotRun(Error):
+    """Test did not run any subtests."""
+
 class ResourceDenied(unittest.SkipTest):
     """Test skipped because it requested a disallowed resource.
 
@@ -1536,6 +1539,8 @@ def _run_suite(suite):
         runner = BasicTestRunner()
 
     result = runner.run(suite)
+    if not result.testsRun:
+        raise TestDidNotRun
     if not result.wasSuccessful():
         if len(result.errors) == 1 and not result.failures:
             err = result.errors[0][1]
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index a459504e445e..593df7ac8ff9 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -51,11 +51,24 @@ def setUp(self):
         self.tmptestdir = tempfile.mkdtemp()
         self.addCleanup(support.rmtree, self.tmptestdir)
 
-    def create_test(self, name=None, code=''):
+    def create_test(self, name=None, code=None):
         if not name:
             name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
             BaseTestCase.TEST_UNIQUE_ID += 1
 
+        if code is None:
+            code = textwrap.dedent("""
+                    import unittest
+                    from test import support
+
+                    class Tests(unittest.TestCase):
+                        def test_empty_test(self):
+                            pass
+
+                    def test_main():
+                        support.run_unittest(Tests)
+                """)
+
         # test_regrtest cannot be run twice in parallel because
         # of setUp() and create_test()
         name = self.TESTNAME_PREFIX + name
@@ -94,7 +107,7 @@ def parse_executed_tests(self, output):
 
     def check_executed_tests(self, output, tests, skipped=(), failed=(),
                              env_changed=(), omitted=(),
-                             rerun=(),
+                             rerun=(), no_test_ran=(),
                              randomize=False, interrupted=False,
                              fail_env_changed=False):
         if isinstance(tests, str):
@@ -109,6 +122,8 @@ def check_executed_tests(self, output, tests, skipped=(), failed=(),
             omitted = [omitted]
         if isinstance(rerun, str):
             rerun = [rerun]
+        if isinstance(no_test_ran, str):
+            no_test_ran = [no_test_ran]
 
         executed = self.parse_executed_tests(output)
         if randomize:
@@ -152,7 +167,7 @@ def list_regex(line_format, tests):
                 self.check_line(output, regex)
 
         good = (len(tests) - len(skipped) - len(failed)
-                - len(omitted) - len(env_changed))
+                - len(omitted) - len(env_changed) - len(no_test_ran))
         if good:
             regex = r'%s test%s OK\.$' % (good, plural(good))
             if not skipped and not failed and good > 1:
@@ -169,12 +184,16 @@ def list_regex(line_format, tests):
             result.append('ENV CHANGED')
         if interrupted:
             result.append('INTERRUPTED')
-        if not result:
+        if not any((good, result, failed, interrupted, skipped,
+                    env_changed, fail_env_changed)):
+            result.append("NO TEST RUN")
+        elif not result:
             result.append('SUCCESS')
         result = ', '.join(result)
         if rerun:
             self.check_line(output, 'Tests result: %s' % result)
             result = 'FAILURE then %s' % result
+
         self.check_line(output, 'Tests result: %s' % result)
 
     def parse_random_seed(self, output):
@@ -358,7 +377,17 @@ def test_resources(self):
         # test -u command line option
         tests = {}
         for resource in ('audio', 'network'):
-            code = 'from test import support\nsupport.requires(%r)' % resource
+            code = textwrap.dedent("""
+                        from test import support; support.requires(%r)
+                        import unittest
+                        class PassingTest(unittest.TestCase):
+                            def test_pass(self):
+                                pass
+
+                        def test_main():
+                            support.run_unittest(PassingTest)
+                    """ % resource)
+
             tests[resource] = self.create_test(resource, code)
         test_names = sorted(tests.values())
 
@@ -669,6 +698,7 @@ def test_main():
     def test_rerun_fail(self):
         code = textwrap.dedent("""
             import unittest
+            from test import support
 
             class Tests(unittest.TestCase):
                 def test_bug(self):
@@ -684,6 +714,76 @@ def test_main():
         self.check_executed_tests(output, [testname],
                                   failed=testname, rerun=testname)
 
+    def test_no_tests_ran(self):
+        code = textwrap.dedent("""
+            import unittest
+            from test import support
+
+            class Tests(unittest.TestCase):
+                def test_bug(self):
+                    pass
+
+            def test_main():
+                support.run_unittest(Tests)
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("-m", "nosuchtest", testname, exitcode=0)
+        self.check_executed_tests(output, [testname], no_test_ran=testname)
+
+    def test_no_tests_ran_multiple_tests_nonexistent(self):
+        code = textwrap.dedent("""
+            import unittest
+            from test import support
+
+            class Tests(unittest.TestCase):
+                def test_bug(self):
+                    pass
+
+            def test_main():
+                support.run_unittest(Tests)
+        """)
+        testname = self.create_test(code=code)
+        testname2 = self.create_test(code=code)
+
+        output = self.run_tests("-m", "nosuchtest",
+                                testname, testname2,
+                                exitcode=0)
+        self.check_executed_tests(output, [testname, testname2],
+                                  no_test_ran=[testname, testname2])
+
+    def test_no_test_ran_some_test_exist_some_not(self):
+        code = textwrap.dedent("""
+            import unittest
+            from test import support
+
+            class Tests(unittest.TestCase):
+                def test_bug(self):
+                    pass
+
+            def test_main():
+                support.run_unittest(Tests)
+        """)
+        testname = self.create_test(code=code)
+        other_code = textwrap.dedent("""
+            import unittest
+            from test import support
+
+            class Tests(unittest.TestCase):
+                def test_other_bug(self):
+                    pass
+
+            def test_main():
+                support.run_unittest(Tests)
+        """)
+        testname2 = self.create_test(code=other_code)
+
+        output = self.run_tests("-m", "nosuchtest", "-m", "test_other_bug",
+                                testname, testname2,
+                                exitcode=0)
+        self.check_executed_tests(output, [testname, testname2],
+                                  no_test_ran=[testname])
+
 
 class TestUtils(unittest.TestCase):
     def test_format_duration(self):
diff --git a/Misc/NEWS.d/next/Tests/2018-10-27-13-41-55.bpo-34279.v0Xqxe.rst b/Misc/NEWS.d/next/Tests/2018-10-27-13-41-55.bpo-34279.v0Xqxe.rst
new file mode 100644
index 000000000000..a82fa6b304ac
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2018-10-27-13-41-55.bpo-34279.v0Xqxe.rst
@@ -0,0 +1,3 @@
+regrtest now issues a warning when no tests have been executed in a particular
+test file. Also, a new final result state is issued if no tests have been
+executed across all test files. Patch by Pablo Galindo.



More information about the Python-checkins mailing list