[py-svn] commit/pytest: hpk42: fix skip/xfail confusion, reported and discussed on

Bitbucket commits-noreply at bitbucket.org
Sat Jun 23 11:33:03 CEST 2012


1 new commit in pytest:


https://bitbucket.org/hpk42/pytest/changeset/86ea199221ab/
changeset:   86ea199221ab
user:        hpk42
date:        2012-06-23 11:32:32
summary:     fix skip/xfail confusion, reported and discussed on
http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get
affected #:  10 files

diff -r 2bfb9da30dfdd53c7f4aeaf5bfe0d82280d0ccdd -r 86ea199221ab3ee87432d2f55ed81f9b18da7f04 CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,21 +1,33 @@
 Changes between 2.2.4 and 2.2.5.dev
 -----------------------------------
 
+- fix xfail/skip confusion: a skip-mark or an imperative pytest.skip
+  will now take precedence over xfail-markers because we
+  can't determine xfail/xpass status in case of a skip. see also:
+  http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get
+
 - always report installed 3rd party plugins
+
 - fix issue160: a failing setup of an xfail-marked tests should
   be reported as xfail (not xpass)
+
 - fix issue128: show captured output when capsys/capfd are used
+
 - pluginmanager.register(...) now raises ValueError if the
   plugin has been already registered or the name is taken
+
 - fix issue159: improve http://pytest.org/latest/faq.html 
   especially with respect to the "magic" history, also mention
   pytest-django, trial and unittest integration.
+
 - reporting refinements:
   - pytest_report_header now receives a "startdir" so that
     you can use startdir.bestrelpath(yourpath) to show
     nice relative path
+
   - allow plugins to implement both pytest_report_header and 
     pytest_sessionstart (sessionstart is invoked first).
+
   - don't show deselected reason line if there is none
 
 Changes between 2.2.3 and 2.2.4


diff -r 2bfb9da30dfdd53c7f4aeaf5bfe0d82280d0ccdd -r 86ea199221ab3ee87432d2f55ed81f9b18da7f04 ISSUES.txt
--- a/ISSUES.txt
+++ b/ISSUES.txt
@@ -355,3 +355,31 @@
     result = pytester.runpytest(p)
     assert result.ret == 0
     assert result.passed == 1
+
+Another idea is to allow to define a full scenario including the run
+in one content string::
+
+    runscenario("""
+        test_{TESTNAME}.py:
+            import pytest
+            @pytest.mark.xfail
+            def test_that_fails():
+                assert 0
+
+            @pytest.mark.skipif("True")
+            def test_hello():
+                pass
+
+        conftest.py:
+            import pytest
+            def pytest_runtest_setup(item):
+                pytest.skip("abc")
+
+        runpytest -rsxX
+        *SKIP*{TESTNAME}*
+        *1 skipped* 
+    """)
+
+This could be run with at least three different ways of invoking pytest:
+through the shell, through "python -m pytest" and inlined. As inlined
+would be the fastest it could be run first (or "--fast" mode).


diff -r 2bfb9da30dfdd53c7f4aeaf5bfe0d82280d0ccdd -r 86ea199221ab3ee87432d2f55ed81f9b18da7f04 _pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.2.5.dev3'
+__version__ = '2.2.5.dev4'


diff -r 2bfb9da30dfdd53c7f4aeaf5bfe0d82280d0ccdd -r 86ea199221ab3ee87432d2f55ed81f9b18da7f04 _pytest/junitxml.py
--- a/_pytest/junitxml.py
+++ b/_pytest/junitxml.py
@@ -114,7 +114,7 @@
 
     def append_failure(self, report):
         #msg = str(report.longrepr.reprtraceback.extraline)
-        if "xfail" in report.keywords:
+        if hasattr(report, "wasxfail"):
             self.append(
                 Junit.skipped(message="xfail-marked test passes unexpectedly"))
             self.skipped += 1
@@ -148,8 +148,8 @@
         self.errors += 1
 
     def append_skipped(self, report):
-        if "xfail" in report.keywords:
-            self.append(Junit.skipped(str(report.keywords['xfail']),
+        if hasattr(report, "wasxfail"):
+            self.append(Junit.skipped(str(report.wasxfail),
                                       message="expected test failure"))
         else:
             filename, lineno, skipreason = report.longrepr


diff -r 2bfb9da30dfdd53c7f4aeaf5bfe0d82280d0ccdd -r 86ea199221ab3ee87432d2f55ed81f9b18da7f04 _pytest/main.py
--- a/_pytest/main.py
+++ b/_pytest/main.py
@@ -387,7 +387,7 @@
             raise self.Interrupted(self.shouldstop)
 
     def pytest_runtest_logreport(self, report):
-        if report.failed and 'xfail' not in getattr(report, 'keywords', []):
+        if report.failed and not hasattr(report, 'wasxfail'):
             self._testsfailed += 1
             maxfail = self.config.getvalue("maxfail")
             if maxfail and self._testsfailed >= maxfail:


diff -r 2bfb9da30dfdd53c7f4aeaf5bfe0d82280d0ccdd -r 86ea199221ab3ee87432d2f55ed81f9b18da7f04 _pytest/pdb.py
--- a/_pytest/pdb.py
+++ b/_pytest/pdb.py
@@ -50,7 +50,7 @@
 
 def pytest_runtest_makereport():
     pytestPDB.item = None
-    
+
 class PdbInvoke:
     @pytest.mark.tryfirst
     def pytest_runtest_makereport(self, item, call, __multicall__):
@@ -59,7 +59,7 @@
             call.excinfo.errisinstance(pytest.skip.Exception) or \
             call.excinfo.errisinstance(py.std.bdb.BdbQuit):
             return rep
-        if "xfail" in rep.keywords:
+        if hasattr(rep, "wasxfail"):
             return rep
         # we assume that the above execute() suspended capturing
         # XXX we re-use the TerminalReporter's terminalwriter


diff -r 2bfb9da30dfdd53c7f4aeaf5bfe0d82280d0ccdd -r 86ea199221ab3ee87432d2f55ed81f9b18da7f04 _pytest/skipping.py
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -138,7 +138,7 @@
         rep = __multicall__.execute()
         if rep.when == "call":
             # we need to translate into how py.test encodes xpass
-            rep.keywords['xfail'] = "reason: " + repr(item._unexpectedsuccess)
+            rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
             rep.outcome = "failed"
         return rep
     if not (call.excinfo and
@@ -149,27 +149,27 @@
     if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception):
         if not item.config.getvalue("runxfail"):
             rep = __multicall__.execute()
-            rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg
+            rep.wasxfail = "reason: " + call.excinfo.value.msg
             rep.outcome = "skipped"
             return rep
     rep = __multicall__.execute()
     evalxfail = item._evalxfail
-    if not item.config.option.runxfail:
-        if evalxfail.wasvalid() and evalxfail.istrue():
-            if call.excinfo:
-                rep.outcome = "skipped"
-                rep.keywords['xfail'] = evalxfail.getexplanation()
-            elif call.when == "call":
-                rep.outcome = "failed"
-                rep.keywords['xfail'] = evalxfail.getexplanation()
-            return rep
-    if 'xfail' in rep.keywords:
-        del rep.keywords['xfail']
+    if not rep.skipped:
+        if not item.config.option.runxfail:
+            if evalxfail.wasvalid() and evalxfail.istrue():
+                if call.excinfo:
+                    rep.outcome = "skipped"
+                elif call.when == "call":
+                    rep.outcome = "failed"
+                else:
+                    return rep
+                rep.wasxfail = evalxfail.getexplanation()
+                return rep
     return rep
 
 # called by terminalreporter progress reporting
 def pytest_report_teststatus(report):
-    if 'xfail' in report.keywords:
+    if hasattr(report, "wasxfail"):
         if report.skipped:
             return "xfailed", "x", "xfail"
         elif report.failed:
@@ -216,7 +216,7 @@
     if xfailed:
         for rep in xfailed:
             pos = rep.nodeid
-            reason = rep.keywords['xfail']
+            reason = rep.wasxfail
             lines.append("XFAIL %s" % (pos,))
             if reason:
                 lines.append("  " + str(reason))
@@ -226,7 +226,7 @@
     if xpassed:
         for rep in xpassed:
             pos = rep.nodeid
-            reason = rep.keywords['xfail']
+            reason = rep.wasxfail
             lines.append("XPASS %s %s" %(pos, reason))
 
 def cached_eval(config, expr, d):


diff -r 2bfb9da30dfdd53c7f4aeaf5bfe0d82280d0ccdd -r 86ea199221ab3ee87432d2f55ed81f9b18da7f04 setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
         name='pytest',
         description='py.test: simple powerful testing with Python',
         long_description = long_description,
-        version='2.2.5.dev3',
+        version='2.2.5.dev4',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],


diff -r 2bfb9da30dfdd53c7f4aeaf5bfe0d82280d0ccdd -r 86ea199221ab3ee87432d2f55ed81f9b18da7f04 testing/test_junitxml.py
--- a/testing/test_junitxml.py
+++ b/testing/test_junitxml.py
@@ -138,7 +138,7 @@
                 sys.stderr.write("hello-stderr\\n")
                 raise ValueError(42)
         """)
-            
+
         result, dom = runandparse(testdir)
         assert result.ret
         node = dom.getElementsByTagName("testsuite")[0]
@@ -366,7 +366,7 @@
                 27, # issue #126
                0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) #, 0x110000)
     valid = (0x9, 0xA, 0x20,) # 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF)
-    
+
     from _pytest.junitxml import bin_xml_escape
 
 


diff -r 2bfb9da30dfdd53c7f4aeaf5bfe0d82280d0ccdd -r 86ea199221ab3ee87432d2f55ed81f9b18da7f04 testing/test_skipping.py
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -113,8 +113,7 @@
         assert len(reports) == 3
         callreport = reports[1]
         assert callreport.skipped
-        expl = callreport.keywords['xfail']
-        assert expl == ""
+        assert callreport.wasxfail == ""
 
     def test_xfail_xpassed(self, testdir):
         item = testdir.getitem("""
@@ -127,8 +126,7 @@
         assert len(reports) == 3
         callreport = reports[1]
         assert callreport.failed
-        expl = callreport.keywords['xfail']
-        assert expl == ""
+        assert callreport.wasxfail == ""
 
     def test_xfail_run_anyway(self, testdir):
         testdir.makepyfile("""
@@ -155,7 +153,8 @@
         reports = runtestprotocol(item, log=False)
         callreport = reports[1]
         assert callreport.failed
-        assert 'xfail' not in callreport.keywords
+        assert not hasattr(callreport, "wasxfail")
+        assert 'xfail' in callreport.keywords
 
     def test_xfail_not_report_default(self, testdir):
         p = testdir.makepyfile(test_one="""
@@ -572,3 +571,28 @@
     assert result.ret == 0
     assert 'xfailed' in result.stdout.str()
     assert 'xpassed' not in result.stdout.str()
+
+def test_imperativeskip_on_xfail_test(testdir):
+    testdir.makepyfile("""
+        import pytest
+        @pytest.mark.xfail
+        def test_that_fails():
+            assert 0
+
+        @pytest.mark.skipif("True")
+        def test_hello():
+            pass
+    """)
+    testdir.makeconftest("""
+        import pytest
+        def pytest_runtest_setup(item):
+            pytest.skip("abc")
+    """)
+    result = testdir.runpytest("-rsxX")
+    result.stdout.fnmatch_lines_random("""
+        *SKIP*abc*
+        *SKIP*condition: True*
+        *2 skipped*
+    """)
+
+

Repository URL: https://bitbucket.org/hpk42/pytest/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the pytest-commit mailing list