[py-svn] py-trunk commit 06a7ad82f24c: fix issue91: introduce new py.test.xfail(reason) helper

commits-noreply at bitbucket.org
Thu May 20 14:32:13 CEST 2010


# HG changeset patch -- Bitbucket.org
# Project py-trunk
# URL http://bitbucket.org/hpk42/py-trunk/overview
# User holger krekel <holger at merlinux.eu>
# Date 1274354991 -7200
# Node ID 06a7ad82f24cf06ecb01e7c66796a1b6e0ed5dcc
# Parent  cafe6568b5aeb2a151658752867ce7143ee3b7ba
fix issue91: introduce new py.test.xfail(reason) helper
to imperatively mark a test as expected to fail. It can
be used from within setup and test functions. This is
especially useful for parametrized tests where certain
configurations are expected to fail. In that case the
declarative @py.test.mark.xfail approach cannot be used,
as it would mark all configurations as xfail.
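
For illustration only (not part of this patch), here is a minimal sketch of
combining the new imperative helper with a parametrized test, so that only
one configuration is reported as xfail; the module name, the "mode"
parameter and the "supported"/"unsupported" values are made up for this
example:

    # example_xfail_param.py -- illustrative sketch, names are hypothetical
    import py

    def pytest_generate_tests(metafunc):
        # generate two configurations for test_feature
        if "mode" in metafunc.funcargnames:
            for mode in ["supported", "unsupported"]:
                metafunc.addcall(funcargs=dict(mode=mode))

    def test_feature(mode):
        if mode == "unsupported":
            # only this configuration is expected to fail; the declarative
            # @py.test.mark.xfail decorator would mark both configurations
            py.test.xfail("unsupported mode, see issue91")
        assert mode == "supported"

Running this with "-rx" should report one passed and one xfailed test, with
the given reason string shown in the short summary.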

--- a/testing/plugin/test_pytest_skipping.py
+++ b/testing/plugin/test_pytest_skipping.py
@@ -169,6 +169,43 @@ class TestXFail:
         ])
         assert result.ret == 1
 
+    def test_xfail_imperative(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def test_this():
+                py.test.xfail("hello")
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+        result = testdir.runpytest(p, "-rx")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*reason:*hello*",
+        ])
+
+    def test_xfail_imperative_in_setup_function(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def setup_function(function):
+                py.test.xfail("hello")
+            
+            def test_this():
+                assert 0
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+        result = testdir.runpytest(p, "-rx")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*reason:*hello*",
+        ])
+
+
+
+
+
 class TestSkipif:
     def test_skipif_conditional(self, testdir):
         item = testdir.getitem("""

--- a/py/_plugin/pytest_runner.py
+++ b/py/_plugin/pytest_runner.py
@@ -10,6 +10,7 @@ def pytest_namespace():
         'skip'         : skip,
         'importorskip' : importorskip,
         'fail'         : fail, 
+        'xfail'        : xfail, 
         'exit'         : exit, 
     }
 
@@ -295,6 +296,10 @@ class Failed(OutcomeException):
     """ raised from an explicit call to py.test.fail() """
     __module__ = 'builtins'
 
+class XFailed(OutcomeException): 
+    """ raised from an explicit call to py.test.xfail() """
+    __module__ = 'builtins'
+
 class ExceptionFailure(Failed): 
     """ raised by py.test.raises on an exception-assertion mismatch. """
     def __init__(self, expr, expected, msg=None, excinfo=None): 
@@ -335,6 +340,14 @@ def fail(msg=""):
 
 fail.Exception = Failed
 
+def xfail(reason=""):
+    """ xfail an executing test or setup functions, taking an optional 
+    reason string.
+    """
+    __tracebackhide__ = True
+    raise XFailed(reason)
+xfail.Exception = XFailed
+
 def raises(ExpectedException, *args, **kwargs):
     """ if args[0] is callable: raise AssertionError if calling it with 
         the remaining arguments does not raise the expected exception.  

--- a/py/_plugin/pytest_skipping.py
+++ b/py/_plugin/pytest_skipping.py
@@ -185,9 +185,17 @@ def pytest_runtest_setup(item):
 def pytest_runtest_makereport(__multicall__, item, call):
     if not isinstance(item, py.test.collect.Function):
         return
-    evalxfail = getattr(item, '_evalxfail', None)
-    if not evalxfail:
-        return
+    if not (call.excinfo and 
+        call.excinfo.errisinstance(py.test.xfail.Exception)):
+        evalxfail = getattr(item, '_evalxfail', None)
+        if not evalxfail:
+            return
+    if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception):
+        rep = __multicall__.execute()
+        rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg
+        rep.skipped = True
+        rep.failed = False
+        return rep
     if call.when == "setup":
         rep = __multicall__.execute()
         if rep.skipped and evalxfail.istrue():

--- a/CHANGELOG
+++ b/CHANGELOG
@@ -9,6 +9,14 @@ Changes between 1.3.0 and 1.3.1
 - fix issue95: late-import zlib so that it's not required 
   for general py.test startup. 
 
+- fix issue91: introduce new py.test.xfail(reason) helper
+  to imperatively mark a test as expected to fail. It can
+  be used from within setup and test functions. This is
+  especially useful for parametrized tests where certain
+  configurations are expected to fail. In that case the
+  declarative @py.test.mark.xfail approach cannot be used,
+  as it would mark all configurations as xfail.
+
 - make py.test.cmdline.main() return the exitstatus 
   instead of raising (which is still done by py.cmdline.pytest())
   and make it so that py.test.cmdline.main() can be called


