[Pytest-commit] commit/pytest: 6 new changesets

commits-noreply at bitbucket.org
Mon Jul 28 09:55:09 CEST 2014


6 new commits in pytest:

https://bitbucket.org/hpk42/pytest/commits/712b5fcfc605/
Changeset:   712b5fcfc605
Branch:      xfail-cause
User:        da... at mcbf.net
Date:        2014-07-26 15:11:05
Summary:     Add mark.xfail argument raises so that unexpected exceptions show up as test failures.
Affected #:  2 files

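As a quick illustration of the behaviour this changeset introduces (not part
of the commit; a minimal sketch assuming the change below is applied), a test
that fails with the declared exception is reported as xfailed, while any
other exception now shows up as a regular failure::

    import pytest

    # Matching exception: reported as "xfailed".
    @pytest.mark.xfail(raises=IndexError)
    def test_expected_index_error():
        [][0]

    # Non-matching exception: reported as a plain failure with this change.
    @pytest.mark.xfail(raises=IndexError)
    def test_unexpected_type_error():
        raise TypeError("not the declared exception")
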
diff -r 853ffb854a4813a6a2f9e0ad4229dde770f6c7a8 -r 712b5fcfc605043261980789e63cdb01b7f49345 _pytest/skipping.py
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -26,11 +26,13 @@
         "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True): mark the the test function "
+        "xfail(condition, reason=None, run=True, raises=None): mark the the test function "
         "as an expected failure if eval(condition) has a True value. "
         "Optionally specify a reason for better reporting and run=False if "
-        "you don't even want to execute the test function. See "
-        "http://pytest.org/latest/skipping.html"
+        "you don't even want to execute the test function. If only specific "
+        "exception(s) are expected, you can list them in raises, and if the test fails "
+        "in other ways, it will be reported as a true failure. "
+        "See http://pytest.org/latest/skipping.html"
     )
 
 def pytest_namespace():
@@ -60,6 +62,15 @@
     def wasvalid(self):
         return not hasattr(self, 'exc')
 
+    def invalidraise(self, exctype):
+        raises = self.get('raises')
+        if not raises:
+            return
+        if isinstance(raises, tuple):
+            return exctype not in raises
+        else:
+            return raises != exctype
+
     def istrue(self):
         try:
             return self._istrue()
@@ -171,7 +182,11 @@
         if not item.config.option.runxfail:
             if evalxfail.wasvalid() and evalxfail.istrue():
                 if call.excinfo:
-                    rep.outcome = "skipped"
+                    if evalxfail.invalidraise(call.excinfo.type):
+                        rep.outcome = "failed"
+                        return rep
+                    else:
+                        rep.outcome = "skipped"
                 elif call.when == "call":
                     rep.outcome = "failed"
                 else:

diff -r 853ffb854a4813a6a2f9e0ad4229dde770f6c7a8 -r 712b5fcfc605043261980789e63cdb01b7f49345 testing/test_skipping.py
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -330,6 +330,53 @@
             "*1 xfailed*",
         ])
 
+    def test_xfail_raises_match(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=TypeError)
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+
+    def test_xfail_raises_mismatch(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=IndexError)
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 failed*",
+        ])
+    def test_xfail_raises_tuple_match(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=(AttributeError, TypeError))
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+
+    def test_xfail_raises_tuple_mismatch(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=(AttributeError, IndexError))
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 failed*",
+        ])
+
 class TestXFailwithSetupTeardown:
     def test_failing_setup_issue9(self, testdir):
         testdir.makepyfile("""
@@ -575,7 +622,7 @@
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines([
         "*skipif(*condition)*skip*",
-        "*xfail(*condition, reason=None, run=True)*expected failure*",
+        "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
     ])
 
 def test_xfail_test_setup_exception(testdir):
@@ -617,7 +664,6 @@
         *2 skipped*
     """)
 
-
 class TestBooleanCondition:
     def test_skipif(self, testdir):
         testdir.makepyfile("""


https://bitbucket.org/hpk42/pytest/commits/86566f7208bf/
Changeset:   86566f7208bf
Branch:      xfail-cause
User:        da... at mcbf.net
Date:        2014-07-26 17:46:50
Summary:     isinstance() on exception value instead of comparing types, consolidate tests
Affected #:  2 files

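For context, a standalone sketch of the check this changeset switches to
(illustrative only; it mirrors the new invalidraise logic rather than calling
into pytest)::

    def invalid_raise(exc, raises):
        # isinstance() accepts a single exception class or a tuple of
        # classes and also matches subclasses, which the previous
        # type-equality comparison did not.
        if not raises:
            return None
        return not isinstance(exc, raises)

    assert invalid_raise(KeyError("k"), LookupError) is False   # subclass matches
    assert invalid_raise(TypeError("t"), (AttributeError, TypeError)) is False
    assert invalid_raise(IndexError("i"), TypeError) is True    # mismatch flagged
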
diff -r 712b5fcfc605043261980789e63cdb01b7f49345 -r 86566f7208bf9302fad9ce7aa5c4d455df74ebd3 _pytest/skipping.py
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -62,14 +62,11 @@
     def wasvalid(self):
         return not hasattr(self, 'exc')
 
-    def invalidraise(self, exctype):
+    def invalidraise(self, exc):
         raises = self.get('raises')
         if not raises:
             return
-        if isinstance(raises, tuple):
-            return exctype not in raises
-        else:
-            return raises != exctype
+        return not isinstance(exc, raises)
 
     def istrue(self):
         try:
@@ -182,7 +179,7 @@
         if not item.config.option.runxfail:
             if evalxfail.wasvalid() and evalxfail.istrue():
                 if call.excinfo:
-                    if evalxfail.invalidraise(call.excinfo.type):
+                    if evalxfail.invalidraise(call.excinfo.value):
                         rep.outcome = "failed"
                         return rep
                     else:

diff -r 712b5fcfc605043261980789e63cdb01b7f49345 -r 86566f7208bf9302fad9ce7aa5c4d455df74ebd3 testing/test_skipping.py
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -330,52 +330,27 @@
             "*1 xfailed*",
         ])
 
-    def test_xfail_raises_match(self, testdir):
+
+    @pytest.mark.parametrize('params', [('TypeError', 'TypeError', "*1 xfailed*"),
+                                        ('(AttributeError, TypeError)', 'TypeError',
+                                         "*1 xfailed*"),
+                                        ('TypeError', 'IndexError', "*1 failed*"),
+                                        ('(AttributeError, TypeError)', 'IndexError',
+                                         "*1 failed*"),
+                                        ])
+    def test_xfail_raises(self, params, testdir):
+        expected, actual, matchline = params
         p = testdir.makepyfile("""
             import pytest
-            @pytest.mark.xfail(raises=TypeError)
+            @pytest.mark.xfail(raises=%s)
             def test_raises():
-                raise TypeError()
-        """)
+                raise %s()
+        """ % (expected, actual))
         result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
-            "*1 xfailed*",
+            matchline,
         ])
 
-    def test_xfail_raises_mismatch(self, testdir):
-        p = testdir.makepyfile("""
-            import pytest
-            @pytest.mark.xfail(raises=IndexError)
-            def test_raises():
-                raise TypeError()
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 failed*",
-        ])
-    def test_xfail_raises_tuple_match(self, testdir):
-        p = testdir.makepyfile("""
-            import pytest
-            @pytest.mark.xfail(raises=(AttributeError, TypeError))
-            def test_raises():
-                raise TypeError()
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 xfailed*",
-        ])
-
-    def test_xfail_raises_tuple_mismatch(self, testdir):
-        p = testdir.makepyfile("""
-            import pytest
-            @pytest.mark.xfail(raises=(AttributeError, IndexError))
-            def test_raises():
-                raise TypeError()
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 failed*",
-        ])
 
 class TestXFailwithSetupTeardown:
     def test_failing_setup_issue9(self, testdir):


https://bitbucket.org/hpk42/pytest/commits/b8b2c7d3d129/
Changeset:   b8b2c7d3d129
Branch:      xfail-cause
User:        da... at mcbf.net
Date:        2014-07-26 18:10:32
Summary:     Update documentation
Affected #:  3 files

diff -r 86566f7208bf9302fad9ce7aa5c4d455df74ebd3 -r b8b2c7d3d129ca21350fdd9f554c9639ae63ab98 doc/en/assert.txt
--- a/doc/en/assert.txt
+++ b/doc/en/assert.txt
@@ -95,6 +95,22 @@
 provide you with helpful output in case of failures such as *no
 exception* or *wrong exception*.
 
+Note that it is also possible to specify a "raises" argument to
+``pytest.mark.xfail``, which checks that the test is failing in a more
+specific way than just having any exception raised::
+
+    @pytest.mark.xfail(raises=IndexError)
+    def test_f():
+        f()
+
+Using ``pytest.raises`` is likely to be better for cases where you are testing
+exceptions your own code is deliberately raising, whereas using
+``@pytest.mark.xfail`` with a check function is probably better for something
+like documenting unfixed bugs (where the test describes what "should" happen)
+or bugs in dependencies.
+
+
+
 .. _newreport:
 
 Making use of context-sensitive comparisons

diff -r 86566f7208bf9302fad9ce7aa5c4d455df74ebd3 -r b8b2c7d3d129ca21350fdd9f554c9639ae63ab98 doc/en/example/xfail_demo.py
--- a/doc/en/example/xfail_demo.py
+++ b/doc/en/example/xfail_demo.py
@@ -23,3 +23,8 @@
 
 def test_hello6():
     pytest.xfail("reason")
+
+@xfail(raises=IndexError)
+def test_hello7():
+    x = []
+    assert x[1] == 1

diff -r 86566f7208bf9302fad9ce7aa5c4d455df74ebd3 -r b8b2c7d3d129ca21350fdd9f554c9639ae63ab98 doc/en/skipping.txt
--- a/doc/en/skipping.txt
+++ b/doc/en/skipping.txt
@@ -149,6 +149,11 @@
     def test_function():
         ...
 
+If you want to be more specific as to why the test is failing, you can specify
+a single exception, or a list of exceptions, in the ``raises`` argument. Then
+the test will be reported as a regular failure if it fails with an
+exception not mentioned in ``raises``.
+
 You can furthermore prevent the running of an "xfail" test or
 specify a reason such as a bug ID or similar.  Here is
 a simple test file with the several usages:

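A short, self-contained comparison of the two approaches described in the
documentation hunks above (buggy_library_call is a hypothetical stand-in for
third-party code with a known bug)::

    import pytest

    def buggy_library_call():
        raise IndexError("known upstream bug")

    # Exceptions your own code deliberately raises: assert on them directly.
    def test_deliberate_exception():
        with pytest.raises(ValueError):
            int("not a number")

    # A known, unfixed bug: document it as an expected failure, but still
    # get a real failure if it starts breaking in a different way.
    @pytest.mark.xfail(raises=IndexError)
    def test_known_upstream_bug():
        buggy_library_call()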

https://bitbucket.org/hpk42/pytest/commits/bbbbedb1af92/
Changeset:   bbbbedb1af92
Branch:      xfail-cause
User:        da... at mcbf.net
Date:        2014-07-26 18:19:27
Summary:     Directly pass multiple parameters with mark.parametrize()
Affected #:  1 file

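For readers unfamiliar with the idiom, a minimal standalone example of the
parametrize style the test is switched to (the names are illustrative)::

    import pytest

    # Naming the parameters in the decorator lets each one arrive as its
    # own test-function argument, instead of a single tuple to unpack.
    @pytest.mark.parametrize('expected, actual, matchline', [
        ('TypeError', 'TypeError', '*1 xfailed*'),
        ('TypeError', 'IndexError', '*1 failed*'),
    ])
    def test_signature_style(expected, actual, matchline):
        assert isinstance(matchline, str)
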
diff -r b8b2c7d3d129ca21350fdd9f554c9639ae63ab98 -r bbbbedb1af9240ca346a87a218313da5a169f9ee testing/test_skipping.py
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -331,15 +331,13 @@
         ])
 
 
-    @pytest.mark.parametrize('params', [('TypeError', 'TypeError', "*1 xfailed*"),
-                                        ('(AttributeError, TypeError)', 'TypeError',
-                                         "*1 xfailed*"),
-                                        ('TypeError', 'IndexError', "*1 failed*"),
-                                        ('(AttributeError, TypeError)', 'IndexError',
-                                         "*1 failed*"),
-                                        ])
-    def test_xfail_raises(self, params, testdir):
-        expected, actual, matchline = params
+    @pytest.mark.parametrize('expected, actual, matchline',
+                             [('TypeError', 'TypeError', "*1 xfailed*"),
+                              ('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"),
+                              ('TypeError', 'IndexError', "*1 failed*"),
+                              ('(AttributeError, TypeError)', 'IndexError', "*1 failed*"),
+                              ])
+    def test_xfail_raises(self, expected, actual, matchline, testdir):
         p = testdir.makepyfile("""
             import pytest
             @pytest.mark.xfail(raises=%s)


https://bitbucket.org/hpk42/pytest/commits/cdde7d5fbe9f/
Changeset:   cdde7d5fbe9f
Branch:      xfail-cause
User:        da... at mcbf.net
Date:        2014-07-26 18:24:55
Summary:     Tiny example update for clarification
Affected #:  1 file

diff -r bbbbedb1af9240ca346a87a218313da5a169f9ee -r cdde7d5fbe9f83cb359e331ca2a3ea9a5aba19ca doc/en/example/xfail_demo.py
--- a/doc/en/example/xfail_demo.py
+++ b/doc/en/example/xfail_demo.py
@@ -27,4 +27,4 @@
 @xfail(raises=IndexError)
 def test_hello7():
     x = []
-    assert x[1] == 1
+    x[1] = 1


https://bitbucket.org/hpk42/pytest/commits/3decf8c96e3c/
Changeset:   3decf8c96e3c
User:        hpk42
Date:        2014-07-28 09:55:02
Summary:     Merged in squisher/pytest/xfail-cause (pull request #183)

Add mark.xfail argument raises so that unexpected exceptions show up as test failures.
Affected #:  5 files

diff -r 853ffb854a4813a6a2f9e0ad4229dde770f6c7a8 -r 3decf8c96e3c85b722302a2618b088ed93a8628a _pytest/skipping.py
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -26,11 +26,13 @@
         "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True): mark the the test function "
+        "xfail(condition, reason=None, run=True, raises=None): mark the the test function "
         "as an expected failure if eval(condition) has a True value. "
         "Optionally specify a reason for better reporting and run=False if "
-        "you don't even want to execute the test function. See "
-        "http://pytest.org/latest/skipping.html"
+        "you don't even want to execute the test function. If only specific "
+        "exception(s) are expected, you can list them in raises, and if the test fails "
+        "in other ways, it will be reported as a true failure. "
+        "See http://pytest.org/latest/skipping.html"
     )
 
 def pytest_namespace():
@@ -60,6 +62,12 @@
     def wasvalid(self):
         return not hasattr(self, 'exc')
 
+    def invalidraise(self, exc):
+        raises = self.get('raises')
+        if not raises:
+            return
+        return not isinstance(exc, raises)
+
     def istrue(self):
         try:
             return self._istrue()
@@ -171,7 +179,11 @@
         if not item.config.option.runxfail:
             if evalxfail.wasvalid() and evalxfail.istrue():
                 if call.excinfo:
-                    rep.outcome = "skipped"
+                    if evalxfail.invalidraise(call.excinfo.value):
+                        rep.outcome = "failed"
+                        return rep
+                    else:
+                        rep.outcome = "skipped"
                 elif call.when == "call":
                     rep.outcome = "failed"
                 else:

diff -r 853ffb854a4813a6a2f9e0ad4229dde770f6c7a8 -r 3decf8c96e3c85b722302a2618b088ed93a8628a doc/en/assert.txt
--- a/doc/en/assert.txt
+++ b/doc/en/assert.txt
@@ -95,6 +95,22 @@
 provide you with helpful output in case of failures such as *no
 exception* or *wrong exception*.
 
+Note that it is also possible to specify a "raises" argument to
+``pytest.mark.xfail``, which checks that the test is failing in a more
+specific way than just having any exception raised::
+
+    @pytest.mark.xfail(raises=IndexError)
+    def test_f():
+        f()
+
+Using ``pytest.raises`` is likely to be better for cases where you are testing
+exceptions your own code is deliberately raising, whereas using
+``@pytest.mark.xfail`` with a check function is probably better for something
+like documenting unfixed bugs (where the test describes what "should" happen)
+or bugs in dependencies.
+
+
+
 .. _newreport:
 
 Making use of context-sensitive comparisons

diff -r 853ffb854a4813a6a2f9e0ad4229dde770f6c7a8 -r 3decf8c96e3c85b722302a2618b088ed93a8628a doc/en/example/xfail_demo.py
--- a/doc/en/example/xfail_demo.py
+++ b/doc/en/example/xfail_demo.py
@@ -23,3 +23,8 @@
 
 def test_hello6():
     pytest.xfail("reason")
+
+@xfail(raises=IndexError)
+def test_hello7():
+    x = []
+    x[1] = 1

diff -r 853ffb854a4813a6a2f9e0ad4229dde770f6c7a8 -r 3decf8c96e3c85b722302a2618b088ed93a8628a doc/en/skipping.txt
--- a/doc/en/skipping.txt
+++ b/doc/en/skipping.txt
@@ -149,6 +149,11 @@
     def test_function():
         ...
 
+If you want to be more specific as to why the test is failing, you can specify
+a single exception, or a list of exceptions, in the ``raises`` argument. Then
+the test will be reported as a regular failure if it fails with an
+exception not mentioned in ``raises``.
+
 You can furthermore prevent the running of an "xfail" test or
 specify a reason such as a bug ID or similar.  Here is
 a simple test file with the several usages:

diff -r 853ffb854a4813a6a2f9e0ad4229dde770f6c7a8 -r 3decf8c96e3c85b722302a2618b088ed93a8628a testing/test_skipping.py
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -330,6 +330,26 @@
             "*1 xfailed*",
         ])
 
+
+    @pytest.mark.parametrize('expected, actual, matchline',
+                             [('TypeError', 'TypeError', "*1 xfailed*"),
+                              ('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"),
+                              ('TypeError', 'IndexError', "*1 failed*"),
+                              ('(AttributeError, TypeError)', 'IndexError', "*1 failed*"),
+                              ])
+    def test_xfail_raises(self, expected, actual, matchline, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=%s)
+            def test_raises():
+                raise %s()
+        """ % (expected, actual))
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            matchline,
+        ])
+
+
 class TestXFailwithSetupTeardown:
     def test_failing_setup_issue9(self, testdir):
         testdir.makepyfile("""
@@ -575,7 +595,7 @@
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines([
         "*skipif(*condition)*skip*",
-        "*xfail(*condition, reason=None, run=True)*expected failure*",
+        "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
     ])
 
 def test_xfail_test_setup_exception(testdir):
@@ -617,7 +637,6 @@
         *2 skipped*
     """)
 
-
 class TestBooleanCondition:
     def test_skipif(self, testdir):
         testdir.makepyfile("""

Repository URL: https://bitbucket.org/hpk42/pytest/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

