[pypy-svn] r74364 - pypy/branch/py12/py/_plugin

hpk at codespeak.net
Tue May 4 21:32:06 CEST 2010


Author: hpk
Date: Tue May  4 21:32:03 2010
New Revision: 74364

Modified:
   pypy/branch/py12/py/_plugin/pytest__pytest.py
   pypy/branch/py12/py/_plugin/pytest_skipping.py
   pypy/branch/py12/py/_plugin/pytest_terminal.py
   pypy/branch/py12/py/_plugin/pytest_terminal.py.orig
Log:
update from py-trunk to have new xfail(run=False) marker and improved reporting


Modified: pypy/branch/py12/py/_plugin/pytest__pytest.py
==============================================================================
--- pypy/branch/py12/py/_plugin/pytest__pytest.py	(original)
+++ pypy/branch/py12/py/_plugin/pytest__pytest.py	Tue May  4 21:32:03 2010
@@ -46,7 +46,8 @@
             recorder = RecordCalls()
             self._recorders[hookspec] = recorder
             self._registry.register(recorder)
-        self.hook = HookRelay(hookspecs, registry=self._registry)
+        self.hook = HookRelay(hookspecs, registry=self._registry, 
+            prefix="pytest_")
 
     def finish_recording(self):
         for recorder in self._recorders.values():
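
For context on the change above: the new prefix argument restricts the relay to hook methods whose names start with "pytest_". A minimal sketch of that filtering idea, assuming nothing about the py library's real HookRelay internals (PrefixedRelay and Specs are made-up names, not library API):

    # Illustrative only -- not the actual HookRelay implementation.
    class PrefixedRelay:
        def __init__(self, hookspecs, registry, prefix="pytest_"):
            self.registry = registry
            # treat only attributes carrying the prefix as hook specs
            self.hooknames = [name for name in dir(hookspecs)
                              if name.startswith(prefix)]

    class Specs:
        def pytest_runtest_setup(self, item):
            pass
        def helper_method(self):
            pass

    relay = PrefixedRelay(Specs, registry=None)
    assert relay.hooknames == ["pytest_runtest_setup"]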

Modified: pypy/branch/py12/py/_plugin/pytest_skipping.py
==============================================================================
--- pypy/branch/py12/py/_plugin/pytest_skipping.py	(original)
+++ pypy/branch/py12/py/_plugin/pytest_skipping.py	Tue May  4 21:32:03 2010
@@ -83,10 +83,17 @@
 depending on platform::
 
     @py.test.mark.xfail("sys.version_info >= (3,0)")
-
     def test_function():
         ...
 
+To not run a test and still regard it as "xfailed"::
+
+    @py.test.mark.xfail(..., run=False)
+
+To specify an explicit reason to be shown with xfailure detail::
+
+    @py.test.mark.xfail(..., reason="my reason")
+
 
 skipping on a missing import dependency
 --------------------------------------------------
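
Returning to the xfail additions above: the condition string and the new run/reason keywords can be combined on a single marker. A usage sketch (the test body and reason string are made up; combining all three options is an assumption consistent with how MarkEvaluator below reads both args and kwargs):

    import py

    @py.test.mark.xfail("sys.version_info >= (3,0)",
                        run=False, reason="not ported to py3k yet")
    def test_function():
        assert 0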
@@ -116,34 +123,94 @@
             py.test.skip("unsuppored configuration")
 
 """
-# XXX py.test.skip, .importorskip and the Skipped class 
-# should also be defined in this plugin, requires thought/changes
 
 import py
 
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group.addoption('--runxfail', 
+           action="store_true", dest="runxfail", default=False,
+           help="run tests even if they are marked xfail")
+
+class MarkEvaluator:
+    def __init__(self, item, name):
+        self.item = item
+        self.name = name
+        self.holder = getattr(item.obj, name, None)
+
+    def __bool__(self):
+        return bool(self.holder)
+    __nonzero__ = __bool__
+
+    def istrue(self):
+        if self.holder:
+            d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
+            self.result = True
+            for expr in self.holder.args:
+                self.expr = expr
+                if isinstance(expr, str):
+                    result = cached_eval(self.item.config, expr, d)
+                else:
+                    result = expr
+                if not result:
+                    self.result = False
+                    self.expr = expr
+                    break
+        return getattr(self, 'result', False)
+
+    def get(self, attr, default=None):
+        return self.holder.kwargs.get(attr, default)
+
+    def getexplanation(self):
+        expl = self.get('reason', None)
+        if not expl:
+            if not hasattr(self, 'expr'):
+                return "condition: True"
+            else:
+                return "condition: " + self.expr
+        return expl
+        
 
 def pytest_runtest_setup(item):
-    expr, result = evalexpression(item, 'skipif')
-    if result:
-        py.test.skip(expr)
+    if not isinstance(item, py.test.collect.Function):
+        return
+    evalskip = MarkEvaluator(item, 'skipif')
+    if evalskip.istrue():
+        py.test.skip(evalskip.getexplanation())
+    item._evalxfail = MarkEvaluator(item, 'xfail')
+    if not item.config.getvalue("runxfail"):
+        if item._evalxfail.istrue():
+            if not item._evalxfail.get('run', True):
+                py.test.skip("xfail")
 
 def pytest_runtest_makereport(__multicall__, item, call):
-    if call.when != "call":
+    if not isinstance(item, py.test.collect.Function):
+        return
+    evalxfail = getattr(item, '_evalxfail', None)
+    if not evalxfail:
         return
-    expr, result = evalexpression(item, 'xfail')
-    rep = __multicall__.execute()
-    if result:
-        if call.excinfo:
-            rep.skipped = True
-            rep.failed = rep.passed = False
+    if call.when == "setup":
+        rep = __multicall__.execute()
+        if rep.skipped and evalxfail.istrue():
+            expl = evalxfail.getexplanation()
+            if not evalxfail.get("run", True):
+                expl = "[NOTRUN] " + expl
+            rep.keywords['xfail'] = expl
+        return rep
+    elif call.when == "call":
+        rep = __multicall__.execute()
+        if not item.config.getvalue("runxfail") and evalxfail.istrue():
+            if call.excinfo:
+                rep.skipped = True
+                rep.failed = rep.passed = False
+            else:
+                rep.skipped = rep.passed = False
+                rep.failed = True
+            rep.keywords['xfail'] = evalxfail.getexplanation()
         else:
-            rep.skipped = rep.passed = False
-            rep.failed = True
-        rep.keywords['xfail'] = expr 
-    else:
-        if 'xfail' in rep.keywords:
-            del rep.keywords['xfail']
-    return rep
+            if 'xfail' in rep.keywords:
+                del rep.keywords['xfail']
+        return rep
 
 # called by terminalreporter progress reporting
 def pytest_report_teststatus(report):
@@ -151,7 +218,7 @@
         if report.skipped:
             return "xfailed", "x", "xfail"
         elif report.failed:
-            return "xpassed", "P", "xpass"
+            return "xpassed", "P", "XPASS"
 
 # called by the terminalreporter instance/plugin
 def pytest_terminal_summary(terminalreporter):
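
Taken together, the setup/call hooks and the status mapping above implement a simple outcome rule: with an active xfail marker (and --runxfail not given), a raising test is reported as skipped ("xfailed") and a passing test as failed ("xpassed"). A condensed stand-alone restatement (xfail_outcome is a made-up name, not plugin API):

    def xfail_outcome(raised, xfail_active, runxfail=False):
        # without an active marker (or with --runxfail) nothing is flipped
        if not xfail_active or runxfail:
            return "failed" if raised else "passed"
        # expected failure: raising counts as xfail, passing as xpass
        return "skipped" if raised else "failed"

    assert xfail_outcome(raised=True, xfail_active=True) == "skipped"
    assert xfail_outcome(raised=False, xfail_active=True) == "failed"
    assert xfail_outcome(raised=False, xfail_active=False) == "passed"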
@@ -169,40 +236,17 @@
             return
         tr.write_sep("_", "expected failures")
         for rep in xfailed:
-            entry = rep.longrepr.reprcrash
-            modpath = rep.item.getmodpath(includemodule=True)
-            pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno)
-            reason = rep.longrepr.reprcrash.message
-            i = reason.find("\n")
-            if i != -1:
-                reason = reason[:i]
+            pos = terminalreporter.gettestid(rep.item)
+            reason = rep.keywords['xfail']
             tr._tw.line("%s %s" %(pos, reason))
 
     xpassed = terminalreporter.stats.get("xpassed")
     if xpassed:
         tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS")
         for rep in xpassed:
-            fspath, lineno, modpath = rep.item.reportinfo()
-            pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno)
-            tr._tw.line(pos)
-
-
-def evalexpression(item, keyword):
-    if isinstance(item, py.test.collect.Function):
-        markholder = getattr(item.obj, keyword, None)
-        result = False
-        if markholder:
-            d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config}
-            expr, result = None, True
-            for expr in markholder.args:
-                if isinstance(expr, str):
-                    result = cached_eval(item.config, expr, d)
-                else:
-                    result = expr
-                if not result:
-                    break
-            return expr, result
-    return None, False
+            pos = terminalreporter.gettestid(rep.item)
+            reason = rep.keywords['xfail']
+            tr._tw.line("%s %s" %(pos, reason))
 
 def cached_eval(config, expr, d):
     if not hasattr(config, '_evalcache'):
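
The condition handling in MarkEvaluator boils down to: string arguments are eval()ed in a namespace holding os, sys and the config object, every argument must hold, and the first false one is remembered for the "condition: ..." explanation. A stand-alone sketch of that rule (evaluate_conditions is a made-up name; the real code additionally caches results per config):

    import os, sys

    def evaluate_conditions(args, config=None):
        namespace = {'os': os, 'sys': sys, 'config': config}
        for expr in args:
            result = eval(expr, namespace) if isinstance(expr, str) else expr
            if not result:
                # remember which expression failed, for reporting
                return False, expr
        return True, None

    assert evaluate_conditions(["sys.version_info >= (2,0)"]) == (True, None)
    assert evaluate_conditions([0]) == (False, 0)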

Modified: pypy/branch/py12/py/_plugin/pytest_terminal.py
==============================================================================
--- pypy/branch/py12/py/_plugin/pytest_terminal.py	(original)
+++ pypy/branch/py12/py/_plugin/pytest_terminal.py	Tue May  4 21:32:03 2010
@@ -128,6 +128,20 @@
         else: 
             return "???", dict(red=True)
 
+    def gettestid(self, item, relative=True):
+        fspath = item.fspath
+        chain = [x for x in item.listchain() if x.fspath == fspath]
+        chain = chain[1:]
+        names = [x.name for x in chain if x.name != "()"]
+        path = item.fspath
+        if relative:
+            relpath = path.relto(self.curdir)
+            if relpath:
+                path = relpath
+        names.insert(0, str(path))
+        return "::".join(names)
+
+
     def pytest_internalerror(self, excrepr):
         for line in str(excrepr).split("\n"):
             self.write_line("INTERNALERROR> " + line)

Modified: pypy/branch/py12/py/_plugin/pytest_terminal.py.orig
==============================================================================
--- pypy/branch/py12/py/_plugin/pytest_terminal.py.orig	(original)
+++ pypy/branch/py12/py/_plugin/pytest_terminal.py.orig	Tue May  4 21:32:03 2010
@@ -257,7 +257,7 @@
         self._sessionstarttime = py.std.time.time()
 
         verinfo = ".".join(map(str, sys.version_info[:3]))
-        msg = "python: platform %s -- Python %s" % (sys.platform, verinfo)
+        msg = "platform %s -- Python %s" % (sys.platform, verinfo)
         msg += " -- pytest-%s" % (py.__version__)
         if self.config.option.verbose or self.config.option.debug or getattr(self.config.option, 'pastebin', None):
             msg += " -- " + str(sys.executable)
@@ -420,8 +420,6 @@
         self._failed = []
 
     def outindent(self, line):
-        s = self.indent + str(line)
-        print ("printing: %s" % s)
         self.out.line(self.indent + str(line))
 
     def pytest_internalerror(self, excrepr):


