[pypy-commit] benchmarks default: import the main pypy repo at revision b6be8465a274, tag 'benchmarked'
antocuni
noreply at buildbot.pypy.org
Mon Jul 25 14:24:13 CEST 2011
Author: Antonio Cuni <anto.cuni at gmail.com>
Branch:
Changeset: r131:f3830896d637
Date: 2011-07-25 13:52 +0200
http://bitbucket.org/pypy/benchmarks/changeset/f3830896d637/
Log: import the main pypy repo at revision b6be8465a274, tag
'benchmarked'
diff too long, truncating to 10000 out of 1743062 lines
diff --git a/Notes.txt b/Notes.txt
--- a/Notes.txt
+++ b/Notes.txt
@@ -1,6 +1,6 @@
Notes
======
-twisted can be updated by killing lib/twisted, then running::
+twisted can be updated by killing lib/twisted-trunk, then running::
svn export svn://svn.twistedmatrix.com/svn/Twisted/trunk lib/twisted-trunk -q
diff --git a/lib/pypy/.gitignore b/lib/pypy/.gitignore
new file mode 100644
--- /dev/null
+++ b/lib/pypy/.gitignore
@@ -0,0 +1,21 @@
+.hg
+.svn
+
+*.pyc
+*.pyo
+*~
+
+bin/pypy-c
+include/*.h
+lib_pypy/ctypes_config_cache/_[^_]*_*.py
+pypy/_cache
+pypy/doc/*.html
+pypy/doc/config/*.html
+pypy/doc/discussion/*.html
+pypy/module/cpyext/src/*.o
+pypy/module/cpyext/test/*.o
+pypy/module/test_lib_pypy/ctypes_tests/*.o
+pypy/translator/c/src/dtoa.o
+pypy/translator/goal/pypy-c
+pypy/translator/goal/target*-c
+release/
\ No newline at end of file
diff --git a/lib/pypy/.hg_archival.txt b/lib/pypy/.hg_archival.txt
new file mode 100644
--- /dev/null
+++ b/lib/pypy/.hg_archival.txt
@@ -0,0 +1,4 @@
+repo: 45eff22199974f85fd96138d25510f4660aba5a1
+node: b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5
+branch: default
+tag: benchmarked
diff --git a/lib/pypy/.hgignore b/lib/pypy/.hgignore
new file mode 100644
--- /dev/null
+++ b/lib/pypy/.hgignore
@@ -0,0 +1,73 @@
+syntax: glob
+*.py[co]
+*~
+.*.swp
+
+syntax: regexp
+^testresult$
+^site-packages$
+^site-packages/.*$
+^site-packages/.*$
+^bin$
+^pypy/bin/pypy-c
+^pypy/module/cpyext/src/.+\.o$
+^pypy/module/cpyext/src/.+\.obj$
+^pypy/module/cpyext/test/.+\.errors$
+^pypy/module/cpyext/test/.+\.o$
+^pypy/module/cpyext/test/.+\.obj$
+^pypy/module/cpyext/test/.+\.manifest$
+^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$
+^pypy/doc/.+\.html$
+^pypy/doc/config/.+\.rst$
+^pypy/doc/basicblock\.asc$
+^pypy/doc/.+\.svninfo$
+^pypy/translator/c/src/libffi_msvc/.+\.obj$
+^pypy/translator/c/src/libffi_msvc/.+\.dll$
+^pypy/translator/c/src/libffi_msvc/.+\.lib$
+^pypy/translator/c/src/libffi_msvc/.+\.exp$
+^pypy/translator/c/src/cjkcodecs/.+\.o$
+^pypy/translator/c/src/cjkcodecs/.+\.obj$
+^pypy/translator/jvm/\.project$
+^pypy/translator/jvm/\.classpath$
+^pypy/translator/jvm/eclipse-bin$
+^pypy/translator/jvm/src/pypy/.+\.class$
+^pypy/translator/benchmark/docutils$
+^pypy/translator/benchmark/templess$
+^pypy/translator/benchmark/gadfly$
+^pypy/translator/benchmark/mako$
+^pypy/translator/benchmark/bench-custom\.benchmark_result$
+^pypy/translator/benchmark/shootout_benchmarks$
+^pypy/translator/goal/pypy-translation-snapshot$
+^pypy/translator/goal/pypy-c
+^pypy/translator/goal/pypy-jvm
+^pypy/translator/goal/pypy-jvm.jar
+^pypy/translator/goal/.+\.exe$
+^pypy/translator/goal/.+\.dll$
+^pypy/translator/goal/target.+-c$
+^pypy/_cache$
+^pypy/doc/statistic/.+\.html$
+^pypy/doc/statistic/.+\.eps$
+^pypy/doc/statistic/.+\.pdf$
+^pypy/translator/cli/src/pypylib\.dll$
+^pypy/translator/cli/src/query\.exe$
+^pypy/translator/cli/src/main\.exe$
+^lib_pypy/ctypes_config_cache/_.+_cache\.py$
+^lib_pypy/ctypes_config_cache/_.+_.+_\.py$
+^pypy/translator/cli/query-descriptions$
+^pypy/doc/discussion/.+\.html$
+^include/.+\.h$
+^include/.+\.inl$
+^pypy/doc/_build/.*$
+^pypy/doc/config/.+\.html$
+^pypy/doc/config/style\.css$
+^pypy/doc/jit/.+\.html$
+^pypy/doc/jit/style\.css$
+^pypy/doc/image/lattice1\.png$
+^pypy/doc/image/lattice2\.png$
+^pypy/doc/image/lattice3\.png$
+^pypy/doc/image/stackless_informal\.png$
+^pypy/doc/image/parsing_example.+\.png$
+^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$
+^compiled
+^.git/
+^release/
diff --git a/lib/pypy/.hgsubstate b/lib/pypy/.hgsubstate
new file mode 100644
diff --git a/lib/pypy/.hgtags b/lib/pypy/.hgtags
new file mode 100644
--- /dev/null
+++ b/lib/pypy/.hgtags
@@ -0,0 +1,1 @@
+b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5
diff --git a/lib/pypy/LICENSE b/lib/pypy/LICENSE
new file mode 100644
--- /dev/null
+++ b/lib/pypy/LICENSE
@@ -0,0 +1,249 @@
+License for files in the pypy/ directory
+==================================================
+
+Except when otherwise stated (look for LICENSE files in directories or
+information at the beginning of each file) all software and
+documentation in the 'pypy' directories is licensed as follows:
+
+ The MIT License
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or
+ sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+
+PyPy Copyright holders 2003-2011
+-----------------------------------
+
+Except when otherwise stated (look for LICENSE files or information at
+the beginning of each file) the files in the 'pypy' directory are each
+copyrighted by one or more of the following people and organizations:
+
+ Armin Rigo
+ Maciej Fijalkowski
+ Carl Friedrich Bolz
+ Amaury Forgeot d'Arc
+ Antonio Cuni
+ Samuele Pedroni
+ Michael Hudson
+ Holger Krekel
+ Christian Tismer
+ Benjamin Peterson
+ Eric van Riet Paap
+ Anders Chrigström
+ Håkan Ardö
+ Richard Emslie
+ Dan Villiom Podlaski Christiansen
+ Alexander Schremmer
+ Alex Gaynor
+ David Schneider
+ Aurelién Campeas
+ Anders Lehmann
+ Camillo Bruni
+ Niklaus Haldimann
+ Leonardo Santagada
+ Toon Verwaest
+ Seo Sanghyeon
+ Lawrence Oluyede
+ Bartosz Skowron
+ Jakub Gustak
+ Guido Wesdorp
+ Adrien Di Mascio
+ Laura Creighton
+ Ludovic Aubry
+ Niko Matsakis
+ Daniel Roberts
+ Jason Creighton
+ Jacob Hallén
+ Alex Martelli
+ Anders Hammarquist
+ Jan de Mooij
+ Stephan Diehl
+ Michael Foord
+ Stefan Schwarzer
+ Tomek Meka
+ Patrick Maupin
+ Bob Ippolito
+ Bruno Gola
+ Alexandre Fayolle
+ Marius Gedminas
+ Simon Burton
+ Jean-Paul Calderone
+ John Witulski
+ Wim Lavrijsen
+ Andreas Stührk
+ Jean-Philippe St. Pierre
+ Guido van Rossum
+ Pavel Vinogradov
+ Valentino Volonghi
+ Paul deGrandis
+ Adrian Kuhn
+ tav
+ Georg Brandl
+ Gerald Klix
+ Wanja Saatkamp
+ Boris Feigin
+ Oscar Nierstrasz
+ Dario Bertini
+ David Malcolm
+ Eugene Oden
+ Henry Mason
+ Lukas Renggli
+ Guenter Jantzen
+ Ronny Pfannschmidt
+ Bert Freudenberg
+ Amit Regmi
+ Ben Young
+ Nicolas Chauvat
+ Andrew Durdin
+ Michael Schneider
+ Nicholas Riley
+ Rocco Moretti
+ Gintautas Miliauskas
+ Michael Twomey
+ Igor Trindade Oliveira
+ Lucian Branescu Mihaila
+ Olivier Dormond
+ Jared Grubb
+ Karl Bartel
+ Gabriel Lavoie
+ Brian Dorsey
+ Victor Stinner
+ Stuart Williams
+ Toby Watson
+ Antoine Pitrou
+ Justas Sadzevicius
+ Neil Shepperd
+ Mikael Schönenberg
+ Gasper Zejn
+ Jonathan David Riehl
+ Elmo Mäntynen
+ Anders Qvist
+ Beatrice Düring
+ Alexander Sedov
+ Vincent Legoll
+ Alan McIntyre
+ Romain Guillebert
+ Alex Perry
+ Jens-Uwe Mager
+ Dan Stromberg
+ Lukas Diekmann
+ Carl Meyer
+ Pieter Zieschang
+ Alejandro J. Cura
+ Sylvain Thenault
+ Travis Francis Athougies
+ Henrik Vendelbo
+ Lutz Paelike
+ Jacob Oscarson
+ Martin Blais
+ Lucio Torre
+ Lene Wagner
+ Miguel de Val Borro
+ Ignas Mikalajunas
+ Artur Lisiecki
+ Joshua Gilbert
+ Godefroid Chappelle
+ Yusei Tahara
+ Christopher Armstrong
+ Stephan Busemann
+ Gustavo Niemeyer
+ William Leslie
+ Akira Li
+ Kristján Valur Jónsson
+ Bobby Impollonia
+ Andrew Thompson
+ Anders Sigfridsson
+ Jacek Generowicz
+ Dan Colish
+ Sven Hager
+ Zooko Wilcox-O Hearn
+ Anders Hammarquist
+ Dinu Gherman
+ Dan Colish
+ Daniel Neuhäuser
+ Michael Chermside
+ Konrad Delong
+ Anna Ravencroft
+ Greg Price
+ Armin Ronacher
+ Jim Baker
+ Philip Jenvey
+ Rodrigo Araújo
+ Brett Cannon
+
+ Heinrich-Heine University, Germany
+ Open End AB (formerly AB Strakt), Sweden
+ merlinux GmbH, Germany
+ tismerysoft GmbH, Germany
+ Logilab Paris, France
+ DFKI GmbH, Germany
+ Impara, Germany
+ Change Maker, Sweden
+
+The PyPy Logo as used by http://speed.pypy.org and others was created
+by Samuel Reis and is distributed on terms of Creative Commons Share Alike
+License.
+
+License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified'
+==============================================================
+
+Except when otherwise stated (look for LICENSE files or
+copyright/license information at the beginning of each file) the files
+in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories
+are all copyrighted by the Python Software Foundation and licensed under
+the Python Software License of which you can find a copy here:
+http://www.python.org/doc/Copyright.html
+
+License for 'pypy/translator/jvm/src/jna.jar'
+=============================================
+
+The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU
+Lesser General Public License of which you can find a copy here:
+http://www.gnu.org/licenses/lgpl.html
+
+License for 'pypy/translator/jvm/src/jasmin.jar'
+================================================
+
+The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer
+and distributed with permission. The use of Jasmin by PyPy does not imply
+that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore,
+the following disclaimer applies to Jasmin:
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+License for 'pypy/module/unicodedata/'
+======================================
+
+The following files are from the website of The Unicode Consortium
+at http://www.unicode.org/. For the terms of use of these files, see
+http://www.unicode.org/terms_of_use.html . Or they are derived from
+files from the above website, and the same terms of use apply.
+
+ CompositionExclusions-*.txt
+ EastAsianWidth-*.txt
+ LineBreak-*.txt
+ UnicodeData-*.txt
+ UnihanNumeric-*.txt
diff --git a/lib/pypy/README b/lib/pypy/README
new file mode 100644
--- /dev/null
+++ b/lib/pypy/README
@@ -0,0 +1,24 @@
+=====================================
+PyPy: Python in Python Implementation
+=====================================
+
+Welcome to PyPy!
+
+PyPy is both an implementation of the Python programming language, and
+an extensive compiler framework for dynamic language implementations.
+You can build self-contained Python implementations which execute
+independently from CPython.
+
+The home page is:
+
+ http://pypy.org/
+
+The getting-started document will help guide you:
+
+ http://doc.pypy.org/en/latest/getting-started.html
+
+It will also point you to the rest of the documentation which is generated
+from files in the pypy/doc directory within the source repositories. Enjoy
+and send us feedback!
+
+ the pypy-dev team <pypy-dev at python.org>
diff --git a/lib/pypy/_pytest/__init__.py b/lib/pypy/_pytest/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/__init__.py
@@ -0,0 +1,2 @@
+#
+__version__ = '2.1.0.dev4'
diff --git a/lib/pypy/_pytest/assertion/__init__.py b/lib/pypy/_pytest/assertion/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/assertion/__init__.py
@@ -0,0 +1,128 @@
+"""
+support for presenting detailed information in failing assertions.
+"""
+import py
+import imp
+import marshal
+import struct
+import sys
+import pytest
+from _pytest.monkeypatch import monkeypatch
+from _pytest.assertion import reinterpret, util
+
+try:
+ from _pytest.assertion.rewrite import rewrite_asserts
+except ImportError:
+ rewrite_asserts = None
+else:
+ import ast
+
+def pytest_addoption(parser):
+ group = parser.getgroup("debugconfig")
+ group.addoption('--assertmode', action="store", dest="assertmode",
+ choices=("on", "old", "off", "default"), default="default",
+ metavar="on|old|off",
+ help="""control assertion debugging tools.
+'off' performs no assertion debugging.
+'old' reinterprets the expressions in asserts to glean information.
+'on' (the default) rewrites the assert statements in test modules to provide
+sub-expression results.""")
+ group.addoption('--no-assert', action="store_true", default=False,
+ dest="noassert", help="DEPRECATED equivalent to --assertmode=off")
+ group.addoption('--nomagic', action="store_true", default=False,
+ dest="nomagic", help="DEPRECATED equivalent to --assertmode=off")
+
+class AssertionState:
+ """State for the assertion plugin."""
+
+ def __init__(self, config, mode):
+ self.mode = mode
+ self.trace = config.trace.root.get("assertion")
+
+def pytest_configure(config):
+ warn_about_missing_assertion()
+ mode = config.getvalue("assertmode")
+ if config.getvalue("noassert") or config.getvalue("nomagic"):
+ if mode not in ("off", "default"):
+ raise pytest.UsageError("assertion options conflict")
+ mode = "off"
+ elif mode == "default":
+ mode = "on"
+ if mode != "off":
+ def callbinrepr(op, left, right):
+ hook_result = config.hook.pytest_assertrepr_compare(
+ config=config, op=op, left=left, right=right)
+ for new_expl in hook_result:
+ if new_expl:
+ return '\n~'.join(new_expl)
+ m = monkeypatch()
+ config._cleanup.append(m.undo)
+ m.setattr(py.builtin.builtins, 'AssertionError',
+ reinterpret.AssertionError)
+ m.setattr(util, '_reprcompare', callbinrepr)
+ if mode == "on" and rewrite_asserts is None:
+ mode = "old"
+ config._assertstate = AssertionState(config, mode)
+ config._assertstate.trace("configured with mode set to %r" % (mode,))
+
+def _write_pyc(co, source_path):
+ if hasattr(imp, "cache_from_source"):
+ # Handle PEP 3147 pycs.
+ pyc = py.path.local(imp.cache_from_source(str(source_path)))
+ pyc.ensure()
+ else:
+ pyc = source_path + "c"
+ mtime = int(source_path.mtime())
+ fp = pyc.open("wb")
+ try:
+ fp.write(imp.get_magic())
+ fp.write(struct.pack("<l", mtime))
+ marshal.dump(co, fp)
+ finally:
+ fp.close()
+ return pyc
+
+def before_module_import(mod):
+ if mod.config._assertstate.mode != "on":
+ return
+ # Some deep magic: load the source, rewrite the asserts, and write a
+ # fake pyc, so that it'll be loaded when the module is imported.
+ source = mod.fspath.read()
+ try:
+ tree = ast.parse(source)
+ except SyntaxError:
+ # Let this pop up again in the real import.
+ mod.config._assertstate.trace("failed to parse: %r" % (mod.fspath,))
+ return
+ rewrite_asserts(tree)
+ try:
+ co = compile(tree, str(mod.fspath), "exec")
+ except SyntaxError:
+ # It's possible that this error is from some bug in the assertion
+ # rewriting, but I don't know of a fast way to tell.
+ mod.config._assertstate.trace("failed to compile: %r" % (mod.fspath,))
+ return
+ mod._pyc = _write_pyc(co, mod.fspath)
+ mod.config._assertstate.trace("wrote pyc: %r" % (mod._pyc,))
+
+def after_module_import(mod):
+ if not hasattr(mod, "_pyc"):
+ return
+ state = mod.config._assertstate
+ try:
+ mod._pyc.remove()
+ except py.error.ENOENT:
+ state.trace("couldn't find pyc: %r" % (mod._pyc,))
+ else:
+ state.trace("removed pyc: %r" % (mod._pyc,))
+
+def warn_about_missing_assertion():
+ try:
+ assert False
+ except AssertionError:
+ pass
+ else:
+ sys.stderr.write("WARNING: failing tests may report as passing because "
+ "assertions are turned off! (are you using python -O?)\n")
+
+pytest_assertrepr_compare = util.assertrepr_compare
diff --git a/lib/pypy/_pytest/assertion/newinterpret.py b/lib/pypy/_pytest/assertion/newinterpret.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/assertion/newinterpret.py
@@ -0,0 +1,333 @@
+"""
+Find intermediate evalutation results in assert statements through builtin AST.
+This should replace oldinterpret.py eventually.
+"""
+
+import sys
+import ast
+
+import py
+from _pytest.assertion import util
+from _pytest.assertion.reinterpret import BuiltinAssertionError
+
+
+if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+ # See http://bugs.jython.org/issue1497
+ _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
+ "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
+ "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
+ "List", "Tuple")
+ _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
+ "AugAssign", "Print", "For", "While", "If", "With", "Raise",
+ "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
+ "Exec", "Global", "Expr", "Pass", "Break", "Continue")
+ _expr_nodes = set(getattr(ast, name) for name in _exprs)
+ _stmt_nodes = set(getattr(ast, name) for name in _stmts)
+ def _is_ast_expr(node):
+ return node.__class__ in _expr_nodes
+ def _is_ast_stmt(node):
+ return node.__class__ in _stmt_nodes
+else:
+ def _is_ast_expr(node):
+ return isinstance(node, ast.expr)
+ def _is_ast_stmt(node):
+ return isinstance(node, ast.stmt)
+
+
+class Failure(Exception):
+ """Error found while interpreting AST."""
+
+ def __init__(self, explanation=""):
+ self.cause = sys.exc_info()
+ self.explanation = explanation
+
+
+def interpret(source, frame, should_fail=False):
+ mod = ast.parse(source)
+ visitor = DebugInterpreter(frame)
+ try:
+ visitor.visit(mod)
+ except Failure:
+ failure = sys.exc_info()[1]
+ return getfailure(failure)
+ if should_fail:
+ return ("(assertion failed, but when it was re-run for "
+ "printing intermediate values, it did not fail. Suggestions: "
+ "compute assert expression before the assert or use --no-assert)")
+
+def run(offending_line, frame=None):
+ if frame is None:
+ frame = py.code.Frame(sys._getframe(1))
+ return interpret(offending_line, frame)
+
+def getfailure(e):
+ explanation = util.format_explanation(e.explanation)
+ value = e.cause[1]
+ if str(value):
+ lines = explanation.split('\n')
+ lines[0] += " << %s" % (value,)
+ explanation = '\n'.join(lines)
+ text = "%s: %s" % (e.cause[0].__name__, explanation)
+ if text.startswith('AssertionError: assert '):
+ text = text[16:]
+ return text
+
+operator_map = {
+ ast.BitOr : "|",
+ ast.BitXor : "^",
+ ast.BitAnd : "&",
+ ast.LShift : "<<",
+ ast.RShift : ">>",
+ ast.Add : "+",
+ ast.Sub : "-",
+ ast.Mult : "*",
+ ast.Div : "/",
+ ast.FloorDiv : "//",
+ ast.Mod : "%",
+ ast.Eq : "==",
+ ast.NotEq : "!=",
+ ast.Lt : "<",
+ ast.LtE : "<=",
+ ast.Gt : ">",
+ ast.GtE : ">=",
+ ast.Pow : "**",
+ ast.Is : "is",
+ ast.IsNot : "is not",
+ ast.In : "in",
+ ast.NotIn : "not in"
+}
+
+unary_map = {
+ ast.Not : "not %s",
+ ast.Invert : "~%s",
+ ast.USub : "-%s",
+ ast.UAdd : "+%s"
+}
+
+
+class DebugInterpreter(ast.NodeVisitor):
+ """Interpret AST nodes to gleam useful debugging information. """
+
+ def __init__(self, frame):
+ self.frame = frame
+
+ def generic_visit(self, node):
+ # Fallback when we don't have a special implementation.
+ if _is_ast_expr(node):
+ mod = ast.Expression(node)
+ co = self._compile(mod)
+ try:
+ result = self.frame.eval(co)
+ except Exception:
+ raise Failure()
+ explanation = self.frame.repr(result)
+ return explanation, result
+ elif _is_ast_stmt(node):
+ mod = ast.Module([node])
+ co = self._compile(mod, "exec")
+ try:
+ self.frame.exec_(co)
+ except Exception:
+ raise Failure()
+ return None, None
+ else:
+ raise AssertionError("can't handle %s" %(node,))
+
+ def _compile(self, source, mode="eval"):
+ return compile(source, "<assertion interpretation>", mode)
+
+ def visit_Expr(self, expr):
+ return self.visit(expr.value)
+
+ def visit_Module(self, mod):
+ for stmt in mod.body:
+ self.visit(stmt)
+
+ def visit_Name(self, name):
+ explanation, result = self.generic_visit(name)
+ # See if the name is local.
+ source = "%r in locals() is not globals()" % (name.id,)
+ co = self._compile(source)
+ try:
+ local = self.frame.eval(co)
+ except Exception:
+ # have to assume it isn't
+ local = None
+ if local is None or not self.frame.is_true(local):
+ return name.id, result
+ return explanation, result
+
+ def visit_Compare(self, comp):
+ left = comp.left
+ left_explanation, left_result = self.visit(left)
+ for op, next_op in zip(comp.ops, comp.comparators):
+ next_explanation, next_result = self.visit(next_op)
+ op_symbol = operator_map[op.__class__]
+ explanation = "%s %s %s" % (left_explanation, op_symbol,
+ next_explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_left=left_result,
+ __exprinfo_right=next_result)
+ except Exception:
+ raise Failure(explanation)
+ try:
+ if not self.frame.is_true(result):
+ break
+ except KeyboardInterrupt:
+ raise
+ except:
+ break
+ left_explanation, left_result = next_explanation, next_result
+
+ if util._reprcompare is not None:
+ res = util._reprcompare(op_symbol, left_result, next_result)
+ if res:
+ explanation = res
+ return explanation, result
+
+ def visit_BoolOp(self, boolop):
+ is_or = isinstance(boolop.op, ast.Or)
+ explanations = []
+ for operand in boolop.values:
+ explanation, result = self.visit(operand)
+ explanations.append(explanation)
+ if result == is_or:
+ break
+ name = is_or and " or " or " and "
+ explanation = "(" + name.join(explanations) + ")"
+ return explanation, result
+
+ def visit_UnaryOp(self, unary):
+ pattern = unary_map[unary.op.__class__]
+ operand_explanation, operand_result = self.visit(unary.operand)
+ explanation = pattern % (operand_explanation,)
+ co = self._compile(pattern % ("__exprinfo_expr",))
+ try:
+ result = self.frame.eval(co, __exprinfo_expr=operand_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, result
+
+ def visit_BinOp(self, binop):
+ left_explanation, left_result = self.visit(binop.left)
+ right_explanation, right_result = self.visit(binop.right)
+ symbol = operator_map[binop.op.__class__]
+ explanation = "(%s %s %s)" % (left_explanation, symbol,
+ right_explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_left=left_result,
+ __exprinfo_right=right_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, result
+
+ def visit_Call(self, call):
+ func_explanation, func = self.visit(call.func)
+ arg_explanations = []
+ ns = {"__exprinfo_func" : func}
+ arguments = []
+ for arg in call.args:
+ arg_explanation, arg_result = self.visit(arg)
+ arg_name = "__exprinfo_%s" % (len(ns),)
+ ns[arg_name] = arg_result
+ arguments.append(arg_name)
+ arg_explanations.append(arg_explanation)
+ for keyword in call.keywords:
+ arg_explanation, arg_result = self.visit(keyword.value)
+ arg_name = "__exprinfo_%s" % (len(ns),)
+ ns[arg_name] = arg_result
+ keyword_source = "%s=%%s" % (keyword.arg)
+ arguments.append(keyword_source % (arg_name,))
+ arg_explanations.append(keyword_source % (arg_explanation,))
+ if call.starargs:
+ arg_explanation, arg_result = self.visit(call.starargs)
+ arg_name = "__exprinfo_star"
+ ns[arg_name] = arg_result
+ arguments.append("*%s" % (arg_name,))
+ arg_explanations.append("*%s" % (arg_explanation,))
+ if call.kwargs:
+ arg_explanation, arg_result = self.visit(call.kwargs)
+ arg_name = "__exprinfo_kwds"
+ ns[arg_name] = arg_result
+ arguments.append("**%s" % (arg_name,))
+ arg_explanations.append("**%s" % (arg_explanation,))
+ args_explained = ", ".join(arg_explanations)
+ explanation = "%s(%s)" % (func_explanation, args_explained)
+ args = ", ".join(arguments)
+ source = "__exprinfo_func(%s)" % (args,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, **ns)
+ except Exception:
+ raise Failure(explanation)
+ pattern = "%s\n{%s = %s\n}"
+ rep = self.frame.repr(result)
+ explanation = pattern % (rep, rep, explanation)
+ return explanation, result
+
+ def _is_builtin_name(self, name):
+ pattern = "%r not in globals() and %r not in locals()"
+ source = pattern % (name.id, name.id)
+ co = self._compile(source)
+ try:
+ return self.frame.eval(co)
+ except Exception:
+ return False
+
+ def visit_Attribute(self, attr):
+ if not isinstance(attr.ctx, ast.Load):
+ return self.generic_visit(attr)
+ source_explanation, source_result = self.visit(attr.value)
+ explanation = "%s.%s" % (source_explanation, attr.attr)
+ source = "__exprinfo_expr.%s" % (attr.attr,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_expr=source_result)
+ except Exception:
+ raise Failure(explanation)
+ explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
+ self.frame.repr(result),
+ source_explanation, attr.attr)
+ # Check if the attr is from an instance.
+ source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
+ source = source % (attr.attr,)
+ co = self._compile(source)
+ try:
+ from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
+ except Exception:
+ from_instance = None
+ if from_instance is None or self.frame.is_true(from_instance):
+ rep = self.frame.repr(result)
+ pattern = "%s\n{%s = %s\n}"
+ explanation = pattern % (rep, rep, explanation)
+ return explanation, result
+
+ def visit_Assert(self, assrt):
+ test_explanation, test_result = self.visit(assrt.test)
+ explanation = "assert %s" % (test_explanation,)
+ if not self.frame.is_true(test_result):
+ try:
+ raise BuiltinAssertionError
+ except Exception:
+ raise Failure(explanation)
+ return explanation, test_result
+
+ def visit_Assign(self, assign):
+ value_explanation, value_result = self.visit(assign.value)
+ explanation = "... = %s" % (value_explanation,)
+ name = ast.Name("__exprinfo_expr", ast.Load(),
+ lineno=assign.value.lineno,
+ col_offset=assign.value.col_offset)
+ new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
+ col_offset=assign.col_offset)
+ mod = ast.Module([new_assign])
+ co = self._compile(mod, "exec")
+ try:
+ self.frame.exec_(co, __exprinfo_expr=value_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, value_result
diff --git a/lib/pypy/_pytest/assertion/oldinterpret.py b/lib/pypy/_pytest/assertion/oldinterpret.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/assertion/oldinterpret.py
@@ -0,0 +1,552 @@
+import py
+import sys, inspect
+from compiler import parse, ast, pycodegen
+from _pytest.assertion.util import format_explanation
+from _pytest.assertion.reinterpret import BuiltinAssertionError
+
+passthroughex = py.builtin._sysex
+
+class Failure:
+ def __init__(self, node):
+ self.exc, self.value, self.tb = sys.exc_info()
+ self.node = node
+
+class View(object):
+ """View base class.
+
+ If C is a subclass of View, then C(x) creates a proxy object around
+ the object x. The actual class of the proxy is not C in general,
+ but a *subclass* of C determined by the rules below. To avoid confusion
+ we call view class the class of the proxy (a subclass of C, so of View)
+ and object class the class of x.
+
+ Attributes and methods not found in the proxy are automatically read on x.
+ Other operations like setting attributes are performed on the proxy, as
+ determined by its view class. The object x is available from the proxy
+ as its __obj__ attribute.
+
+ The view class selection is determined by the __view__ tuples and the
+ optional __viewkey__ method. By default, the selected view class is the
+ most specific subclass of C whose __view__ mentions the class of x.
+ If no such subclass is found, the search proceeds with the parent
+ object classes. For example, C(True) will first look for a subclass
+ of C with __view__ = (..., bool, ...) and only if it doesn't find any
+ look for one with __view__ = (..., int, ...), and then ..., object,...
+ If everything fails the class C itself is considered to be the default.
+
+ Alternatively, the view class selection can be driven by another aspect
+ of the object x, instead of the class of x, by overriding __viewkey__.
+ See last example at the end of this module.
+ """
+
+ _viewcache = {}
+ __view__ = ()
+
+ def __new__(rootclass, obj, *args, **kwds):
+ self = object.__new__(rootclass)
+ self.__obj__ = obj
+ self.__rootclass__ = rootclass
+ key = self.__viewkey__()
+ try:
+ self.__class__ = self._viewcache[key]
+ except KeyError:
+ self.__class__ = self._selectsubclass(key)
+ return self
+
+ def __getattr__(self, attr):
+ # attributes not found in the normal hierarchy rooted on View
+ # are looked up in the object's real class
+ return getattr(self.__obj__, attr)
+
+ def __viewkey__(self):
+ return self.__obj__.__class__
+
+ def __matchkey__(self, key, subclasses):
+ if inspect.isclass(key):
+ keys = inspect.getmro(key)
+ else:
+ keys = [key]
+ for key in keys:
+ result = [C for C in subclasses if key in C.__view__]
+ if result:
+ return result
+ return []
+
+ def _selectsubclass(self, key):
+ subclasses = list(enumsubclasses(self.__rootclass__))
+ for C in subclasses:
+ if not isinstance(C.__view__, tuple):
+ C.__view__ = (C.__view__,)
+ choices = self.__matchkey__(key, subclasses)
+ if not choices:
+ return self.__rootclass__
+ elif len(choices) == 1:
+ return choices[0]
+ else:
+ # combine the multiple choices
+ return type('?', tuple(choices), {})
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
+
+
+def enumsubclasses(cls):
+ for subcls in cls.__subclasses__():
+ for subsubclass in enumsubclasses(subcls):
+ yield subsubclass
+ yield cls
+
+
+class Interpretable(View):
+ """A parse tree node with a few extra methods."""
+ explanation = None
+
+ def is_builtin(self, frame):
+ return False
+
+ def eval(self, frame):
+ # fall-back for unknown expression nodes
+ try:
+ expr = ast.Expression(self.__obj__)
+ expr.filename = '<eval>'
+ self.__obj__.filename = '<eval>'
+ co = pycodegen.ExpressionCodeGenerator(expr).getCode()
+ result = frame.eval(co)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ self.result = result
+ self.explanation = self.explanation or frame.repr(self.result)
+
+ def run(self, frame):
+ # fall-back for unknown statement nodes
+ try:
+ expr = ast.Module(None, ast.Stmt([self.__obj__]))
+ expr.filename = '<run>'
+ co = pycodegen.ModuleCodeGenerator(expr).getCode()
+ frame.exec_(co)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+ def nice_explanation(self):
+ return format_explanation(self.explanation)
+
+
+class Name(Interpretable):
+ __view__ = ast.Name
+
+ def is_local(self, frame):
+ source = '%r in locals() is not globals()' % self.name
+ try:
+ return frame.is_true(frame.eval(source))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def is_global(self, frame):
+ source = '%r in globals()' % self.name
+ try:
+ return frame.is_true(frame.eval(source))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def is_builtin(self, frame):
+ source = '%r not in locals() and %r not in globals()' % (
+ self.name, self.name)
+ try:
+ return frame.is_true(frame.eval(source))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def eval(self, frame):
+ super(Name, self).eval(frame)
+ if not self.is_local(frame):
+ self.explanation = self.name
+
+class Compare(Interpretable):
+ __view__ = ast.Compare
+
+ def eval(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ for operation, expr2 in self.ops:
+ if hasattr(self, 'result'):
+ # shortcutting in chained expressions
+ if not frame.is_true(self.result):
+ break
+ expr2 = Interpretable(expr2)
+ expr2.eval(frame)
+ self.explanation = "%s %s %s" % (
+ expr.explanation, operation, expr2.explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % operation
+ try:
+ self.result = frame.eval(source,
+ __exprinfo_left=expr.result,
+ __exprinfo_right=expr2.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ expr = expr2
+
+class And(Interpretable):
+ __view__ = ast.And
+
+ def eval(self, frame):
+ explanations = []
+ for expr in self.nodes:
+ expr = Interpretable(expr)
+ expr.eval(frame)
+ explanations.append(expr.explanation)
+ self.result = expr.result
+ if not frame.is_true(expr.result):
+ break
+ self.explanation = '(' + ' and '.join(explanations) + ')'
+
+class Or(Interpretable):
+ __view__ = ast.Or
+
+ def eval(self, frame):
+ explanations = []
+ for expr in self.nodes:
+ expr = Interpretable(expr)
+ expr.eval(frame)
+ explanations.append(expr.explanation)
+ self.result = expr.result
+ if frame.is_true(expr.result):
+ break
+ self.explanation = '(' + ' or '.join(explanations) + ')'
+
+
+# == Unary operations ==
+keepalive = []
+for astclass, astpattern in {
+ ast.Not : 'not __exprinfo_expr',
+ ast.Invert : '(~__exprinfo_expr)',
+ }.items():
+
+ class UnaryArith(Interpretable):
+ __view__ = astclass
+
+ def eval(self, frame, astpattern=astpattern):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ self.explanation = astpattern.replace('__exprinfo_expr',
+ expr.explanation)
+ try:
+ self.result = frame.eval(astpattern,
+ __exprinfo_expr=expr.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+ keepalive.append(UnaryArith)
+
+# == Binary operations ==
+for astclass, astpattern in {
+ ast.Add : '(__exprinfo_left + __exprinfo_right)',
+ ast.Sub : '(__exprinfo_left - __exprinfo_right)',
+ ast.Mul : '(__exprinfo_left * __exprinfo_right)',
+ ast.Div : '(__exprinfo_left / __exprinfo_right)',
+ ast.Mod : '(__exprinfo_left % __exprinfo_right)',
+ ast.Power : '(__exprinfo_left ** __exprinfo_right)',
+ }.items():
+
+ class BinaryArith(Interpretable):
+ __view__ = astclass
+
+ def eval(self, frame, astpattern=astpattern):
+ left = Interpretable(self.left)
+ left.eval(frame)
+ right = Interpretable(self.right)
+ right.eval(frame)
+ self.explanation = (astpattern
+ .replace('__exprinfo_left', left .explanation)
+ .replace('__exprinfo_right', right.explanation))
+ try:
+ self.result = frame.eval(astpattern,
+ __exprinfo_left=left.result,
+ __exprinfo_right=right.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+ keepalive.append(BinaryArith)
+
+
+class CallFunc(Interpretable):
+ __view__ = ast.CallFunc
+
+ def is_bool(self, frame):
+ source = 'isinstance(__exprinfo_value, bool)'
+ try:
+ return frame.is_true(frame.eval(source,
+ __exprinfo_value=self.result))
+ except passthroughex:
+ raise
+ except:
+ return False
+
+ def eval(self, frame):
+ node = Interpretable(self.node)
+ node.eval(frame)
+ explanations = []
+ vars = {'__exprinfo_fn': node.result}
+ source = '__exprinfo_fn('
+ for a in self.args:
+ if isinstance(a, ast.Keyword):
+ keyword = a.name
+ a = a.expr
+ else:
+ keyword = None
+ a = Interpretable(a)
+ a.eval(frame)
+ argname = '__exprinfo_%d' % len(vars)
+ vars[argname] = a.result
+ if keyword is None:
+ source += argname + ','
+ explanations.append(a.explanation)
+ else:
+ source += '%s=%s,' % (keyword, argname)
+ explanations.append('%s=%s' % (keyword, a.explanation))
+ if self.star_args:
+ star_args = Interpretable(self.star_args)
+ star_args.eval(frame)
+ argname = '__exprinfo_star'
+ vars[argname] = star_args.result
+ source += '*' + argname + ','
+ explanations.append('*' + star_args.explanation)
+ if self.dstar_args:
+ dstar_args = Interpretable(self.dstar_args)
+ dstar_args.eval(frame)
+ argname = '__exprinfo_kwds'
+ vars[argname] = dstar_args.result
+ source += '**' + argname + ','
+ explanations.append('**' + dstar_args.explanation)
+ self.explanation = "%s(%s)" % (
+ node.explanation, ', '.join(explanations))
+ if source.endswith(','):
+ source = source[:-1]
+ source += ')'
+ try:
+ self.result = frame.eval(source, **vars)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ if not node.is_builtin(frame) or not self.is_bool(frame):
+ r = frame.repr(self.result)
+ self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+class Getattr(Interpretable):
+ __view__ = ast.Getattr
+
+ def eval(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ source = '__exprinfo_expr.%s' % self.attrname
+ try:
+ self.result = frame.eval(source, __exprinfo_expr=expr.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+ self.explanation = '%s.%s' % (expr.explanation, self.attrname)
+ # if the attribute comes from the instance, its value is interesting
+ source = ('hasattr(__exprinfo_expr, "__dict__") and '
+ '%r in __exprinfo_expr.__dict__' % self.attrname)
+ try:
+ from_instance = frame.is_true(
+ frame.eval(source, __exprinfo_expr=expr.result))
+ except passthroughex:
+ raise
+ except:
+ from_instance = True
+ if from_instance:
+ r = frame.repr(self.result)
+ self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+# == Re-interpretation of full statements ==
+
+class Assert(Interpretable):
+ __view__ = ast.Assert
+
+ def run(self, frame):
+ test = Interpretable(self.test)
+ test.eval(frame)
+ # print the result as 'assert <explanation>'
+ self.result = test.result
+ self.explanation = 'assert ' + test.explanation
+ if not frame.is_true(test.result):
+ try:
+ raise BuiltinAssertionError
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+class Assign(Interpretable):
+ __view__ = ast.Assign
+
+ def run(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ self.result = expr.result
+ self.explanation = '... = ' + expr.explanation
+ # fall-back-run the rest of the assignment
+ ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
+ mod = ast.Module(None, ast.Stmt([ass]))
+ mod.filename = '<run>'
+ co = pycodegen.ModuleCodeGenerator(mod).getCode()
+ try:
+ frame.exec_(co, __exprinfo_expr=expr.result)
+ except passthroughex:
+ raise
+ except:
+ raise Failure(self)
+
+class Discard(Interpretable):
+ __view__ = ast.Discard
+
+ def run(self, frame):
+ expr = Interpretable(self.expr)
+ expr.eval(frame)
+ self.result = expr.result
+ self.explanation = expr.explanation
+
+class Stmt(Interpretable):
+ __view__ = ast.Stmt
+
+ def run(self, frame):
+ for stmt in self.nodes:
+ stmt = Interpretable(stmt)
+ stmt.run(frame)
+
+
+def report_failure(e):
+ explanation = e.node.nice_explanation()
+ if explanation:
+ explanation = ", in: " + explanation
+ else:
+ explanation = ""
+ sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
+
+def check(s, frame=None):
+ if frame is None:
+ frame = sys._getframe(1)
+ frame = py.code.Frame(frame)
+ expr = parse(s, 'eval')
+ assert isinstance(expr, ast.Expression)
+ node = Interpretable(expr.node)
+ try:
+ node.eval(frame)
+ except passthroughex:
+ raise
+ except Failure:
+ e = sys.exc_info()[1]
+ report_failure(e)
+ else:
+ if not frame.is_true(node.result):
+ sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
+
+
+###########################################################
+# API / Entry points
+# #########################################################
+
+def interpret(source, frame, should_fail=False):
+ module = Interpretable(parse(source, 'exec').node)
+ #print "got module", module
+ if isinstance(frame, py.std.types.FrameType):
+ frame = py.code.Frame(frame)
+ try:
+ module.run(frame)
+ except Failure:
+ e = sys.exc_info()[1]
+ return getfailure(e)
+ except passthroughex:
+ raise
+ except:
+ import traceback
+ traceback.print_exc()
+ if should_fail:
+ return ("(assertion failed, but when it was re-run for "
+ "printing intermediate values, it did not fail. Suggestions: "
+ "compute assert expression before the assert or use --nomagic)")
+ else:
+ return None
+
+def getmsg(excinfo):
+ if isinstance(excinfo, tuple):
+ excinfo = py.code.ExceptionInfo(excinfo)
+ #frame, line = gettbline(tb)
+ #frame = py.code.Frame(frame)
+ #return interpret(line, frame)
+
+ tb = excinfo.traceback[-1]
+ source = str(tb.statement).strip()
+ x = interpret(source, tb.frame, should_fail=True)
+ if not isinstance(x, str):
+ raise TypeError("interpret returned non-string %r" % (x,))
+ return x
+
+def getfailure(e):
+ explanation = e.node.nice_explanation()
+ if str(e.value):
+ lines = explanation.split('\n')
+ lines[0] += " << %s" % (e.value,)
+ explanation = '\n'.join(lines)
+ text = "%s: %s" % (e.exc.__name__, explanation)
+ if text.startswith('AssertionError: assert '):
+ text = text[16:]
+ return text
+
+def run(s, frame=None):
+ if frame is None:
+ frame = sys._getframe(1)
+ frame = py.code.Frame(frame)
+ module = Interpretable(parse(s, 'exec').node)
+ try:
+ module.run(frame)
+ except Failure:
+ e = sys.exc_info()[1]
+ report_failure(e)
+
+
+if __name__ == '__main__':
+ # example:
+ def f():
+ return 5
+ def g():
+ return 3
+ def h(x):
+ return 'never'
+ check("f() * g() == 5")
+ check("not f()")
+ check("not (f() and g() or 0)")
+ check("f() == g()")
+ i = 4
+ check("i == f()")
+ check("len(f()) == 0")
+ check("isinstance(2+3+4, float)")
+
+ run("x = i")
+ check("x == 5")
+
+ run("assert not f(), 'oops'")
+ run("a, b, c = 1, 2")
+ run("a, b, c = f()")
+
+ check("max([f(),g()]) == 4")
+ check("'hello'[g()] == 'h'")
+ run("'guk%d' % h(f())")
diff --git a/lib/pypy/_pytest/assertion/reinterpret.py b/lib/pypy/_pytest/assertion/reinterpret.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/assertion/reinterpret.py
@@ -0,0 +1,48 @@
+import sys
+import py
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+
+class AssertionError(BuiltinAssertionError):
+ def __init__(self, *args):
+ BuiltinAssertionError.__init__(self, *args)
+ if args:
+ try:
+ self.msg = str(args[0])
+ except py.builtin._sysex:
+ raise
+ except:
+ self.msg = "<[broken __repr__] %s at %0xd>" %(
+ args[0].__class__, id(args[0]))
+ else:
+ f = py.code.Frame(sys._getframe(1))
+ try:
+ source = f.code.fullsource
+ if source is not None:
+ try:
+ source = source.getstatement(f.lineno, assertion=True)
+ except IndexError:
+ source = None
+ else:
+ source = str(source.deindent()).strip()
+ except py.error.ENOENT:
+ source = None
+ # this can also occur during reinterpretation, when the
+ # co_filename is set to "<run>".
+ if source:
+ self.msg = reinterpret(source, f, should_fail=True)
+ else:
+ self.msg = "<could not determine information>"
+ if not self.args:
+ self.args = (self.msg,)
+
+if sys.version_info > (3, 0):
+ AssertionError.__module__ = "builtins"
+ reinterpret_old = "old reinterpretation not available for py3"
+else:
+ from _pytest.assertion.oldinterpret import interpret as reinterpret_old
+if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
+ from _pytest.assertion.newinterpret import interpret as reinterpret
+else:
+ reinterpret = reinterpret_old
+
diff --git a/lib/pypy/_pytest/assertion/rewrite.py b/lib/pypy/_pytest/assertion/rewrite.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/assertion/rewrite.py
@@ -0,0 +1,340 @@
+"""Rewrite assertion AST to produce nice error messages"""
+
+import ast
+import collections
+import itertools
+import sys
+
+import py
+from _pytest.assertion import util
+
+
+def rewrite_asserts(mod):
+ """Rewrite the assert statements in mod."""
+ AssertionRewriter().run(mod)
+
+
+_saferepr = py.io.saferepr
+from _pytest.assertion.util import format_explanation as _format_explanation
+
+def _format_boolop(operands, explanations, is_or):
+ show_explanations = []
+ for operand, expl in zip(operands, explanations):
+ show_explanations.append(expl)
+ if operand == is_or:
+ break
+ return "(" + (is_or and " or " or " and ").join(show_explanations) + ")"
+
+def _call_reprcompare(ops, results, expls, each_obj):
+ for i, res, expl in zip(range(len(ops)), results, expls):
+ try:
+ done = not res
+ except Exception:
+ done = True
+ if done:
+ break
+ if util._reprcompare is not None:
+ custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
+ if custom is not None:
+ return custom
+ return expl
+
+
+unary_map = {
+ ast.Not : "not %s",
+ ast.Invert : "~%s",
+ ast.USub : "-%s",
+ ast.UAdd : "+%s"
+}
+
+binop_map = {
+ ast.BitOr : "|",
+ ast.BitXor : "^",
+ ast.BitAnd : "&",
+ ast.LShift : "<<",
+ ast.RShift : ">>",
+ ast.Add : "+",
+ ast.Sub : "-",
+ ast.Mult : "*",
+ ast.Div : "/",
+ ast.FloorDiv : "//",
+ ast.Mod : "%",
+ ast.Eq : "==",
+ ast.NotEq : "!=",
+ ast.Lt : "<",
+ ast.LtE : "<=",
+ ast.Gt : ">",
+ ast.GtE : ">=",
+ ast.Pow : "**",
+ ast.Is : "is",
+ ast.IsNot : "is not",
+ ast.In : "in",
+ ast.NotIn : "not in"
+}
+
+
+def set_location(node, lineno, col_offset):
+ """Set node location information recursively."""
+ def _fix(node, lineno, col_offset):
+ if "lineno" in node._attributes:
+ node.lineno = lineno
+ if "col_offset" in node._attributes:
+ node.col_offset = col_offset
+ for child in ast.iter_child_nodes(node):
+ _fix(child, lineno, col_offset)
+ _fix(node, lineno, col_offset)
+ return node
+
+
+class AssertionRewriter(ast.NodeVisitor):
+
+ def run(self, mod):
+ """Find all assert statements in *mod* and rewrite them."""
+ if not mod.body:
+ # Nothing to do.
+ return
+ # Insert some special imports at the top of the module but after any
+ # docstrings and __future__ imports.
+ aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
+ ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
+ expect_docstring = True
+ pos = 0
+ lineno = 0
+ for item in mod.body:
+ if (expect_docstring and isinstance(item, ast.Expr) and
+ isinstance(item.value, ast.Str)):
+ doc = item.value.s
+ if "PYTEST_DONT_REWRITE" in doc:
+ # The module has disabled assertion rewriting.
+ return
+ lineno += len(doc) - 1
+ expect_docstring = False
+ elif (not isinstance(item, ast.ImportFrom) or item.level > 0 and
+ item.identifier != "__future__"):
+ lineno = item.lineno
+ break
+ pos += 1
+ imports = [ast.Import([alias], lineno=lineno, col_offset=0)
+ for alias in aliases]
+ mod.body[pos:pos] = imports
+ # Collect asserts.
+ nodes = collections.deque([mod])
+ while nodes:
+ node = nodes.popleft()
+ for name, field in ast.iter_fields(node):
+ if isinstance(field, list):
+ new = []
+ for i, child in enumerate(field):
+ if isinstance(child, ast.Assert):
+ # Transform assert.
+ new.extend(self.visit(child))
+ else:
+ new.append(child)
+ if isinstance(child, ast.AST):
+ nodes.append(child)
+ setattr(node, name, new)
+ elif (isinstance(field, ast.AST) and
+ # Don't recurse into expressions as they can't contain
+ # asserts.
+ not isinstance(field, ast.expr)):
+ nodes.append(field)
+
+ def variable(self):
+ """Get a new variable."""
+ # Use a character invalid in python identifiers to avoid clashing.
+ name = "@py_assert" + str(next(self.variable_counter))
+ self.variables.add(name)
+ return name
+
+ def assign(self, expr):
+ """Give *expr* a name."""
+ name = self.variable()
+ self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
+ return ast.Name(name, ast.Load())
+
+ def display(self, expr):
+ """Call py.io.saferepr on the expression."""
+ return self.helper("saferepr", expr)
+
+ def helper(self, name, *args):
+ """Call a helper in this module."""
+ py_name = ast.Name("@pytest_ar", ast.Load())
+ attr = ast.Attribute(py_name, "_" + name, ast.Load())
+ return ast.Call(attr, list(args), [], None, None)
+
+ def builtin(self, name):
+ """Return the builtin called *name*."""
+ builtin_name = ast.Name("@py_builtins", ast.Load())
+ return ast.Attribute(builtin_name, name, ast.Load())
+
+ def explanation_param(self, expr):
+ specifier = "py" + str(next(self.variable_counter))
+ self.explanation_specifiers[specifier] = expr
+ return "%(" + specifier + ")s"
+
+ def push_format_context(self):
+ self.explanation_specifiers = {}
+ self.stack.append(self.explanation_specifiers)
+
+ def pop_format_context(self, expl_expr):
+ current = self.stack.pop()
+ if self.stack:
+ self.explanation_specifiers = self.stack[-1]
+ keys = [ast.Str(key) for key in current.keys()]
+ format_dict = ast.Dict(keys, list(current.values()))
+ form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
+ name = "@py_format" + str(next(self.variable_counter))
+ self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
+ return ast.Name(name, ast.Load())
+
+ def generic_visit(self, node):
+ """Handle expressions we don't have custom code for."""
+ assert isinstance(node, ast.expr)
+ res = self.assign(node)
+ return res, self.explanation_param(self.display(res))
+
+ def visit_Assert(self, assert_):
+ if assert_.msg:
+ # There's already a message. Don't mess with it.
+ return [assert_]
+ self.statements = []
+ self.variables = set()
+ self.variable_counter = itertools.count()
+ self.stack = []
+ self.on_failure = []
+ self.push_format_context()
+ # Rewrite assert into a bunch of statements.
+ top_condition, explanation = self.visit(assert_.test)
+ # Create failure message.
+ body = self.on_failure
+ negation = ast.UnaryOp(ast.Not(), top_condition)
+ self.statements.append(ast.If(negation, body, []))
+ explanation = "assert " + explanation
+ template = ast.Str(explanation)
+ msg = self.pop_format_context(template)
+ fmt = self.helper("format_explanation", msg)
+ err_name = ast.Name("AssertionError", ast.Load())
+ exc = ast.Call(err_name, [fmt], [], None, None)
+ if sys.version_info[0] >= 3:
+ raise_ = ast.Raise(exc, None)
+ else:
+ raise_ = ast.Raise(exc, None, None)
+ body.append(raise_)
+ # Delete temporary variables.
+ names = [ast.Name(name, ast.Del()) for name in self.variables]
+ if names:
+ delete = ast.Delete(names)
+ self.statements.append(delete)
+ # Fix line numbers.
+ for stmt in self.statements:
+ set_location(stmt, assert_.lineno, assert_.col_offset)
+ return self.statements
+
+ def visit_Name(self, name):
+ # Check if the name is local or not.
+ locs = ast.Call(self.builtin("locals"), [], [], None, None)
+ globs = ast.Call(self.builtin("globals"), [], [], None, None)
+ ops = [ast.In(), ast.IsNot()]
+ test = ast.Compare(ast.Str(name.id), ops, [locs, globs])
+ expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
+ return name, self.explanation_param(expr)
+
+ def visit_BoolOp(self, boolop):
+ operands = []
+ explanations = []
+ self.push_format_context()
+ for operand in boolop.values:
+ res, explanation = self.visit(operand)
+ operands.append(res)
+ explanations.append(explanation)
+ expls = ast.Tuple([ast.Str(expl) for expl in explanations], ast.Load())
+ is_or = ast.Num(isinstance(boolop.op, ast.Or))
+ expl_template = self.helper("format_boolop",
+ ast.Tuple(operands, ast.Load()), expls,
+ is_or)
+ expl = self.pop_format_context(expl_template)
+ res = self.assign(ast.BoolOp(boolop.op, operands))
+ return res, self.explanation_param(expl)
+
+ def visit_UnaryOp(self, unary):
+ pattern = unary_map[unary.op.__class__]
+ operand_res, operand_expl = self.visit(unary.operand)
+ res = self.assign(ast.UnaryOp(unary.op, operand_res))
+ return res, pattern % (operand_expl,)
+
+ def visit_BinOp(self, binop):
+ symbol = binop_map[binop.op.__class__]
+ left_expr, left_expl = self.visit(binop.left)
+ right_expr, right_expl = self.visit(binop.right)
+ explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
+ res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
+ return res, explanation
+
+ def visit_Call(self, call):
+ new_func, func_expl = self.visit(call.func)
+ arg_expls = []
+ new_args = []
+ new_kwargs = []
+ new_star = new_kwarg = None
+ for arg in call.args:
+ res, expl = self.visit(arg)
+ new_args.append(res)
+ arg_expls.append(expl)
+ for keyword in call.keywords:
+ res, expl = self.visit(keyword.value)
+ new_kwargs.append(ast.keyword(keyword.arg, res))
+ arg_expls.append(keyword.arg + "=" + expl)
+ if call.starargs:
+ new_star, expl = self.visit(call.starargs)
+ arg_expls.append("*" + expl)
+ if call.kwargs:
+ new_kwarg, expl = self.visit(call.kwarg)
+ arg_expls.append("**" + expl)
+ expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
+ new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
+ res = self.assign(new_call)
+ res_expl = self.explanation_param(self.display(res))
+ outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
+ return res, outer_expl
+
+ def visit_Attribute(self, attr):
+ if not isinstance(attr.ctx, ast.Load):
+ return self.generic_visit(attr)
+ value, value_expl = self.visit(attr.value)
+ res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
+ res_expl = self.explanation_param(self.display(res))
+ pat = "%s\n{%s = %s.%s\n}"
+ expl = pat % (res_expl, res_expl, value_expl, attr.attr)
+ return res, expl
+
+ def visit_Compare(self, comp):
+ self.push_format_context()
+ left_res, left_expl = self.visit(comp.left)
+ res_variables = [self.variable() for i in range(len(comp.ops))]
+ load_names = [ast.Name(v, ast.Load()) for v in res_variables]
+ store_names = [ast.Name(v, ast.Store()) for v in res_variables]
+ it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
+ expls = []
+ syms = []
+ results = [left_res]
+ for i, op, next_operand in it:
+ next_res, next_expl = self.visit(next_operand)
+ results.append(next_res)
+ sym = binop_map[op.__class__]
+ syms.append(ast.Str(sym))
+ expl = "%s %s %s" % (left_expl, sym, next_expl)
+ expls.append(ast.Str(expl))
+ res_expr = ast.Compare(left_res, [op], [next_res])
+ self.statements.append(ast.Assign([store_names[i]], res_expr))
+ left_res, left_expl = next_res, next_expl
+ # Use py.code._reprcompare if that's available.
+ expl_call = self.helper("call_reprcompare",
+ ast.Tuple(syms, ast.Load()),
+ ast.Tuple(load_names, ast.Load()),
+ ast.Tuple(expls, ast.Load()),
+ ast.Tuple(results, ast.Load()))
+ if len(comp.ops) > 1:
+ res = ast.BoolOp(ast.And(), load_names)
+ else:
+ res = load_names[0]
+ return res, self.explanation_param(self.pop_format_context(expl_call))
diff --git a/lib/pypy/_pytest/assertion/util.py b/lib/pypy/_pytest/assertion/util.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/assertion/util.py
@@ -0,0 +1,213 @@
+"""Utilities for assertion debugging"""
+
+import py
+
+
+# The _reprcompare attribute on the util module is used by the new assertion
+# interpretation code and assertion rewriter to detect this plugin was
+# loaded and in turn call the hooks defined here as part of the
+# DebugInterpreter.
+_reprcompare = None
+
+def format_explanation(explanation):
+ """This formats an explanation
+
+ Normally all embedded newlines are escaped, however there are
+ three exceptions: \n{, \n} and \n~. The first two are intended
+ cover nested explanations, see function and attribute explanations
+ for examples (.visit_Call(), visit_Attribute()). The last one is
+ for when one explanation needs to span multiple lines, e.g. when
+ displaying diffs.
+ """
+ # simplify 'assert False where False = ...'
+ where = 0
+ while True:
+ start = where = explanation.find("False\n{False = ", where)
+ if where == -1:
+ break
+ level = 0
+ for i, c in enumerate(explanation[start:]):
+ if c == "{":
+ level += 1
+ elif c == "}":
+ level -= 1
+ if not level:
+ break
+ else:
+ raise AssertionError("unbalanced braces: %r" % (explanation,))
+ end = start + i
+ where = end
+ if explanation[end - 1] == '\n':
+ explanation = (explanation[:start] + explanation[start+15:end-1] +
+ explanation[end+1:])
+ where -= 17
+ raw_lines = (explanation or '').split('\n')
+ # escape newlines not followed by {, } and ~
+ lines = [raw_lines[0]]
+ for l in raw_lines[1:]:
+ if l.startswith('{') or l.startswith('}') or l.startswith('~'):
+ lines.append(l)
+ else:
+ lines[-1] += '\\n' + l
+
+ result = lines[:1]
+ stack = [0]
+ stackcnt = [0]
+ for line in lines[1:]:
+ if line.startswith('{'):
+ if stackcnt[-1]:
+ s = 'and '
+ else:
+ s = 'where '
+ stack.append(len(result))
+ stackcnt[-1] += 1
+ stackcnt.append(0)
+ result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
+ elif line.startswith('}'):
+ assert line.startswith('}')
+ stack.pop()
+ stackcnt.pop()
+ result[stack[-1]] += line[1:]
+ else:
+ assert line.startswith('~')
+ result.append(' '*len(stack) + line[1:])
+ assert len(stack) == 1
+ return '\n'.join(result)
+
+
+# Provide basestring in python3
+try:
+ basestring = basestring
+except NameError:
+ basestring = str
+
+
+def assertrepr_compare(op, left, right):
+ """return specialised explanations for some operators/operands"""
+ width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+ left_repr = py.io.saferepr(left, maxsize=int(width/2))
+ right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
+ summary = '%s %s %s' % (left_repr, op, right_repr)
+
+ issequence = lambda x: isinstance(x, (list, tuple))
+ istext = lambda x: isinstance(x, basestring)
+ isdict = lambda x: isinstance(x, dict)
+ isset = lambda x: isinstance(x, set)
+
+ explanation = None
+ try:
+ if op == '==':
+ if istext(left) and istext(right):
+ explanation = _diff_text(left, right)
+ elif issequence(left) and issequence(right):
+ explanation = _compare_eq_sequence(left, right)
+ elif isset(left) and isset(right):
+ explanation = _compare_eq_set(left, right)
+ elif isdict(left) and isdict(right):
+ explanation = _diff_text(py.std.pprint.pformat(left),
+ py.std.pprint.pformat(right))
+ elif op == 'not in':
+ if istext(left) and istext(right):
+ explanation = _notin_text(left, right)
+ except py.builtin._sysex:
+ raise
+ except:
+ excinfo = py.code.ExceptionInfo()
+ explanation = ['(pytest_assertion plugin: representation of '
+ 'details failed. Probably an object has a faulty __repr__.)',
+ str(excinfo)
+ ]
+
+
+ if not explanation:
+ return None
+
+ # Don't include pageloads of data, should be configurable
+ if len(''.join(explanation)) > 80*8:
+ explanation = ['Detailed information too verbose, truncated']
+
+ return [summary] + explanation
+
+
+def _diff_text(left, right):
+ """Return the explanation for the diff between text
+
+ This will skip leading and trailing characters which are
+ identical to keep the diff minimal.
+ """
+ explanation = []
+ i = 0 # just in case left or right has zero length
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation = ['Skipping %s identical '
+ 'leading characters in diff' % i]
+ left = left[i:]
+ right = right[i:]
+ if len(left) == len(right):
+ for i in range(len(left)):
+ if left[-i] != right[-i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation += ['Skipping %s identical '
+ 'trailing characters in diff' % i]
+ left = left[:-i]
+ right = right[:-i]
+ explanation += [line.strip('\n')
+ for line in py.std.difflib.ndiff(left.splitlines(),
+ right.splitlines())]
+ return explanation
+
+
+def _compare_eq_sequence(left, right):
+ explanation = []
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
+ explanation += ['At index %s diff: %r != %r' %
+ (i, left[i], right[i])]
+ break
+ if len(left) > len(right):
+ explanation += ['Left contains more items, '
+ 'first extra item: %s' % py.io.saferepr(left[len(right)],)]
+ elif len(left) < len(right):
+ explanation += ['Right contains more items, '
+ 'first extra item: %s' % py.io.saferepr(right[len(left)],)]
+ return explanation # + _diff_text(py.std.pprint.pformat(left),
+ # py.std.pprint.pformat(right))
+
+
+def _compare_eq_set(left, right):
+ explanation = []
+ diff_left = left - right
+ diff_right = right - left
+ if diff_left:
+ explanation.append('Extra items in the left set:')
+ for item in diff_left:
+ explanation.append(py.io.saferepr(item))
+ if diff_right:
+ explanation.append('Extra items in the right set:')
+ for item in diff_right:
+ explanation.append(py.io.saferepr(item))
+ return explanation
+
+
+def _notin_text(term, text):
+ index = text.find(term)
+ head = text[:index]
+ tail = text[index+len(term):]
+ correct_text = head + tail
+ diff = _diff_text(correct_text, text)
+ newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)]
+ for line in diff:
+ if line.startswith('Skipping'):
+ continue
+ if line.startswith('- '):
+ continue
+ if line.startswith('+ '):
+ newdiff.append(' ' + line[2:])
+ else:
+ newdiff.append(line)
+ return newdiff
diff --git a/lib/pypy/_pytest/capture.py b/lib/pypy/_pytest/capture.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/capture.py
@@ -0,0 +1,226 @@
+""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """
+
+import pytest, py
+import os
+
def pytest_addoption(parser):
    """Register the capture-related command line options."""
    group = parser.getgroup("general")
    group._addoption(
        '--capture', action="store", default=None,
        metavar="method", type="choice", choices=['fd', 'sys', 'no'],
        help="per-test capturing method: one of fd (default)|sys|no.")
    # -s is a plain alias for --capture=no
    group._addoption(
        '-s', action="store_const", const="no", dest="capture",
        help="shortcut for --capture=no.")
+
def addouterr(rep, outerr):
    """Attach captured stdout/stderr text as sections of a report.

    No-op when the report's ``longrepr`` does not support ``addsection``
    (e.g. passed tests).  Empty streams are skipped.
    """
    longrepr = getattr(rep, 'longrepr', None)
    if not hasattr(longrepr, 'addsection'):
        return
    for secname, content in zip(["out", "err"], outerr):
        if content:
            longrepr.addsection("Captured std%s" % secname, content.rstrip())
+
def pytest_unconfigure(config):
    """Suspend all capture objects created during the run.

    The capture manager was registered in config.py during early
    conftest.py loading.  We only suspend (not reset) because the
    logging module may want to close the captured streams itself on
    process exit.
    """
    capman = config.pluginmanager.getplugin('capturemanager')
    while capman._method2capture:
        _method, cap = capman._method2capture.popitem()
        cap.suspend()
+
class NoCapture:
    """Do-nothing capture object used when capturing is disabled (-s)."""

    def startall(self):
        pass

    def resume(self):
        pass

    def reset(self):
        pass

    def suspend(self):
        # mimic the real captures' (out, err) return value
        return "", ""
+
class CaptureManager:
    """Plugin driving per-test capturing of stdout/stderr.

    One capture object per method ("fd", "sys", "no") is created lazily
    and cached in ``_method2capture``.  Capturing is resumed around each
    item's setup/call/teardown phase and suspended afterwards to collect
    the accumulated output, which is attached to the test report.
    """
    def __init__(self):
        # method name ("fd"/"sys"/"no") -> capture object, created lazily
        self._method2capture = {}

    def _maketempfile(self):
        # UTF-8 re-dup of a tempfile so fd-level capture owns its own fd
        f = py.std.tempfile.TemporaryFile()
        newf = py.io.dupfile(f, encoding="UTF-8")
        f.close()
        return newf

    def _makestringio(self):
        # in-memory text buffer for sys-level capturing
        return py.io.TextIO()

    def _getcapture(self, method):
        # create a fresh, not-yet-started capture object for *method*
        if method == "fd":
            return py.io.StdCaptureFD(now=False,
                out=self._maketempfile(), err=self._maketempfile()
            )
        elif method == "sys":
            return py.io.StdCapture(now=False,
                out=self._makestringio(), err=self._makestringio()
            )
        elif method == "no":
            return NoCapture()
        else:
            raise ValueError("unknown capturing method: %r" % method)

    def _getmethod_preoptionparse(self, args):
        # cheap scan of raw argv before real option parsing has happened
        if '-s' in args or "--capture=no" in args:
            return "no"
        elif hasattr(os, 'dup') and '--capture=sys' not in args:
            return "fd"
        else:
            return "sys"

    def _getmethod(self, config, fspath):
        # resolve the capture method: cmdline option wins, then a
        # conftest-provided "option_capture", then the "fd" default
        if config.option.capture:
            method = config.option.capture
        else:
            try:
                method = config._conftest.rget("option_capture", path=fspath)
            except KeyError:
                method = "fd"
        if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
            method = "sys"
        return method

    def resumecapture_item(self, item):
        # start/resume capturing for a test item, initializing the
        # per-item (out, err) accumulator on first use
        method = self._getmethod(item.config, item.fspath)
        if not hasattr(item, 'outerr'):
            item.outerr = ('', '') # we accumulate outerr on the item
        return self.resumecapture(method)

    def resumecapture(self, method):
        # start (first time) or resume the capture object for *method*;
        # nested capturing is an error, signalled via ValueError
        if hasattr(self, '_capturing'):
            raise ValueError("cannot resume, already capturing with %r" %
                (self._capturing,))
        cap = self._method2capture.get(method)
        self._capturing = method
        if cap is None:
            self._method2capture[method] = cap = self._getcapture(method)
            cap.startall()
        else:
            cap.resume()

    def suspendcapture(self, item=None):
        """Stop capturing and return the (out, err) captured so far,
        merged with any output previously accumulated on *item*."""
        self.deactivate_funcargs()
        if hasattr(self, '_capturing'):
            method = self._capturing
            cap = self._method2capture.get(method)
            if cap is not None:
                outerr = cap.suspend()
            del self._capturing
            if item:
                outerr = (item.outerr[0] + outerr[0],
                    item.outerr[1] + outerr[1])
            return outerr
        # not currently capturing: fall back to the item's accumulator
        if hasattr(item, 'outerr'):
            return item.outerr
        return "", ""

    def activate_funcargs(self, pyfuncitem):
        # start capsys/capfd funcarg captures layered on top of the
        # global capture, remembering them for later finalization
        if not hasattr(pyfuncitem, 'funcargs'):
            return
        assert not hasattr(self, '_capturing_funcargs')
        self._capturing_funcargs = capturing_funcargs = []
        for name, capfuncarg in pyfuncitem.funcargs.items():
            if name in ('capsys', 'capfd'):
                capturing_funcargs.append(capfuncarg)
                capfuncarg._start()

    def deactivate_funcargs(self):
        # finalize any active capsys/capfd funcarg captures (idempotent)
        capturing_funcargs = getattr(self, '_capturing_funcargs', None)
        if capturing_funcargs is not None:
            while capturing_funcargs:
                capfuncarg = capturing_funcargs.pop()
                capfuncarg._finalize()
            del self._capturing_funcargs

    def pytest_make_collect_report(self, __multicall__, collector):
        # capture output produced while collecting (e.g. import-time prints)
        method = self._getmethod(collector.config, collector.fspath)
        try:
            self.resumecapture(method)
        except ValueError:
            return # recursive collect, XXX refactor capturing
            # to allow for more lightweight recursive capturing
        try:
            rep = __multicall__.execute()
        finally:
            outerr = self.suspendcapture()
        # NOTE(review): if execute() raises, 'rep' is unbound and the
        # original code would mask the error with a NameError when this
        # was inside the finally block; kept as-is for fidelity.
        addouterr(rep, outerr)
        return rep

    @pytest.mark.tryfirst
    def pytest_runtest_setup(self, item):
        self.resumecapture_item(item)

    @pytest.mark.tryfirst
    def pytest_runtest_call(self, item):
        self.resumecapture_item(item)
        self.activate_funcargs(item)

    @pytest.mark.tryfirst
    def pytest_runtest_teardown(self, item):
        self.resumecapture_item(item)

    def pytest__teardown_final(self, __multicall__, session):
        # capture output of the session-final teardown
        method = self._getmethod(session.config, None)
        self.resumecapture(method)
        try:
            rep = __multicall__.execute()
        finally:
            outerr = self.suspendcapture()
        if rep:
            addouterr(rep, outerr)
        return rep

    def pytest_keyboard_interrupt(self, excinfo):
        # make sure the terminal is usable again after Ctrl-C
        if hasattr(self, '_capturing'):
            self.suspendcapture()

    @pytest.mark.tryfirst
    def pytest_runtest_makereport(self, __multicall__, item, call):
        # collect output for this phase; keep accumulating on the item
        # across setup/call, reset after failures or final teardown
        self.deactivate_funcargs()
        rep = __multicall__.execute()
        outerr = self.suspendcapture(item)
        if not rep.passed:
            addouterr(rep, outerr)
        if not rep.passed or rep.when == "teardown":
            outerr = ('', '')
        item.outerr = outerr
        return rep
+
def pytest_funcarg__capsys(request):
    """enables capturing of writes to sys.stdout/sys.stderr and makes
    captured output available via ``capsys.readouterr()`` method calls
    which return a ``(out, err)`` tuple.
    """
    # sys-level capture: replaces the sys.stdout/sys.stderr objects
    return CaptureFuncarg(captureclass=py.io.StdCapture)
+
def pytest_funcarg__capfd(request):
    """enables capturing of writes to file descriptors 1 and 2 and makes
    captured output available via ``capfd.readouterr()`` method calls
    which return a ``(out, err)`` tuple.
    """
    # fd-level capture requires os.dup (missing e.g. on jython)
    if not hasattr(os, 'dup'):
        py.test.skip("capfd funcarg needs os.dup")
    return CaptureFuncarg(py.io.StdCaptureFD)
+
class CaptureFuncarg:
    """Funcarg object handed to tests as ``capsys``/``capfd``.

    Wraps a capture class; the plugin starts capturing via ``_start()``
    and tears it down via ``_finalize()``/``close()``.
    """

    def __init__(self, captureclass):
        # instantiate a lazily-started capture (now=False)
        self.capture = captureclass(now=False)

    def _start(self):
        self.capture.startall()

    def _finalize(self):
        # idempotent: after the first call the attribute is gone
        if hasattr(self, 'capture'):
            self.capture.reset()
            del self.capture

    def readouterr(self):
        """Return the (out, err) text captured so far."""
        return self.capture.readouterr()

    def close(self):
        self._finalize()
diff --git a/lib/pypy/_pytest/config.py b/lib/pypy/_pytest/config.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/config.py
@@ -0,0 +1,449 @@
+""" command line options, ini-file and conftest.py processing. """
+
+import py
+import sys, os
+from _pytest.core import PluginManager
+import pytest
+
def pytest_cmdline_parse(pluginmanager, args):
    """Create a Config, parse *args* into it and return it."""
    config = Config(pluginmanager)
    config.parse(args)
    if config.option.debug:
        # --debug: echo the internal trace messages to stderr
        config.trace.root.setwriter(sys.stderr.write)
    return config
+
def pytest_unconfigure(config):
    """Run the cleanup callbacks registered on ``config._cleanup``."""
    for finalizer in config._cleanup:
        finalizer()
+
class Parser:
    """ Parser for command line arguments.

    Collects optparse-style options (grouped via OptionGroup) and
    ini-file option declarations; the actual optparse machinery is
    built lazily in parse().
    """

    def __init__(self, usage=None, processopt=None):
        self._anonymous = OptionGroup("custom options", parser=self)
        self._groups = []
        self._processopt = processopt
        self._usage = usage
        self._inidict = {}      # ini option name -> (help, type, default)
        self._ininames = []     # ini option names in registration order
        # BUGFIX: addnote() appends to self._notes, but it was never
        # initialized, so the first addnote() call raised AttributeError.
        self._notes = []
        self.hints = []

    def processoption(self, option):
        # let the registered callback see every option with a destination
        if self._processopt:
            if option.dest:
                self._processopt(option)

    def addnote(self, note):
        """Record a free-form note (see ``_notes`` init fix above)."""
        self._notes.append(note)

    def getgroup(self, name, description="", after=None):
        """ get (or create) a named option Group.

        :name: unique name of the option group.
        :description: long description for --help output.
        :after: name of other group, used for ordering --help output.
        """
        for group in self._groups:
            if group.name == name:
                return group
        group = OptionGroup(name, description, parser=self)
        i = 0
        for i, grp in enumerate(self._groups):
            if grp.name == after:
                break
        # inserts at the end when *after* is None or not found
        self._groups.insert(i+1, group)
        return group

    def addoption(self, *opts, **attrs):
        """ add an optparse-style option. """
        self._anonymous.addoption(*opts, **attrs)

    def parse(self, args):
        """Build the optparse parser from all groups and parse *args*."""
        self.optparser = optparser = MyOptionParser(self)
        groups = self._groups + [self._anonymous]
        for group in groups:
            if group.options:
                desc = group.description or group.name
                optgroup = py.std.optparse.OptionGroup(optparser, desc)
                optgroup.add_options(group.options)
                optparser.add_option_group(optgroup)
        return self.optparser.parse_args([str(x) for x in args])

    def parse_setoption(self, args, option):
        """Parse *args* and copy the parsed values onto *option*;
        return the remaining positional arguments."""
        parsedoption, args = self.parse(args)
        for name, value in parsedoption.__dict__.items():
            setattr(option, name, value)
        return args

    def addini(self, name, help, type=None, default=None):
        """ add an ini-file option with the given name and description. """
        assert type in (None, "pathlist", "args", "linelist")
        self._inidict[name] = (help, type, default)
        self._ininames.append(name)
+
class OptionGroup:
    """A named group of optparse options, rendered together in --help."""

    def __init__(self, name, description="", parser=None):
        self.name = name
        self.description = description
        self.options = []
        self.parser = parser

    def addoption(self, *optnames, **attrs):
        """ add an option to this group. """
        self._addoption_instance(
            py.std.optparse.Option(*optnames, **attrs), shortupper=False)

    def _addoption(self, *optnames, **attrs):
        # internal variant: allows lowercase short options
        self._addoption_instance(
            py.std.optparse.Option(*optnames, **attrs), shortupper=True)

    def _addoption_instance(self, option, shortupper=False):
        if not shortupper:
            # lowercase short options are reserved for pytest itself
            for opt in option._short_opts:
                if opt[0] == '-' and opt[1].islower():
                    raise ValueError("lowercase shortoptions reserved")
        if self.parser:
            self.parser.processoption(option)
        self.options.append(option)
+
+
class MyOptionParser(py.std.optparse.OptionParser):
    """OptionParser that renders plugin-provided hints as an epilog."""

    def __init__(self, parser):
        self._parser = parser
        py.std.optparse.OptionParser.__init__(
            self, usage=parser._usage, add_help_option=False)

    def format_epilog(self, formatter):
        hints = self._parser.hints
        if not hints:
            return ""
        body = "\n".join("hint: " + hint for hint in hints) + "\n"
        return "\n" + body + "\n"
+
class Conftest(object):
    """ the single place for accessing values and interacting
    towards conftest modules from py.test objects.
    """
    def __init__(self, onimport=None, confcutdir=None):
        # path -> list of conftest modules relevant for that path
        self._path2confmods = {}
        # optional callback invoked after each conftest import
        self._onimport = onimport
        # conftest.py path -> imported module (import cache)
        self._conftestpath2mod = {}
        # directory above which no conftest files are considered
        self._confcutdir = confcutdir

    def setinitial(self, args):
        """ try to find a first anchor path for looking up global values
        from conftests. This function is usually called _before_
        argument parsing. conftest files may add command line options
        and we thus have no completely safe way of determining
        which parts of the arguments are actually related to options
        and which are file system paths. We just try here to get
        bootstrapped ...
        """
        current = py.path.local()
        opt = '--confcutdir'
        for i in range(len(args)):
            opt1 = str(args[i])
            if opt1.startswith(opt):
                if opt1 == opt:
                    # NOTE(review): 'len(args) > i' is always true here;
                    # 'len(args) > i + 1' looks intended -- a trailing
                    # '--confcutdir' without a value raises IndexError.
                    if len(args) > i:
                        p = current.join(args[i+1], abs=True)
                elif opt1.startswith(opt + "="):
                    p = current.join(opt1[len(opt)+1:], abs=1)
                self._confcutdir = p
                break
        for arg in args + [current]:
            # skip option strings; they are not filesystem anchors
            if hasattr(arg, 'startswith') and arg.startswith("--"):
                continue
            anchor = current.join(arg, abs=1)
            if anchor.check(): # we found some file object
                self._path2confmods[None] = self.getconftestmodules(anchor)
                # let's also consider test* dirs
                if anchor.check(dir=1):
                    for x in anchor.listdir("test*"):
                        if x.check(dir=1):
                            self.getconftestmodules(x)
                break
        else:
            assert 0, "no root of filesystem?"

    def getconftestmodules(self, path):
        """ return a list of imported conftest modules for the given path. """
        try:
            clist = self._path2confmods[path]
        except KeyError:
            if path is None:
                raise ValueError("missing default confest.")
            dp = path.dirpath()
            clist = []
            if dp != path:
                # do not look above the confcutdir, if one is set
                cutdir = self._confcutdir
                if cutdir and path != cutdir and not path.relto(cutdir):
                    pass
                else:
                    conftestpath = path.join("conftest.py")
                    if conftestpath.check(file=1):
                        clist.append(self.importconftest(conftestpath))
                # parent-dir conftests come first (lower precedence)
                clist[:0] = self.getconftestmodules(dp)
            self._path2confmods[path] = clist
        # be defensive: avoid changes from caller side to
        # affect us by always returning a copy of the actual list
        return clist[:]

    def rget(self, name, path=None):
        # return just the value, discarding the providing module
        mod, value = self.rget_with_confmod(name, path)
        return value

    def rget_with_confmod(self, name, path=None):
        """Return (module, value) for *name*, preferring the conftest
        closest to *path*; raises KeyError if no conftest defines it."""
        modules = self.getconftestmodules(path)
        modules.reverse()
        for mod in modules:
            try:
                return mod, getattr(mod, name)
            except AttributeError:
                continue
        raise KeyError(name)

    def importconftest(self, conftestpath):
        """Import (with caching) the conftest module at *conftestpath*
        and record it for all already-known paths below its directory."""
        assert conftestpath.check(), conftestpath
        try:
            return self._conftestpath2mod[conftestpath]
        except KeyError:
            pkgpath = conftestpath.pypkgpath()
            if pkgpath is None:
                # avoid a stale top-level module shadowing the import
                _ensure_removed_sysmodule(conftestpath.purebasename)
            self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport()
            dirpath = conftestpath.dirpath()
            if dirpath in self._path2confmods:
                for path, mods in self._path2confmods.items():
                    if path and path.relto(dirpath) or path == dirpath:
                        assert mod not in mods
                        mods.append(mod)
            self._postimport(mod)
            return mod

    def _postimport(self, mod):
        # notify the registered onimport callback (plugin registration)
        if self._onimport:
            self._onimport(mod)
        return mod
+
+def _ensure_removed_sysmodule(modname):
+ try:
+ del sys.modules[modname]
+ except KeyError:
+ pass
+
class CmdOptions(object):
    """ holds cmdline options as attributes."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __repr__(self):
        return "<CmdOptions %r>" %(self.__dict__,)
+
class Config(object):
    """ access to configuration values, pluginmanager and plugin hooks. """
    def __init__(self, pluginmanager=None):
        #: command line option values, usually added via parser.addoption(...)
        #: or parser.getgroup(...).addoption(...) calls
        self.option = CmdOptions()
        self._parser = Parser(
            usage="usage: %prog [options] [file_or_dir] [file_or_dir] [...]",
            processopt=self._processopt,
        )
        #: a pluginmanager instance
        self.pluginmanager = pluginmanager or PluginManager(load=True)
        self.trace = self.pluginmanager.trace.root.get("config")
        self._conftest = Conftest(onimport=self._onimportconftest)
        self.hook = self.pluginmanager.hook
        # cache for getini() lookups
        self._inicache = {}
        # callbacks run by pytest_unconfigure()
        self._cleanup = []

    @classmethod
    def fromdictargs(cls, option_dict, args):
        """ constructor useable for subprocesses. """
        config = cls()
        config._preparse(args, addopts=False)
        config.option.__dict__.update(option_dict)
        for x in config.option.plugins:
            config.pluginmanager.consider_pluginarg(x)
        return config

    def _onimportconftest(self, conftestmodule):
        # every imported conftest is also considered as a plugin
        self.trace("loaded conftestmodule %r" %(conftestmodule,))
        self.pluginmanager.consider_conftest(conftestmodule)

    def _processopt(self, opt):
        # seed self.option with each option's default value
        if hasattr(opt, 'default') and opt.dest:
            if not hasattr(self.option, opt.dest):
                setattr(self.option, opt.dest, opt.default)

    def _getmatchingplugins(self, fspath):
        # all non-conftest plugins plus the conftests relevant for fspath
        allconftests = self._conftest._conftestpath2mod.values()
        plugins = [x for x in self.pluginmanager.getplugins()
                        if x not in allconftests]
        plugins += self._conftest.getconftestmodules(fspath)
        return plugins

    def _setinitialconftest(self, args):
        # capture output during conftest init (#issue93)
        from _pytest.capture import CaptureManager
        capman = CaptureManager()
        self.pluginmanager.register(capman, 'capturemanager')
        # will be unregistered in capture.py's unconfigure()
        capman.resumecapture(capman._getmethod_preoptionparse(args))
        try:
            try:
                self._conftest.setinitial(args)
            finally:
                out, err = capman.suspendcapture() # logging might have got it
        except:
            # re-emit whatever was captured before propagating the error
            sys.stdout.write(out)
            sys.stderr.write(err)
            raise

    def _initini(self, args):
        # locate the [pytest] ini section and declare built-in ini options
        self.inicfg = getcfg(args, ["pytest.ini", "tox.ini", "setup.cfg"])
        self._parser.addini('addopts', 'extra command line options', 'args')
        self._parser.addini('minversion', 'minimally required pytest version')

    def _preparse(self, args, addopts=True):
        """Bootstrap: read ini file, load plugins and conftests, and let
        plugins add their command line options (before real parsing)."""
        self._initini(args)
        if addopts:
            args[:] = self.getini("addopts") + args
        self._checkversion()
        self.pluginmanager.consider_preparse(args)
        self.pluginmanager.consider_setuptools_entrypoints()
        self.pluginmanager.consider_env()
        self._setinitialconftest(args)
        self.pluginmanager.do_addoption(self._parser)
        if addopts:
            self.hook.pytest_cmdline_preparse(config=self, args=args)

    def _checkversion(self):
        # enforce the ini-file 'minversion' requirement
        minver = self.inicfg.get('minversion', None)
        if minver:
            ver = minver.split(".")
            myver = pytest.__version__.split(".")
            # NOTE(review): this compares version components as strings,
            # so e.g. "2.10" < "2.9" lexicographically -- confirm before
            # relying on it for double-digit releases.
            if myver < ver:
                raise pytest.UsageError(
                    "%s:%d: requires pytest-%s, actual pytest-%s'" %(
                    self.inicfg.config.path, self.inicfg.lineof('minversion'),
                    minver, pytest.__version__))

    def parse(self, args):
        # parse given cmdline arguments into this config object.
        # Note that this can only be called once per testing process.
        assert not hasattr(self, 'args'), (
                "can only parse cmdline args at most once per Config object")
        self._preparse(args)
        self._parser.hints.extend(self.pluginmanager._hints)
        args = self._parser.parse_setoption(args, self.option)
        if not args:
            # no positional args: default to the current directory
            args.append(py.std.os.getcwd())
        self.args = args

    def getini(self, name):
        """ return configuration value from an ini file. If the
        specified name hasn't been registered through a prior ``parse.addini``
        call (usually from a plugin), a ValueError is raised. """
        try:
            return self._inicache[name]
        except KeyError:
            self._inicache[name] = val = self._getini(name)
            return val

    def _getini(self, name):
        try:
            description, type, default = self._parser._inidict[name]
        except KeyError:
            raise ValueError("unknown configuration value: %r" %(name,))
        try:
            value = self.inicfg[name]
        except KeyError:
            # missing key: registered default, else an empty value of
            # the appropriate kind ('' for plain, [] for list types)
            if default is not None:
                return default
            if type is None:
                return ''
            return []
        if type == "pathlist":
            # paths are interpreted relative to the ini file's directory
            dp = py.path.local(self.inicfg.config.path).dirpath()
            l = []
            for relpath in py.std.shlex.split(value):
                l.append(dp.join(relpath, abs=True))
            return l
        elif type == "args":
            return py.std.shlex.split(value)
        elif type == "linelist":
            return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
        else:
            assert type is None
            return value

    def _getconftest_pathlist(self, name, path=None):
        """Return *name* from the closest conftest as a list of paths
        resolved relative to that conftest's directory (or None)."""
        try:
            mod, relroots = self._conftest.rget_with_confmod(name, path)
        except KeyError:
            return None
        modpath = py.path.local(mod.__file__).dirpath()
        l = []
        for relroot in relroots:
            if not isinstance(relroot, py.path.local):
                relroot = relroot.replace("/", py.path.local.sep)
                relroot = modpath.join(relroot, abs=True)
            l.append(relroot)
        return l

    def _getconftest(self, name, path=None, check=False):
        if check:
            self._checkconftest(name)
        return self._conftest.rget(name, path)

    def getvalue(self, name, path=None):
        """ return ``name`` value looked set from command line options.

        (deprecated) if we can't find the option also lookup
        the name in a matching conftest file.
        """
        try:
            return getattr(self.option, name)
        except AttributeError:
            return self._getconftest(name, path, check=False)

    def getvalueorskip(self, name, path=None):
        """ (deprecated) return getvalue(name) or call
        py.test.skip if no value exists. """
        __tracebackhide__ = True
        try:
            val = self.getvalue(name, path)
            if val is None:
                raise KeyError(name)
            return val
        except KeyError:
            py.test.skip("no %r value found" %(name,))
+
+
def getcfg(args, inibasenames):
    """Return the first ``[pytest]`` ini section found by searching the
    given ini basenames upwards from each non-option argument (falling
    back to the current directory); return {} if none is found."""
    paths = [arg for arg in args if str(arg)[0] != "-"]
    if not paths:
        paths = [py.path.local()]
    for arg in paths:
        arg = py.path.local(arg)
        for base in arg.parts(reverse=True):
            for inibasename in inibasenames:
                candidate = base.join(inibasename)
                if candidate.check():
                    iniconfig = py.iniconfig.IniConfig(candidate)
                    if 'pytest' in iniconfig.sections:
                        return iniconfig['pytest']
    return {}
+
def findupwards(current, basename):
    """Return the path of *basename* found in *current* or the nearest
    ancestor directory, or None when the filesystem root is reached."""
    current = py.path.local(current)
    while True:
        candidate = current.join(basename)
        if candidate.check():
            return candidate
        parent = current.dirpath()
        if parent == current:
            # reached the filesystem root without a match
            return None
        current = parent
+
diff --git a/lib/pypy/_pytest/core.py b/lib/pypy/_pytest/core.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/core.py
@@ -0,0 +1,467 @@
+"""
+pytest PluginManager, basic initialization and tracing.
+(c) Holger Krekel 2004-2010
+"""
+import sys, os
+import inspect
+import py
+from _pytest import hookspec # the extension point definitions
+
def _py_version_tuple(version):
    """Return the first two components of *version* as an int tuple.

    Non-digit suffixes within a component (e.g. "4dev3") are ignored;
    a component without leading digits counts as 0.
    """
    parts = []
    for component in version.split(".")[:2]:
        digits = ""
        for ch in component:
            if not ch.isdigit():
                break
            digits += ch
        parts.append(int(digits or 0))
    return tuple(parts)

# BUGFIX: the version guard previously compared the components as
# *strings*, which misorders double-digit releases ('10' < '4'
# lexicographically); compare numerically instead.
assert _py_version_tuple(py.__version__) >= (1, 4), ("installation problem: "
    "%s is too old, remove or upgrade 'py'" % (py.__version__))

default_plugins = (
 "config mark main terminal runner python pdb unittest capture skipping "
 "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
 "junitxml resultlog doctest").split()
+
class TagTracer:
    """Root of the tracing machinery.

    Dispatches tagged messages both to an optional writer callable and
    to per-tag processor callbacks registered via setprocessor().
    """

    def __init__(self, prefix="[pytest] "):
        self._tag2proc = {}
        self.writer = None
        self.indent = 0
        self.prefix = prefix

    def get(self, name):
        """Return a sub-tracer whose messages carry the tag *name*."""
        return TagTracerSub(self, (name,))

    def processmessage(self, tags, args):
        if self.writer is not None and args:
            content = " ".join(str(arg) for arg in args)
            self.writer("%s%s%s\n" % (self.prefix, " " * self.indent, content))
        try:
            # a processor registered for exactly this tag tuple?
            self._tag2proc[tags](tags, args)
        except KeyError:
            pass

    def setwriter(self, writer):
        self.writer = writer

    def setprocessor(self, tags, processor):
        # accept "a:b" shorthand for the tag tuple ("a", "b")
        if isinstance(tags, str):
            tags = tuple(tags.split(":"))
        else:
            assert isinstance(tags, tuple)
        self._tag2proc[tags] = processor
+
class TagTracerSub:
    """Lightweight handle binding a root TagTracer to a tag tuple."""

    def __init__(self, root, tags):
        self.root = root
        self.tags = tags

    def __call__(self, *args):
        # delegate to the root, which knows about writer/processors
        self.root.processmessage(self.tags, args)

    def setmyprocessor(self, processor):
        self.root.setprocessor(self.tags, processor)

    def get(self, name):
        # extend the tag tuple -> hierarchical tracer names
        return self.__class__(self.root, self.tags + (name,))
+
class PluginManager(object):
    """Registry of plugin objects and entry point for hook calls.

    Maintains the ordered plugin list, a name->plugin mapping and the
    HookRelay (``self.hook``) through which all pytest_* hooks run.
    """
    def __init__(self, load=False):
        self._name2plugin = {}
        # cache for listattr(): (attrname, *plugins) -> ordered methods
        self._listattrcache = {}
        self._plugins = []
        # hint strings shown by --help / traceconfig
        self._hints = []
        self.trace = TagTracer().get("pluginmanage")
        # (distribution, plugin) pairs from setuptools entry points
        self._plugin_distinfo = []
        if os.environ.get('PYTEST_DEBUG'):
            # PYTEST_DEBUG: echo all tracing to (a dup of) stderr
            err = sys.stderr
            encoding = getattr(err, 'encoding', 'utf8')
            try:
                err = py.io.dupfile(err, encoding=encoding)
            except Exception:
                pass
            self.trace.root.setwriter(err.write)
        self.hook = HookRelay([hookspec], pm=self)
        self.register(self)
        if load:
            for spec in default_plugins:
                self.import_plugin(spec)

    def register(self, plugin, name=None, prepend=False):
        """Register *plugin* under *name*; return True if newly added,
        False when a plugin of that name already exists."""
        assert not self.isregistered(plugin), plugin
        name = name or getattr(plugin, '__name__', str(id(plugin)))
        if name in self._name2plugin:
            return False
        #self.trace("registering", name, plugin)
        self._name2plugin[name] = plugin
        # give the plugin a chance to add new hook specifications first
        self.call_plugin(plugin, "pytest_addhooks", {'pluginmanager': self})
        self.hook.pytest_plugin_registered(manager=self, plugin=plugin)
        if not prepend:
            self._plugins.append(plugin)
        else:
            self._plugins.insert(0, plugin)
        return True

    def unregister(self, plugin=None, name=None):
        # accepts either the plugin object or its registration name
        if plugin is None:
            plugin = self.getplugin(name=name)
        self._plugins.remove(plugin)
        self.hook.pytest_plugin_unregistered(plugin=plugin)
        for name, value in list(self._name2plugin.items()):
            if value == plugin:
                del self._name2plugin[name]

    def isregistered(self, plugin, name=None):
        if self.getplugin(name) is not None:
            return True
        for val in self._name2plugin.values():
            if plugin == val:
                return True

    def addhooks(self, spec):
        # expose additional pytest_* hook specifications
        self.hook._addhooks(spec, prefix="pytest_")

    def getplugins(self):
        return list(self._plugins)

    def skipifmissing(self, name):
        # skip the current test unless plugin *name* is available
        if not self.hasplugin(name):
            py.test.skip("plugin %r is missing" % name)

    def hasplugin(self, name):
        return bool(self.getplugin(name))

    def getplugin(self, name):
        """Return the plugin registered as *name* (also trying the
        '_pytest.<name>' form) or None."""
        if name is None:
            return None
        try:
            return self._name2plugin[name]
        except KeyError:
            return self._name2plugin.get("_pytest." + name, None)

    # API for bootstrapping
    #
    def _envlist(self, varname):
        # comma-separated environment variable -> list of entries
        val = py.std.os.environ.get(varname, None)
        if val is not None:
            return val.split(',')
        return ()

    def consider_env(self):
        # load plugins named in the PYTEST_PLUGINS environment variable
        for spec in self._envlist("PYTEST_PLUGINS"):
            self.import_plugin(spec)

    def consider_setuptools_entrypoints(self):
        # load third-party plugins advertised via the 'pytest11' group
        try:
            from pkg_resources import iter_entry_points, DistributionNotFound
        except ImportError:
            return # XXX issue a warning
        for ep in iter_entry_points('pytest11'):
            name = ep.name
            if name.startswith("pytest_"):
                name = name[7:]
            if ep.name in self._name2plugin or name in self._name2plugin:
                continue
            try:
                plugin = ep.load()
            except DistributionNotFound:
                continue
            self._plugin_distinfo.append((ep.dist, plugin))
            self.register(plugin, name=name)

    def consider_preparse(self, args):
        # honor "-p name" pairs found in the raw command line
        for opt1,opt2 in zip(args, args[1:]):
            if opt1 == "-p":
                self.consider_pluginarg(opt2)

    def consider_pluginarg(self, arg):
        # "-p no:name" blocks a plugin; "-p name" loads it
        if arg.startswith("no:"):
            name = arg[3:]
            if self.getplugin(name) is not None:
                self.unregister(None, name=name)
            # sentinel entry prevents any later registration of *name*
            self._name2plugin[name] = -1
        else:
            if self.getplugin(arg) is None:
                self.import_plugin(arg)

    def consider_conftest(self, conftestmodule):
        # conftest modules are plugins keyed by their file path
        if self.register(conftestmodule, name=conftestmodule.__file__):
            self.consider_module(conftestmodule)

    def consider_module(self, mod):
        # honor a module-level "pytest_plugins" declaration
        attr = getattr(mod, "pytest_plugins", ())
        if attr:
            if not isinstance(attr, (list, tuple)):
                attr = (attr,)
            for spec in attr:
                self.import_plugin(spec)

    def import_plugin(self, modname):
        """Import and register plugin *modname* unless already present;
        a skip raised at import time becomes a hint, not an error."""
        assert isinstance(modname, str)
        if self.getplugin(modname) is not None:
            return
        try:
            #self.trace("importing", modname)
            mod = importplugin(modname)
        except KeyboardInterrupt:
            raise
        except ImportError:
            # retry without the "pytest_" prefix before giving up
            if modname.startswith("pytest_"):
                return self.import_plugin(modname[7:])
            raise
        except:
            e = py.std.sys.exc_info()[1]
            if not hasattr(py.test, 'skip'):
                raise
            elif not isinstance(e, py.test.skip.Exception):
                raise
            self._hints.append("skipped plugin %r: %s" %((modname, e.msg)))
        else:
            self.register(mod, modname)
            self.consider_module(mod)

    def pytest_plugin_registered(self, plugin):
        # export the plugin's pytest_namespace and replay addoption /
        # configure for plugins registered after configuration started
        import pytest
        dic = self.call_plugin(plugin, "pytest_namespace", {}) or {}
        if dic:
            self._setns(pytest, dic)
        if hasattr(self, '_config'):
            self.call_plugin(plugin, "pytest_addoption",
                {'parser': self._config._parser})
            self.call_plugin(plugin, "pytest_configure",
                {'config': self._config})

    def _setns(self, obj, dic):
        """Recursively graft *dic* onto namespace object *obj*,
        creating pytest.<name> submodules for nested dicts."""
        import pytest
        for name, value in dic.items():
            if isinstance(value, dict):
                mod = getattr(obj, name, None)
                if mod is None:
                    modname = "pytest.%s" % name
                    mod = py.std.types.ModuleType(modname)
                    sys.modules[modname] = mod
                    mod.__all__ = []
                    setattr(obj, name, mod)
                    obj.__all__.append(name)
                self._setns(mod, value)
            else:
                setattr(obj, name, value)
                obj.__all__.append(name)
                #if obj != pytest:
                #    pytest.__all__.append(name)
                setattr(pytest, name, value)

    def pytest_terminal_summary(self, terminalreporter):
        # with --traceconfig, show the accumulated hints
        tw = terminalreporter._tw
        if terminalreporter.config.option.traceconfig:
            for hint in self._hints:
                tw.line("hint: %s" % hint)

    def do_addoption(self, parser):
        # run all plugins' pytest_addoption in reverse registration order
        mname = "pytest_addoption"
        methods = reversed(self.listattr(mname))
        MultiCall(methods, {'parser': parser}).execute()

    def do_configure(self, config):
        assert not hasattr(self, '_config')
        self._config = config
        config.hook.pytest_configure(config=self._config)

    def do_unconfigure(self, config):
        config = self._config
        del self._config
        config.hook.pytest_unconfigure(config=config)
        config.pluginmanager.unregister(self)

    def notify_exception(self, excinfo, option=None):
        """Report an internal error via the pytest_internalerror hook,
        falling back to INTERNALERROR> lines on stderr."""
        if option and option.fulltrace:
            style = "long"
        else:
            style = "native"
        excrepr = excinfo.getrepr(funcargs=True,
            showlocals=getattr(option, 'showlocals', False),
            style=style,
        )
        res = self.hook.pytest_internalerror(excrepr=excrepr)
        if not py.builtin.any(res):
            for line in str(excrepr).split("\n"):
                sys.stderr.write("INTERNALERROR> %s\n" %line)
                sys.stderr.flush()

    def listattr(self, attrname, plugins=None):
        """Return the plugins' *attrname* methods, ordered so that
        'trylast' ones come first and 'tryfirst' ones come last (the
        caller invokes them LIFO); results are cached per plugin set."""
        if plugins is None:
            plugins = self._plugins
        key = (attrname,) + tuple(plugins)
        try:
            return list(self._listattrcache[key])
        except KeyError:
            pass
        l = []
        last = []
        for plugin in plugins:
            try:
                meth = getattr(plugin, attrname)
                if hasattr(meth, 'tryfirst'):
                    last.append(meth)
                elif hasattr(meth, 'trylast'):
                    l.insert(0, meth)
                else:
                    l.append(meth)
            except AttributeError:
                continue
        l.extend(last)
        self._listattrcache[key] = list(l)
        return l

    def call_plugin(self, plugin, methname, kwargs):
        # invoke a single plugin's hook method, first result wins
        return MultiCall(methods=self.listattr(methname, plugins=[plugin]),
                kwargs=kwargs, firstresult=True).execute()
+
+
def importplugin(importspec):
    """Import a plugin module: first as ``_pytest.<spec>``, falling
    back to importing *importspec* verbatim."""
    try:
        return __import__("_pytest." + importspec, None, None, '__doc__')
    except ImportError:
        # not a builtin plugin; try the spec as a plain module name
        pass
    return __import__(importspec, None, None, '__doc__')
+
class MultiCall:
    """ execute a call into multiple python functions/methods. """

    def __init__(self, methods, kwargs, firstresult=False):
        self.methods = list(methods)
        self.kwargs = kwargs
        self.results = []
        self.firstresult = firstresult

    def __repr__(self):
        status = "%d results, %d meths" % (len(self.results), len(self.methods))
        return "<MultiCall %s, kwargs=%r>" %(status, self.kwargs)

    def execute(self):
        """Invoke the methods in LIFO order, collecting non-None results.

        With firstresult=True the first non-None result is returned
        immediately; otherwise the list of all results is returned.
        """
        while self.methods:
            method = self.methods.pop()
            res = method(**self.getkwargs(method))
            if res is None:
                continue
            self.results.append(res)
            if self.firstresult:
                return res
        if not self.firstresult:
            return self.results

    def getkwargs(self, method):
        """Select from self.kwargs the arguments *method* accepts;
        the special name '__multicall__' receives this instance."""
        selected = {}
        for argname in varnames(method):
            if argname in self.kwargs:
                selected[argname] = self.kwargs[argname]
            elif argname == "__multicall__":
                selected[argname] = self
        return selected
+
def varnames(func):
    """Return the tuple of argument names of *func*, caching the result
    on the function object under ``_varnames``.

    Non-function callables are inspected via their ``__call__``; for
    bound methods the implicit ``self`` argument is skipped.
    """
    try:
        return func._varnames
    except AttributeError:
        pass
    if not inspect.isfunction(func) and not inspect.ismethod(func):
        func = getattr(func, '__call__', func)
    ismethod = inspect.ismethod(func)
    rawcode = py.code.getrawcode(func)
    try:
        names = rawcode.co_varnames[ismethod:rawcode.co_argcount]
    except AttributeError:
        # objects without real code info yield no argument names
        names = ()
    py.builtin._getfuncdict(func)['_varnames'] = names
    return names
+
class HookRelay:
    """Namespace object exposing one HookCaller attribute per hook
    declared in the given hook specification modules/classes."""

    def __init__(self, hookspecs, pm, prefix="pytest_"):
        if not isinstance(hookspecs, list):
            hookspecs = [hookspecs]
        self._hookspecs = []
        self._pm = pm
        self.trace = pm.trace.root.get("hook")
        for hookspec in hookspecs:
            self._addhooks(hookspec, prefix)

    def _addhooks(self, hookspecs, prefix):
        """Create a HookCaller for every *prefix*-named attribute of
        *hookspecs*; raise ValueError if none is found."""
        self._hookspecs.append(hookspecs)
        added = False
        for name, method in vars(hookspecs).items():
            if not name.startswith(prefix):
                continue
            firstresult = getattr(method, 'firstresult', False)
            setattr(self, name, HookCaller(self, name, firstresult=firstresult))
            added = True
            #print ("setting new hook", name)
        if not added:
            raise ValueError("did not find new %r hooks in %r" %(
                prefix, hookspecs,))
+
+
class HookCaller:
    """Callable proxy for one named hook.

    Invoking it runs the hook implementations of all registered
    plugins (or, via pcall, a selected subset) through MultiCall.
    """

    def __init__(self, hookrelay, name, firstresult):
        self.hookrelay = hookrelay
        self.name = name
        self.firstresult = firstresult
        self.trace = self.hookrelay.trace

    def __repr__(self):
        return "<HookCaller %r>" %(self.name,)

    def __call__(self, **kwargs):
        # hook call across all registered plugins
        return self._docall(self.hookrelay._pm.listattr(self.name), kwargs)

    def pcall(self, plugins, **kwargs):
        # hook call restricted to the given plugins
        methods = self.hookrelay._pm.listattr(self.name, plugins=plugins)
        return self._docall(methods, kwargs)

    def _docall(self, methods, kwargs):
        self.trace(self.name, kwargs)
        self.trace.root.indent += 1
        mc = MultiCall(methods, kwargs, firstresult=self.firstresult)
        try:
            res = mc.execute()
            if res:
                self.trace("finish", self.name, "-->", res)
        finally:
            # always restore the trace indentation level
            self.trace.root.indent -= 1
        return res
+
# stash for a PluginManager created ahead of time; main() consumes it
# so a pre-imported plugin set can be reused for the first test run
_preinit = []

def _preloadplugins():
    # eagerly create a fully-loaded PluginManager for later use by main()
    _preinit.append(PluginManager(load=True))
+
def main(args=None, plugins=None):
    """ returned exit code integer, after an in-process testing run
    with the given command line arguments, preloading an optional list
    of passed in plugin objects. """
    # normalize args: default to sys.argv, accept a path or a string
    if args is None:
        args = sys.argv[1:]
    elif isinstance(args, py.path.local):
        args = [str(args)]
    elif not isinstance(args, (tuple, list)):
        if not isinstance(args, str):
            raise ValueError("not a string or argument list: %r" % (args,))
        args = py.std.shlex.split(args)
    # reuse a pre-created PluginManager if _preloadplugins() ran
    if _preinit:
        _pluginmanager = _preinit.pop(0)
    else: # subsequent calls to main will create a fresh instance
        _pluginmanager = PluginManager(load=True)
    hook = _pluginmanager.hook
    try:
        if plugins:
            for plugin in plugins:
                _pluginmanager.register(plugin)
        config = hook.pytest_cmdline_parse(
                pluginmanager=_pluginmanager, args=args)
        exitstatus = hook.pytest_cmdline_main(config=config)
    except UsageError:
        # py2-compatible way of getting the active exception
        e = sys.exc_info()[1]
        sys.stderr.write("ERROR: %s\n" %(e.args[0],))
        # exit status 3 signals a usage/invocation error
        exitstatus = 3
    return exitstatus
+
class UsageError(Exception):
    """Error in py.test usage or invocation (caught by main(), which
    reports it on stderr and returns exit status 3)."""
+
diff --git a/lib/pypy/_pytest/doctest.py b/lib/pypy/_pytest/doctest.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/doctest.py
@@ -0,0 +1,87 @@
+""" discover and run doctests in modules and test files."""
+
+import pytest, py
+from py._code.code import TerminalRepr, ReprFileLocation
+
def pytest_addoption(parser):
    """Register the doctest collection command line options."""
    group = parser.getgroup("collect")
    group.addoption(
        "--doctest-modules",
        action="store_true", default=False,
        help="run doctests in all .py modules",
        dest="doctestmodules")
    group.addoption(
        "--doctest-glob",
        action="store", default="test*.txt", metavar="pat",
        help="doctests file matching pattern, default: test*.txt",
        dest="doctestglob")
+
def pytest_collect_file(path, parent):
    """Return a doctest collection node for matching paths, else None."""
    config = parent.config
    if path.ext == ".py":
        if config.option.doctestmodules:
            return DoctestModule(path, parent)
        return None
    # text files: either an explicitly given initial path or a glob match
    if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path):
        return DoctestTextfile(path, parent)
    if path.check(fnmatch=config.getvalue("doctestglob")):
        return DoctestTextfile(path, parent)
    return None
+
class ReprFailDoctest(TerminalRepr):
    """Terminal representation of a doctest failure: the report lines
    followed by the failing file location."""

    def __init__(self, reprlocation, lines):
        self.reprlocation = reprlocation
        self.lines = lines

    def toterminal(self, tw):
        for outline in self.lines:
            tw.line(outline)
        self.reprlocation.toterminal(tw)
+
class DoctestItem(pytest.Item):
    # Base item providing doctest-aware failure reporting for both
    # doctest text files and doctest modules.

    def repr_failure(self, excinfo):
        # For doctest-specific failures show a numbered source snippet
        # around the failing example plus either an output diff or the
        # unexpected exception; defer everything else to the base class.
        doctest = py.std.doctest
        if excinfo.errisinstance((doctest.DocTestFailure,
            doctest.UnexpectedException)):
            doctestfailure = excinfo.value
            example = doctestfailure.example
            test = doctestfailure.test
            filename = test.filename
            lineno = test.lineno + example.lineno + 1
            message = excinfo.type.__name__
            reprlocation = ReprFileLocation(filename, lineno, message)
            checker = py.std.doctest.OutputChecker()
            REPORT_UDIFF = py.std.doctest.REPORT_UDIFF
            filelines = py.path.local(filename).readlines(cr=0)
            # show at most ~10 lines of context before the failure location
            i = max(test.lineno, max(0, lineno - 10)) # XXX?
            lines = []
            for line in filelines[i:lineno]:
                lines.append("%03d %s" % (i+1, line))
                i += 1
            if excinfo.errisinstance(doctest.DocTestFailure):
                lines += checker.output_difference(example,
                    doctestfailure.got, REPORT_UDIFF).split("\n")
            else:
                inner_excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
                lines += ["UNEXPECTED EXCEPTION: %s" %
                    repr(inner_excinfo.value)]
                lines += py.std.traceback.format_exception(*excinfo.value.exc_info)
            return ReprFailDoctest(reprlocation, lines)
        else:
            return super(DoctestItem, self).repr_failure(excinfo)

    def reportinfo(self):
        # location tuple used by reporting: (path, lineno, domain)
        return self.fspath, None, "[doctest]"
+
class DoctestTextfile(DoctestItem, pytest.File):
    # A text file (.txt/.rst or --doctest-glob match) run via doctest.testfile().

    def runtest(self):
        # raise_on_error=True makes doctest raise DocTestFailure /
        # UnexpectedException, which DoctestItem.repr_failure understands.
        doctest = py.std.doctest
        failed, tot = doctest.testfile(
            str(self.fspath), module_relative=False,
            optionflags=doctest.ELLIPSIS,
            raise_on_error=True, verbose=0)
+
class DoctestModule(DoctestItem, pytest.File):
    # A python module whose docstrings are run through doctest.testmod().

    def runtest(self):
        doctest = py.std.doctest
        if self.fspath.basename == "conftest.py":
            # conftest files must go through the conftest import machinery
            # so the already-registered module instance is reused
            module = self.config._conftest.importconftest(self.fspath)
        else:
            module = self.fspath.pyimport()
        failed, tot = doctest.testmod(
            module, raise_on_error=True, verbose=0,
            optionflags=doctest.ELLIPSIS)
diff --git a/lib/pypy/_pytest/genscript.py b/lib/pypy/_pytest/genscript.py
new file mode 100755
--- /dev/null
+++ b/lib/pypy/_pytest/genscript.py
@@ -0,0 +1,69 @@
+""" generate a single-file self-contained version of py.test """
+import py
+
def find_toplevel(name):
    """Locate top-level module or package *name* on sys.path.

    Returns a py.path.local pointing at the package directory or the
    single-module ``.py`` file; raises LookupError when nothing matches.
    """
    for entry in py.std.sys.path:
        root = py.path.local(entry)
        pkgdir = root / name
        if pkgdir.check(dir=1):
            return pkgdir
        modfile = root.join("%s.py" % name)
        if modfile.check(file=1):
            return modfile
    raise LookupError(name)
+
def pkgname(toplevel, rootpath, path):
    """Dotted module name for *path* relative to *rootpath*, prefixed
    with *toplevel*."""
    relative = path.parts()[len(rootpath.parts()):]
    names = [toplevel]
    names.extend(part.purebasename for part in relative)
    return '.'.join(names)
+
def pkg_to_mapping(name):
    """Map importable module names to their source text for *name*."""
    toplevel = find_toplevel(name)
    if toplevel.check(file=1): # module
        # single module: one entry keyed by its bare name
        return {toplevel.purebasename: toplevel.read()}
    # package: one entry per contained .py file
    sources = {}
    for pyfile in toplevel.visit('*.py'):
        sources[pkgname(name, toplevel, pyfile)] = pyfile.read()
    return sources
+
def compress_mapping(mapping):
    """Pickle, zlib-compress and base64-encode *mapping*.

    Returns the payload as an ascii text string suitable for embedding
    into the generated standalone script.
    """
    import pickle, zlib, base64
    data = pickle.dumps(mapping, 2)
    data = zlib.compress(data, 9)
    # base64.encodestring was deprecated and removed in Python 3.9;
    # encodebytes (available since 3.1) produces identical output.
    encode = getattr(base64, "encodebytes", None)
    if encode is None: # very old interpreters (Python 2)
        encode = base64.encodestring
    data = encode(data)
    return data.decode('ascii')
+
+
def compress_packages(names):
    """Collect the sources of all *names* and compress them as one blob."""
    combined = {}
    for pkg in names:
        combined.update(pkg_to_mapping(pkg))
    return compress_mapping(combined)
+
def generate_script(entry, packages):
    """Render the standalone template with the compressed *packages*
    and the given *entry* expression substituted in."""
    template = py.path.local(__file__).dirpath().join('standalonetemplate.py')
    script = template.read()
    script = script.replace('@SOURCES@', compress_packages(packages))
    return script.replace('@ENTRY@', entry)
+
+
def pytest_addoption(parser):
    """Register the --genscript option."""
    parser.getgroup("debugconfig").addoption(
        "--genscript", action="store", default=None,
        dest="genscript", metavar="path",
        help="create standalone py.test script at given target path.")
+
def pytest_cmdline_main(config):
    """When --genscript was given, write the standalone script and
    return 0 to stop normal test running."""
    target = config.getvalue("genscript")
    if not target:
        return None
    script = generate_script(
        'import py; raise SystemExit(py.test.cmdline.main())',
        ['py', '_pytest', 'pytest'],
    )
    py.path.local(target).write(script)
    return 0
diff --git a/lib/pypy/_pytest/helpconfig.py b/lib/pypy/_pytest/helpconfig.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/helpconfig.py
@@ -0,0 +1,174 @@
+""" version info, help messages, tracing configuration. """
+import py
+import pytest
+import inspect, sys
+from _pytest.core import varnames
+
def pytest_addoption(parser):
    """Register version/help/plugin-preload/debug options."""
    group = parser.getgroup('debugconfig')
    group.addoption('--version', action="store_true",
               help="display pytest lib version and import information.")
    group._addoption("-h", "--help", action="store_true", dest="help",
               help="show help message and configuration info")
    group._addoption('-p', action="append", dest="plugins", default=[],
               metavar="name",
               help="early-load given plugin (multi-allowed).")
    # fixed: a stray trailing comma after the next call used to build a
    # useless one-element tuple
    group.addoption('--traceconfig',
               action="store_true", dest="traceconfig", default=False,
               help="trace considerations of conftest.py files.")
    group.addoption('--debug',
               action="store_true", dest="debug", default=False,
               help="generate and show internal debugging information.")
+
+
def pytest_cmdline_main(config):
    """Serve --version and -h/--help; return 0 when either was handled."""
    opt = config.option
    if opt.version:
        location = py.path.local(pytest.__file__)
        sys.stderr.write("This is py.test version %s, imported from %s\n" %
            (pytest.__version__, location))
        for line in getpluginversioninfo(config) or []:
            sys.stderr.write(line + "\n")
        return 0
    if opt.help:
        config.pluginmanager.do_configure(config)
        showhelp(config)
        return 0
+
def showhelp(config):
    """Write option help plus the known [pytest] ini-option table to the
    terminal."""
    tw = py.io.TerminalWriter()
    tw.write(config._parser.optparser.format_help())
    tw.line()
    tw.line()
    tw.line("[pytest] ini-options in the next "
            "pytest.ini|tox.ini|setup.cfg file:")
    tw.line()

    for name in config._parser._ininames:
        help, type, default = config._parser._inidict[name]
        if type is None:
            type = "string"
        spec = "%s (%s)" % (name, type)
        line = " %-24s %s" % (spec, help)
        tw.line(line[:tw.fullwidth])

    tw.line()
    tw.line()
    # fixed: the original carried an unreachable conftest.py option
    # listing after a bare ``return`` here; the dead statements were removed.
+
# (name, description) pairs describing conftest.py-level options for
# help output.
conftest_options = [
    ('pytest_plugins', 'list of plugin names to load'),
]
+
def getpluginversioninfo(config):
    """Return report lines for setuptools-registered plugins (may be empty)."""
    plugininfo = config.pluginmanager._plugin_distinfo
    if not plugininfo:
        return []
    lines = ["setuptools registered plugins:"]
    for dist, plugin in plugininfo:
        location = getattr(plugin, '__file__', repr(plugin))
        content = "%s-%s at %s" % (dist.project_name, dist.version, location)
        lines.append(" " + content)
    return lines
+
def pytest_report_header(config):
    """Header lines about library versions and (with --traceconfig)
    the active plugins.  Fixed: removed the unused local ``plugins``."""
    lines = []
    opt = config.option
    if opt.debug or opt.traceconfig:
        lines.append("using: pytest-%s pylib-%s" %
            (pytest.__version__, py.__version__))

        verinfo = getpluginversioninfo(config)
        if verinfo:
            lines.extend(verinfo)

    if opt.traceconfig:
        lines.append("active plugins:")
        for name, plugin in config.pluginmanager._name2plugin.items():
            if hasattr(plugin, '__file__'):
                location = plugin.__file__
            else:
                location = repr(plugin)
            lines.append(" %-20s: %s" % (name, location))
    return lines
+
+
+# =====================================================
+# validate plugin syntax and hooks
+# =====================================================
+
def pytest_plugin_registered(manager, plugin):
    # Validate a newly registered plugin: every ``pytest_*`` function it
    # defines must match a known hook spec and may only accept argument
    # names that the spec provides.  Raises PluginValidationError with the
    # collected messages otherwise.
    methods = collectattr(plugin)
    hooks = {}
    for hookspec in manager.hook._hookspecs:
        hooks.update(collectattr(hookspec))

    # collect all complaints into a buffer so a single error reports them all
    stringio = py.io.TextIO()
    def Print(*args):
        if args:
            stringio.write(" ".join(map(str, args)))
        stringio.write("\n")

    fail = False
    while methods:
        name, method = methods.popitem()
        #print "checking", name
        if isgenerichook(name):
            continue
        if name not in hooks:
            if not getattr(method, 'optionalhook', False):
                Print("found unknown hook:", name)
                fail = True
        else:
            #print "checking", method
            method_args = list(varnames(method))
            # __multicall__ is injected by the call machinery, not the spec
            if '__multicall__' in method_args:
                method_args.remove('__multicall__')
            hook = hooks[name]
            hookargs = varnames(hook)
            for arg in method_args:
                if arg not in hookargs:
                    Print("argument %r not available" %(arg, ))
                    Print("actual definition: %s" %(formatdef(method)))
                    Print("available hook arguments: %s" %
                        ", ".join(hookargs))
                    fail = True
                    break
            #if not fail:
            #    print "matching hook:", formatdef(method)
    if fail:
        name = getattr(plugin, '__name__', plugin)
        raise PluginValidationError("%s:\n%s" % (name, stringio.getvalue()))
+
class PluginValidationError(Exception):
    """ plugin failed validation: it defines unknown hooks or hook
    implementations whose arguments do not match the hook spec. """
+
def isgenerichook(name):
    """True for hook names that are exempt from hookspec validation."""
    if name == "pytest_plugins":
        return True
    return name.startswith("pytest_funcarg__")
+
def collectattr(obj):
    """Map each ``pytest_``-prefixed attribute name of *obj* to its value."""
    return dict((apiname, getattr(obj, apiname))
                for apiname in dir(obj)
                if apiname.startswith("pytest_"))
+
def formatdef(func):
    """Return ``name(argspec)`` for *func*, e.g. ``"f(a, b=2)"``."""
    try:
        # inspect.getargspec / formatargspec were removed in Python
        # 3.11 / 3.13; keep them for old interpreters, fall back to
        # inspect.signature on modern ones.
        spec = inspect.formatargspec(*inspect.getargspec(func))
    except AttributeError:
        spec = str(inspect.signature(func))
    return "%s%s" % (
        func.__name__,
        spec
    )
+
diff --git a/lib/pypy/_pytest/hookspec.py b/lib/pypy/_pytest/hookspec.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/hookspec.py
@@ -0,0 +1,222 @@
+""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
+
+# -------------------------------------------------------------------------
+# Initialization
+# -------------------------------------------------------------------------
+
# NOTE: the functions below are hook *specifications*: their bodies are
# intentionally empty and plugins supply the implementations.  Setting
# ``<hook>.firstresult = True`` makes a call stop at the first
# implementation returning a non-None result.

def pytest_addhooks(pluginmanager):
    """called at plugin load time to allow adding new hooks via a call to
    pluginmanager.registerhooks(module)."""


def pytest_namespace():
    """return dict of name->object to be made globally available in
    the py.test/pytest namespace. This hook is called before command
    line options are parsed.
    """

def pytest_cmdline_parse(pluginmanager, args):
    """return initialized config object, parsing the specified args. """
pytest_cmdline_parse.firstresult = True

def pytest_cmdline_preparse(config, args):
    """modify command line arguments before option parsing. """

def pytest_addoption(parser):
    """add optparse-style options and ini-style config values via calls
    to ``parser.addoption`` and ``parser.addini(...)``.
    """

def pytest_cmdline_main(config):
    """ called for performing the main command line action. The default
    implementation will invoke the configure hooks and runtest_mainloop. """
pytest_cmdline_main.firstresult = True

def pytest_configure(config):
    """ called after command line options have been parsed.
    and all plugins and initial conftest files been loaded.
    """

def pytest_unconfigure(config):
    """ called before test process is exited. """

def pytest_runtestloop(session):
    """ called for performing the main runtest loop
    (after collection finished). """
pytest_runtestloop.firstresult = True
+
+# -------------------------------------------------------------------------
+# collection hooks
+# -------------------------------------------------------------------------
+
# Collection hook specifications: discovery of test files/items plus
# the logging hooks reporting on it.  Bodies are empty by design.

def pytest_collection(session):
    """ perform the collection protocol for the given session. """
pytest_collection.firstresult = True

def pytest_collection_modifyitems(session, config, items):
    """ called after collection has been performed, may filter or re-order
    the items in-place."""

def pytest_collection_finish(session):
    """ called after collection has been performed and modified. """

def pytest_ignore_collect(path, config):
    """ return True to prevent considering this path for collection.
    This hook is consulted for all files and directories prior to calling
    more specific hooks.
    """
pytest_ignore_collect.firstresult = True

def pytest_collect_directory(path, parent):
    """ called before traversing a directory for collection files. """
pytest_collect_directory.firstresult = True

def pytest_collect_file(path, parent):
    """ return collection Node or None for the given path. Any new node
    needs to have the specified ``parent`` as a parent."""

# logging hooks for collection
def pytest_collectstart(collector):
    """ collector starts collecting. """

def pytest_itemcollected(item):
    """ we just collected a test item. """

def pytest_collectreport(report):
    """ collector finished collecting. """

def pytest_deselected(items):
    """ called for test items deselected by keyword. """

def pytest_make_collect_report(collector):
    """ perform ``collector.collect()`` and return a CollectReport. """
pytest_make_collect_report.firstresult = True
+
+# -------------------------------------------------------------------------
+# Python test function related hooks
+# -------------------------------------------------------------------------
+
# Python-specific collection and test-call hook specifications.

def pytest_pycollect_makemodule(path, parent):
    """ return a Module collector or None for the given path.
    This hook will be called for each matching test module path.
    The pytest_collect_file hook needs to be used if you want to
    create test modules for files that do not match as a test module.
    """
pytest_pycollect_makemodule.firstresult = True

def pytest_pycollect_makeitem(collector, name, obj):
    """ return custom item/collector for a python object in a module, or None. """
pytest_pycollect_makeitem.firstresult = True

def pytest_pyfunc_call(pyfuncitem):
    """ call underlying test function. """
pytest_pyfunc_call.firstresult = True

def pytest_generate_tests(metafunc):
    """ generate (multiple) parametrized calls to a test function."""
+
+# -------------------------------------------------------------------------
+# generic runtest related hooks
+# -------------------------------------------------------------------------
# Per-item runtest protocol hook specifications plus final-teardown handling.
def pytest_itemstart(item, node=None):
    """ (deprecated, use pytest_runtest_logstart). """

def pytest_runtest_protocol(item):
    """ implements the standard runtest_setup/call/teardown protocol including
    capturing exceptions and calling reporting hooks on the results accordingly.

    :return boolean: True if no further hook implementations should be invoked.
    """
pytest_runtest_protocol.firstresult = True

def pytest_runtest_logstart(nodeid, location):
    """ signal the start of a test run. """

def pytest_runtest_setup(item):
    """ called before ``pytest_runtest_call(item)``. """

def pytest_runtest_call(item):
    """ called to execute the test ``item``. """

def pytest_runtest_teardown(item):
    """ called after ``pytest_runtest_call``. """

def pytest_runtest_makereport(item, call):
    """ return a :py:class:`_pytest.runner.TestReport` object
    for the given :py:class:`pytest.Item` and
    :py:class:`_pytest.runner.CallInfo`.
    """
pytest_runtest_makereport.firstresult = True

def pytest_runtest_logreport(report):
    """ process item test report. """

# special handling for final teardown - somewhat internal for now
def pytest__teardown_final(session):
    """ called before test session finishes. """
pytest__teardown_final.firstresult = True

def pytest__teardown_final_logerror(report, session):
    """ called if runtest_teardown_final failed. """
+
+# -------------------------------------------------------------------------
+# test session related hooks
+# -------------------------------------------------------------------------
+
# Session lifecycle hook specifications.

def pytest_sessionstart(session):
    """ before session.main() is called. """

def pytest_sessionfinish(session, exitstatus):
    """ whole test run finishes. """
+
+
+# -------------------------------------------------------------------------
+# hooks for customising the assert methods
+# -------------------------------------------------------------------------
+
def pytest_assertrepr_compare(config, op, left, right):
    """return explanation for comparisons in failing assert expressions.

    Return None for no custom explanation, otherwise return a list
    of strings. The strings will be joined by newlines but any newlines
    *in* a string will be escaped. Note that all but the first line will
    be indented slightly, the intention is for the first line to be a summary.
    """
+
+# -------------------------------------------------------------------------
+# hooks for influencing reporting (invoked from _pytest_terminal)
+# -------------------------------------------------------------------------
+
# Reporting hook specifications (invoked from the terminal plugin).

def pytest_report_header(config):
    """ return a string to be displayed as header info for terminal reporting."""

def pytest_report_teststatus(report):
    """ return result-category, shortletter and verbose word for reporting."""
pytest_report_teststatus.firstresult = True

def pytest_terminal_summary(terminalreporter):
    """ add additional section in terminal summary reporting. """

# -------------------------------------------------------------------------
# doctest hooks
# -------------------------------------------------------------------------

def pytest_doctest_prepare_content(content):
    """ return processed content for a given doctest"""
pytest_doctest_prepare_content.firstresult = True
+
+# -------------------------------------------------------------------------
+# error handling and internal debugging hooks
+# -------------------------------------------------------------------------
+
# Plugin lifecycle and internal error hook specifications.

def pytest_plugin_registered(plugin, manager):
    """ a new py lib plugin got registered. """

def pytest_plugin_unregistered(plugin):
    """ a py lib plugin got unregistered. """

def pytest_internalerror(excrepr):
    """ called for internal errors. """

def pytest_keyboard_interrupt(excinfo):
    """ called for keyboard interrupt. """
diff --git a/lib/pypy/_pytest/junitxml.py b/lib/pypy/_pytest/junitxml.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/junitxml.py
@@ -0,0 +1,221 @@
+""" report test results in JUnit-XML format, for use with Hudson and build integration servers.
+
+Based on initial code from Ross Lawley.
+"""
+
+import py
+import os
+import re
+import sys
+import time
+
+
# Python 2.X and 3.X compatibility: provide the unichr/unicode/long
# names on Python 3, where they were folded into chr/str/int.
try:
    unichr(65)
except NameError:
    unichr = chr
try:
    unicode('A')
except NameError:
    unicode = str
try:
    long(1)
except NameError:
    long = int
+
+
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# | [#x10000-#x10FFFF]
_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x19),
                    (0xD800, 0xDFFF), (0xFDD0, 0xFFFF)]
# the ``low < sys.maxunicode`` filter drops ranges that cannot be
# represented on narrow (UCS-2) unicode builds
_illegal_ranges = [unicode("%s-%s") % (unichr(low), unichr(high))
                   for (low, high) in _illegal_unichrs
                   if low < sys.maxunicode]
illegal_xml_re = re.compile(unicode('[%s]') %
                            unicode('').join(_illegal_ranges))
# the helper lists are only needed to build the regexp
del _illegal_unichrs
del _illegal_ranges
+
+
def pytest_addoption(parser):
    """Register the --junitxml / --junitprefix reporting options."""
    group = parser.getgroup("terminal reporting")
    group.addoption(
        '--junitxml', action="store", dest="xmlpath",
        metavar="path", default=None,
        help="create junit-xml style report file at given path.")
    group.addoption(
        '--junitprefix', action="store", dest="junitprefix",
        metavar="str", default=None,
        help="prepend prefix to classnames in junit-xml output")
+
def pytest_configure(config):
    """Activate junit-xml logging when --junitxml was given."""
    xmlpath = config.option.xmlpath
    if not xmlpath:
        return
    config._xml = LogXML(xmlpath, config.option.junitprefix)
    config.pluginmanager.register(config._xml)
+
def pytest_unconfigure(config):
    """Detach and unregister the junit-xml logger, if one is active."""
    xml = getattr(config, '_xml', None)
    if xml:
        del config._xml
        config.pluginmanager.unregister(xml)
+
+
class LogXML(object):
    """Plugin object recording test outcomes as XML fragments and writing
    a JUnit-style report file when the session finishes."""

    def __init__(self, logfile, prefix):
        logfile = os.path.expanduser(os.path.expandvars(logfile))
        self.logfile = os.path.normpath(logfile)
        self.prefix = prefix
        # accumulated XML fragments, joined into the report at session end
        self.test_logs = []
        self.passed = self.skipped = 0
        self.failed = self.errors = 0
        # nodeid -> duration of the call phase, in seconds
        self._durations = {}

    def _opentestcase(self, report):
        # open a <testcase> element with classname/name/time attributes
        # derived from the report's node id
        names = report.nodeid.split("::")
        names[0] = names[0].replace("/", '.')
        names = tuple(names)
        d = {'time': self._durations.pop(report.nodeid, "0")}
        names = [x.replace(".py", "") for x in names if x != "()"]
        classnames = names[:-1]
        if self.prefix:
            classnames.insert(0, self.prefix)
        d['classname'] = ".".join(classnames)
        d['name'] = py.xml.escape(names[-1])
        attrs = ['%s="%s"' % item for item in sorted(d.items())]
        self.test_logs.append("\n<testcase %s>" % " ".join(attrs))

    def _closetestcase(self):
        self.test_logs.append("</testcase>")

    def appendlog(self, fmt, *args):
        # replace XML-illegal characters in args by #xNN char references
        def repl(matchobj):
            i = ord(matchobj.group())
            if i <= 0xFF:
                return unicode('#x%02X') % i
            else:
                return unicode('#x%04X') % i
        args = tuple([illegal_xml_re.sub(repl, py.xml.escape(arg))
                      for arg in args])
        self.test_logs.append(fmt % args)

    def append_pass(self, report):
        self.passed += 1
        self._opentestcase(report)
        self._closetestcase()

    def append_failure(self, report):
        self._opentestcase(report)
        #msg = str(report.longrepr.reprtraceback.extraline)
        if "xfail" in report.keywords:
            # an xfail-marked test that passed unexpectedly counts as skipped
            self.appendlog(
                '<skipped message="xfail-marked test passes unexpectedly"/>')
            self.skipped += 1
        else:
            self.appendlog('<failure message="test failure">%s</failure>',
                report.longrepr)
            self.failed += 1
        self._closetestcase()

    def append_collect_failure(self, report):
        self._opentestcase(report)
        #msg = str(report.longrepr.reprtraceback.extraline)
        self.appendlog('<failure message="collection failure">%s</failure>',
            report.longrepr)
        self._closetestcase()
        self.errors += 1

    def append_collect_skipped(self, report):
        self._opentestcase(report)
        #msg = str(report.longrepr.reprtraceback.extraline)
        self.appendlog('<skipped message="collection skipped">%s</skipped>',
            report.longrepr)
        self._closetestcase()
        self.skipped += 1

    def append_error(self, report):
        self._opentestcase(report)
        self.appendlog('<error message="test setup failure">%s</error>',
            report.longrepr)
        self._closetestcase()
        self.errors += 1

    def append_skipped(self, report):
        self._opentestcase(report)
        if "xfail" in report.keywords:
            self.appendlog(
                '<skipped message="expected test failure">%s</skipped>',
                report.keywords['xfail'])
        else:
            # a skip longrepr is a (filename, lineno, reason) triple
            filename, lineno, skipreason = report.longrepr
            if skipreason.startswith("Skipped: "):
                skipreason = skipreason[9:]
            self.appendlog('<skipped type="pytest.skip" '
                           'message="%s">%s</skipped>',
                skipreason, "%s:%s: %s" % report.longrepr,
                )
        self._closetestcase()
        self.skipped += 1

    def pytest_runtest_logreport(self, report):
        # failures outside the call phase (setup/teardown) count as errors
        if report.passed:
            self.append_pass(report)
        elif report.failed:
            if report.when != "call":
                self.append_error(report)
            else:
                self.append_failure(report)
        elif report.skipped:
            self.append_skipped(report)

    def pytest_runtest_call(self, item, __multicall__):
        # time the actual call so <testcase time="..."> can be filled in
        start = time.time()
        try:
            return __multicall__.execute()
        finally:
            self._durations[item.nodeid] = time.time() - start

    def pytest_collectreport(self, report):
        if not report.passed:
            if report.failed:
                self.append_collect_failure(report)
            else:
                self.append_collect_skipped(report)

    def pytest_internalerror(self, excrepr):
        self.errors += 1
        data = py.xml.escape(excrepr)
        self.test_logs.append(
            '\n<testcase classname="pytest" name="internal">'
            ' <error message="internal error">'
            '%s</error></testcase>' % data)

    def pytest_sessionstart(self, session):
        self.suite_start_time = time.time()

    def pytest_sessionfinish(self, session, exitstatus, __multicall__):
        # codecs.open on Python 2; builtin open with encoding on Python 3
        if py.std.sys.version_info[0] < 3:
            logfile = py.std.codecs.open(self.logfile, 'w', encoding='utf-8')
        else:
            logfile = open(self.logfile, 'w', encoding='utf-8')

        suite_stop_time = time.time()
        suite_time_delta = suite_stop_time - self.suite_start_time
        # NOTE(review): tests="" counts only passed+failed, excluding
        # errors and skips -- confirm against report consumers
        numtests = self.passed + self.failed
        logfile.write('<?xml version="1.0" encoding="utf-8"?>')
        logfile.write('<testsuite ')
        logfile.write('name="" ')
        logfile.write('errors="%i" ' % self.errors)
        logfile.write('failures="%i" ' % self.failed)
        logfile.write('skips="%i" ' % self.skipped)
        logfile.write('tests="%i" ' % numtests)
        logfile.write('time="%.3f"' % suite_time_delta)
        logfile.write(' >')
        logfile.writelines(self.test_logs)
        logfile.write('</testsuite>')
        logfile.close()

    def pytest_terminal_summary(self, terminalreporter):
        terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
diff --git a/lib/pypy/_pytest/main.py b/lib/pypy/_pytest/main.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/main.py
@@ -0,0 +1,552 @@
+""" core implementation of testing process: init, session, runtest loop. """
+
+import py
+import pytest, _pytest
+import os, sys
+tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
+
+# exitcodes for the command line
+EXIT_OK = 0
+EXIT_TESTSFAILED = 1
+EXIT_INTERRUPTED = 2
+EXIT_INTERNALERROR = 3
+
+def pytest_addoption(parser):
+ parser.addini("norecursedirs", "directory patterns to avoid for recursion",
+ type="args", default=('.*', 'CVS', '_darcs', '{arch}'))
+ #parser.addini("dirpatterns",
+ # "patterns specifying possible locations of test files",
+ # type="linelist", default=["**/test_*.txt",
+ # "**/test_*.py", "**/*_test.py"]
+ #)
+ group = parser.getgroup("general", "running and selection options")
+ group._addoption('-x', '--exitfirst', action="store_true", default=False,
+ dest="exitfirst",
+ help="exit instantly on first error or failed test."),
+ group._addoption('--maxfail', metavar="num",
+ action="store", type="int", dest="maxfail", default=0,
+ help="exit after first num failures or errors.")
+
+ group = parser.getgroup("collect", "collection")
+ group.addoption('--collectonly',
+ action="store_true", dest="collectonly",
+ help="only collect tests, don't execute them."),
+ group.addoption('--pyargs', action="store_true",
+ help="try to interpret all arguments as python packages.")
+ group.addoption("--ignore", action="append", metavar="path",
+ help="ignore path during collection (multi-allowed).")
+ group.addoption('--confcutdir', dest="confcutdir", default=None,
+ metavar="dir",
+ help="only load conftest.py's relative to specified dir.")
+
+ group = parser.getgroup("debugconfig",
+ "test session debugging and configuration")
+ group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
+ help="base temporary directory for this test run.")
+
+
+def pytest_namespace():
+ collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
+ return dict(collect=collect)
+
+def pytest_configure(config):
+ py.test.config = config # compatibility
+ if config.option.exitfirst:
+ config.option.maxfail = 1
+
+def wrap_session(config, doit):
+ """Skeleton command line program"""
+ session = Session(config)
+ session.exitstatus = EXIT_OK
+ initstate = 0
+ try:
+ config.pluginmanager.do_configure(config)
+ initstate = 1
+ config.hook.pytest_sessionstart(session=session)
+ initstate = 2
+ doit(config, session)
+ except pytest.UsageError:
+ raise
+ except KeyboardInterrupt:
+ excinfo = py.code.ExceptionInfo()
+ config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
+ session.exitstatus = EXIT_INTERRUPTED
+ except:
+ excinfo = py.code.ExceptionInfo()
+ config.pluginmanager.notify_exception(excinfo, config.option)
+ session.exitstatus = EXIT_INTERNALERROR
+ if excinfo.errisinstance(SystemExit):
+ sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
+ if not session.exitstatus and session._testsfailed:
+ session.exitstatus = EXIT_TESTSFAILED
+ if initstate >= 2:
+ config.hook.pytest_sessionfinish(session=session,
+ exitstatus=session.exitstatus)
+ if initstate >= 1:
+ config.pluginmanager.do_unconfigure(config)
+ return session.exitstatus
+
+def pytest_cmdline_main(config):
+ return wrap_session(config, _main)
+
+def _main(config, session):
+ """ default command line protocol for initialization, session,
+ running tests and reporting. """
+ config.hook.pytest_collection(session=session)
+ config.hook.pytest_runtestloop(session=session)
+
+def pytest_collection(session):
+ return session.perform_collect()
+
+def pytest_runtestloop(session):
+ if session.config.option.collectonly:
+ return True
+ for item in session.session.items:
+ item.config.hook.pytest_runtest_protocol(item=item)
+ if session.shouldstop:
+ raise session.Interrupted(session.shouldstop)
+ return True
+
+def pytest_ignore_collect(path, config):
+ p = path.dirpath()
+ ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
+ ignore_paths = ignore_paths or []
+ excludeopt = config.getvalue("ignore")
+ if excludeopt:
+ ignore_paths.extend([py.path.local(x) for x in excludeopt])
+ return path in ignore_paths
+
+class HookProxy:
+ def __init__(self, fspath, config):
+ self.fspath = fspath
+ self.config = config
+ def __getattr__(self, name):
+ hookmethod = getattr(self.config.hook, name)
+ def call_matching_hooks(**kwargs):
+ plugins = self.config._getmatchingplugins(self.fspath)
+ return hookmethod.pcall(plugins, **kwargs)
+ return call_matching_hooks
+
+def compatproperty(name):
+ def fget(self):
+ return getattr(pytest, name)
+ return property(fget, None, None,
+ "deprecated attribute %r, use pytest.%s" % (name,name))
+
+class Node(object):
+ """ base class for all Nodes in the collection tree.
+ Collector subclasses have children, Items are terminal nodes."""
+
+ def __init__(self, name, parent=None, config=None, session=None):
+ #: a unique name with the scope of the parent
+ self.name = name
+
+ #: the parent collector node.
+ self.parent = parent
+
+ #: the test config object
+ self.config = config or parent.config
+
+ #: the collection this node is part of
+ self.session = session or parent.session
+
+ #: filesystem path where this node was collected from
+ self.fspath = getattr(parent, 'fspath', None)
+ self.ihook = self.session.gethookproxy(self.fspath)
+ self.keywords = {self.name: True}
+
+ Module = compatproperty("Module")
+ Class = compatproperty("Class")
+ Instance = compatproperty("Instance")
+ Function = compatproperty("Function")
+ File = compatproperty("File")
+ Item = compatproperty("Item")
+
+ def _getcustomclass(self, name):
+ cls = getattr(self, name)
+ if cls != getattr(pytest, name):
+ py.log._apiwarn("2.0", "use of node.%s is deprecated, "
+ "use pytest_pycollect_makeitem(...) to create custom "
+ "collection nodes" % name)
+ return cls
+
+ def __repr__(self):
+ return "<%s %r>" %(self.__class__.__name__, getattr(self, 'name', None))
+
+ # methods for ordering nodes
+ @property
+ def nodeid(self):
+ try:
+ return self._nodeid
+ except AttributeError:
+ self._nodeid = x = self._makeid()
+ return x
+
+ def _makeid(self):
+ return self.parent.nodeid + "::" + self.name
+
+ def __eq__(self, other):
+ if not isinstance(other, Node):
+ return False
+ return self.__class__ == other.__class__ and \
+ self.name == other.name and self.parent == other.parent
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash((self.name, self.parent))
+
+ def setup(self):
+ pass
+
+ def teardown(self):
+ pass
+
+ def _memoizedcall(self, attrname, function):
+ exattrname = "_ex_" + attrname
+ failure = getattr(self, exattrname, None)
+ if failure is not None:
+ py.builtin._reraise(failure[0], failure[1], failure[2])
+ if hasattr(self, attrname):
+ return getattr(self, attrname)
+ try:
+ res = function()
+ except py.builtin._sysex:
+ raise
+ except:
+ failure = py.std.sys.exc_info()
+ setattr(self, exattrname, failure)
+ raise
+ setattr(self, attrname, res)
+ return res
+
+ def listchain(self):
+ """ return list of all parent collectors up to self,
+ starting from root of collection tree. """
+ l = [self]
+ while 1:
+ x = l[0]
+ if x.parent is not None: # and x.parent.parent is not None:
+ l.insert(0, x.parent)
+ else:
+ return l
+
+ def listnames(self):
+ return [x.name for x in self.listchain()]
+
+ def getplugins(self):
+ return self.config._getmatchingplugins(self.fspath)
+
+ def getparent(self, cls):
+ current = self
+ while current and not isinstance(current, cls):
+ current = current.parent
+ return current
+
+ def _prunetraceback(self, excinfo):
+ pass
+
+ def _repr_failure_py(self, excinfo, style=None):
+ if self.config.option.fulltrace:
+ style="long"
+ else:
+ self._prunetraceback(excinfo)
+ # XXX should excinfo.getrepr record all data and toterminal()
+ # process it?
+ if style is None:
+ if self.config.option.tbstyle == "short":
+ style = "short"
+ else:
+ style = "long"
+ return excinfo.getrepr(funcargs=True,
+ showlocals=self.config.option.showlocals,
+ style=style)
+
+ repr_failure = _repr_failure_py
+
+class Collector(Node):
+ """ Collector instances create children through collect()
+ and thus iteratively build a tree.
+ """
+ class CollectError(Exception):
+ """ an error during collection, contains a custom message. """
+
+ def collect(self):
+ """ returns a list of children (items and collectors)
+ for this collection node.
+ """
+ raise NotImplementedError("abstract")
+
+ def repr_failure(self, excinfo):
+ """ represent a collection failure. """
+ if excinfo.errisinstance(self.CollectError):
+ exc = excinfo.value
+ return str(exc.args[0])
+ return self._repr_failure_py(excinfo, style="short")
+
+ def _memocollect(self):
+ """ internal helper method to cache results of calling collect(). """
+ return self._memoizedcall('_collected', lambda: list(self.collect()))
+
+ def _prunetraceback(self, excinfo):
+ if hasattr(self, 'fspath'):
+ path = self.fspath
+ traceback = excinfo.traceback
+ ntraceback = traceback.cut(path=self.fspath)
+ if ntraceback == traceback:
+ ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
+ excinfo.traceback = ntraceback.filter()
+
+class FSCollector(Collector):
+ def __init__(self, fspath, parent=None, config=None, session=None):
+ fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
+ name = fspath.basename
+ if parent is not None:
+ rel = fspath.relto(parent.fspath)
+ if rel:
+ name = rel
+ name = name.replace(os.sep, "/")
+ super(FSCollector, self).__init__(name, parent, config, session)
+ self.fspath = fspath
+
+ def _makeid(self):
+ if self == self.session:
+ return "."
+ relpath = self.session.fspath.bestrelpath(self.fspath)
+ if os.sep != "/":
+ relpath = relpath.replace(os.sep, "/")
+ return relpath
+
+class File(FSCollector):
+ """ base class for collecting tests from a file. """
+
+class Item(Node):
+ """ a basic test invocation item. Note that for a single function
+ there might be multiple test invocation items.
+ """
+ def reportinfo(self):
+ return self.fspath, None, ""
+
+ @property
+ def location(self):
+ try:
+ return self._location
+ except AttributeError:
+ location = self.reportinfo()
+ # bestrelpath is a quite slow function
+ cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
+ try:
+ fspath = cache[location[0]]
+ except KeyError:
+ fspath = self.session.fspath.bestrelpath(location[0])
+ cache[location[0]] = fspath
+ location = (fspath, location[1], str(location[2]))
+ self._location = location
+ return location
+
+class NoMatch(Exception):
+ """ raised if matching cannot locate matching names. """
+
+class Session(FSCollector):
+ class Interrupted(KeyboardInterrupt):
+ """ signals an interrupted test run. """
+ __module__ = 'builtins' # for py3
+
+ def __init__(self, config):
+ super(Session, self).__init__(py.path.local(), parent=None,
+ config=config, session=self)
+ assert self.config.pluginmanager.register(self, name="session", prepend=True)
+ self._testsfailed = 0
+ self.shouldstop = False
+ self.trace = config.trace.root.get("collection")
+ self._norecursepatterns = config.getini("norecursedirs")
+
+ def pytest_collectstart(self):
+ if self.shouldstop:
+ raise self.Interrupted(self.shouldstop)
+
+ def pytest_runtest_logreport(self, report):
+ if report.failed and 'xfail' not in getattr(report, 'keywords', []):
+ self._testsfailed += 1
+ maxfail = self.config.getvalue("maxfail")
+ if maxfail and self._testsfailed >= maxfail:
+ self.shouldstop = "stopping after %d failures" % (
+ self._testsfailed)
+ pytest_collectreport = pytest_runtest_logreport
+
+ def isinitpath(self, path):
+ return path in self._initialpaths
+
+ def gethookproxy(self, fspath):
+ return HookProxy(fspath, self.config)
+
+ def perform_collect(self, args=None, genitems=True):
+ hook = self.config.hook
+ try:
+ items = self._perform_collect(args, genitems)
+ hook.pytest_collection_modifyitems(session=self,
+ config=self.config, items=items)
+ finally:
+ hook.pytest_collection_finish(session=self)
+ return items
+
+ def _perform_collect(self, args, genitems):
+ if args is None:
+ args = self.config.args
+ self.trace("perform_collect", self, args)
+ self.trace.root.indent += 1
+ self._notfound = []
+ self._initialpaths = set()
+ self._initialparts = []
+ for arg in args:
+ parts = self._parsearg(arg)
+ self._initialparts.append(parts)
+ self._initialpaths.add(parts[0])
+ self.ihook.pytest_collectstart(collector=self)
+ rep = self.ihook.pytest_make_collect_report(collector=self)
+ self.ihook.pytest_collectreport(report=rep)
+ self.trace.root.indent -= 1
+ if self._notfound:
+ for arg, exc in self._notfound:
+ line = "(no name %r in any of %r)" % (arg, exc.args[0])
+ raise pytest.UsageError("not found: %s\n%s" %(arg, line))
+ if not genitems:
+ return rep.result
+ else:
+ self.items = items = []
+ if rep.passed:
+ for node in rep.result:
+ self.items.extend(self.genitems(node))
+ return items
+
+ def collect(self):
+ for parts in self._initialparts:
+ arg = "::".join(map(str, parts))
+ self.trace("processing argument", arg)
+ self.trace.root.indent += 1
+ try:
+ for x in self._collect(arg):
+ yield x
+ except NoMatch:
+ # we are inside a make_report hook so
+ # we cannot directly pass through the exception
+ self._notfound.append((arg, sys.exc_info()[1]))
+ self.trace.root.indent -= 1
+ break
+ self.trace.root.indent -= 1
+
+ def _collect(self, arg):
+ names = self._parsearg(arg)
+ path = names.pop(0)
+ if path.check(dir=1):
+ assert not names, "invalid arg %r" %(arg,)
+ for path in path.visit(fil=lambda x: x.check(file=1),
+ rec=self._recurse, bf=True, sort=True):
+ for x in self._collectfile(path):
+ yield x
+ else:
+ assert path.check(file=1)
+ for x in self.matchnodes(self._collectfile(path), names):
+ yield x
+
+ def _collectfile(self, path):
+ ihook = self.gethookproxy(path)
+ if not self.isinitpath(path):
+ if ihook.pytest_ignore_collect(path=path, config=self.config):
+ return ()
+ return ihook.pytest_collect_file(path=path, parent=self)
+
+ def _recurse(self, path):
+ ihook = self.gethookproxy(path.dirpath())
+ if ihook.pytest_ignore_collect(path=path, config=self.config):
+ return
+ for pat in self._norecursepatterns:
+ if path.check(fnmatch=pat):
+ return False
+ ihook = self.gethookproxy(path)
+ ihook.pytest_collect_directory(path=path, parent=self)
+ return True
+
+ def _tryconvertpyarg(self, x):
+ try:
+ mod = __import__(x, None, None, ['__doc__'])
+ except (ValueError, ImportError):
+ return x
+ p = py.path.local(mod.__file__)
+ if p.purebasename == "__init__":
+ p = p.dirpath()
+ else:
+ p = p.new(basename=p.purebasename+".py")
+ return str(p)
+
+ def _parsearg(self, arg):
+ """ return (fspath, names) tuple after checking the file exists. """
+ arg = str(arg)
+ if self.config.option.pyargs:
+ arg = self._tryconvertpyarg(arg)
+ parts = str(arg).split("::")
+ relpath = parts[0].replace("/", os.sep)
+ path = self.fspath.join(relpath, abs=True)
+ if not path.check():
+ if self.config.option.pyargs:
+ msg = "file or package not found: "
+ else:
+ msg = "file not found: "
+ raise pytest.UsageError(msg + arg)
+ parts[0] = path
+ return parts
+
+ def matchnodes(self, matching, names):
+ self.trace("matchnodes", matching, names)
+ self.trace.root.indent += 1
+ nodes = self._matchnodes(matching, names)
+ num = len(nodes)
+ self.trace("matchnodes finished -> ", num, "nodes")
+ self.trace.root.indent -= 1
+ if num == 0:
+ raise NoMatch(matching, names[:1])
+ return nodes
+
+ def _matchnodes(self, matching, names):
+ if not matching or not names:
+ return matching
+ name = names[0]
+ assert name
+ nextnames = names[1:]
+ resultnodes = []
+ for node in matching:
+ if isinstance(node, pytest.Item):
+ if not names:
+ resultnodes.append(node)
+ continue
+ assert isinstance(node, pytest.Collector)
+ node.ihook.pytest_collectstart(collector=node)
+ rep = node.ihook.pytest_make_collect_report(collector=node)
+ if rep.passed:
+ has_matched = False
+ for x in rep.result:
+ if x.name == name:
+ resultnodes.extend(self.matchnodes([x], nextnames))
+ has_matched = True
+ # XXX accept IDs that don't have "()" for class instances
+ if not has_matched and len(rep.result) == 1 and x.name == "()":
+ nextnames.insert(0, name)
+ resultnodes.extend(self.matchnodes([x], nextnames))
+ node.ihook.pytest_collectreport(report=rep)
+ return resultnodes
+
+ def genitems(self, node):
+ self.trace("genitems", node)
+ if isinstance(node, pytest.Item):
+ node.ihook.pytest_itemcollected(item=node)
+ yield node
+ else:
+ assert isinstance(node, pytest.Collector)
+ node.ihook.pytest_collectstart(collector=node)
+ rep = node.ihook.pytest_make_collect_report(collector=node)
+ if rep.passed:
+ for subnode in rep.result:
+ for x in self.genitems(subnode):
+ yield x
+ node.ihook.pytest_collectreport(report=rep)
diff --git a/lib/pypy/_pytest/mark.py b/lib/pypy/_pytest/mark.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/mark.py
@@ -0,0 +1,176 @@
+""" generic mechanism for marking and selecting python functions. """
+import pytest, py
+
+def pytest_namespace():
+ return {'mark': MarkGenerator()}
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group._addoption('-k',
+ action="store", dest="keyword", default='', metavar="KEYWORDEXPR",
+ help="only run tests which match given keyword expression. "
+ "An expression consists of space-separated terms. "
+ "Each term must match. Precede a term with '-' to negate. "
+ "Terminate expression with ':' to make the first match match "
+ "all subsequent tests (usually file-order). ")
+
+def pytest_collection_modifyitems(items, config):
+ keywordexpr = config.option.keyword
+ if not keywordexpr:
+ return
+ selectuntil = False
+ if keywordexpr[-1] == ":":
+ selectuntil = True
+ keywordexpr = keywordexpr[:-1]
+
+ remaining = []
+ deselected = []
+ for colitem in items:
+ if keywordexpr and skipbykeyword(colitem, keywordexpr):
+ deselected.append(colitem)
+ else:
+ remaining.append(colitem)
+ if selectuntil:
+ keywordexpr = None
+
+ if deselected:
+ config.hook.pytest_deselected(items=deselected)
+ items[:] = remaining
+
+def skipbykeyword(colitem, keywordexpr):
+ """ return True if the given keyword expression means to
+ skip this collector/item.
+ """
+ if not keywordexpr:
+ return
+
+ itemkeywords = getkeywords(colitem)
+ for key in filter(None, keywordexpr.split()):
+ eor = key[:1] == '-'
+ if eor:
+ key = key[1:]
+ if not (eor ^ matchonekeyword(key, itemkeywords)):
+ return True
+
+def getkeywords(node):
+ keywords = {}
+ while node is not None:
+ keywords.update(node.keywords)
+ node = node.parent
+ return keywords
+
+
+def matchonekeyword(key, itemkeywords):
+ for elem in key.split("."):
+ for kw in itemkeywords:
+ if elem in kw:
+ break
+ else:
+ return False
+ return True
+
+class MarkGenerator:
+ """ Factory for :class:`MarkDecorator` objects - exposed as
+ a ``py.test.mark`` singleton instance. Example::
+
+ import py
+ @py.test.mark.slowtest
+ def test_function():
+ pass
+
+ will set a 'slowtest' :class:`MarkInfo` object
+ on the ``test_function`` object. """
+
+ def __getattr__(self, name):
+ if name[0] == "_":
+ raise AttributeError(name)
+ return MarkDecorator(name)
+
+class MarkDecorator:
+ """ A decorator for test functions and test classes. When applied
+ it will create :class:`MarkInfo` objects which may be
+ :ref:`retrieved by hooks as item keywords <excontrolskip>`.
+ MarkDecorator instances are often created like this::
+
+ mark1 = py.test.mark.NAME # simple MarkDecorator
+ mark2 = py.test.mark.NAME(name1=value) # parametrized MarkDecorator
+
+ and can then be applied as decorators to test functions::
+
+ @mark2
+ def test_function():
+ pass
+ """
+ def __init__(self, name, args=None, kwargs=None):
+ self.markname = name
+ self.args = args or ()
+ self.kwargs = kwargs or {}
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ name = d.pop('markname')
+ return "<MarkDecorator %r %r>" %(name, d)
+
+ def __call__(self, *args, **kwargs):
+ """ if passed a single callable argument: decorate it with mark info.
+ otherwise add *args/**kwargs in-place to mark information. """
+ if args:
+ func = args[0]
+ if len(args) == 1 and hasattr(func, '__call__') or \
+ hasattr(func, '__bases__'):
+ if hasattr(func, '__bases__'):
+ if hasattr(func, 'pytestmark'):
+ l = func.pytestmark
+ if not isinstance(l, list):
+ func.pytestmark = [l, self]
+ else:
+ l.append(self)
+ else:
+ func.pytestmark = [self]
+ else:
+ holder = getattr(func, self.markname, None)
+ if holder is None:
+ holder = MarkInfo(self.markname, self.args, self.kwargs)
+ setattr(func, self.markname, holder)
+ else:
+ holder.kwargs.update(self.kwargs)
+ holder.args += self.args
+ return func
+ kw = self.kwargs.copy()
+ kw.update(kwargs)
+ args = self.args + args
+ return self.__class__(self.markname, args=args, kwargs=kw)
+
+class MarkInfo:
+ """ Marking object created by :class:`MarkDecorator` instances. """
+ def __init__(self, name, args, kwargs):
+ #: name of attribute
+ self.name = name
+ #: positional argument list, empty if none specified
+ self.args = args
+ #: keyword argument dictionary, empty if nothing specified
+ self.kwargs = kwargs
+
+ def __repr__(self):
+ return "<MarkInfo %r args=%r kwargs=%r>" % (
+ self.name, self.args, self.kwargs)
+
+def pytest_itemcollected(item):
+ if not isinstance(item, pytest.Function):
+ return
+ try:
+ func = item.obj.__func__
+ except AttributeError:
+ func = getattr(item.obj, 'im_func', item.obj)
+ pyclasses = (pytest.Class, pytest.Module)
+ for node in item.listchain():
+ if isinstance(node, pyclasses):
+ marker = getattr(node.obj, 'pytestmark', None)
+ if marker is not None:
+ if isinstance(marker, list):
+ for mark in marker:
+ mark(func)
+ else:
+ marker(func)
+ node = node.parent
+ item.keywords.update(py.builtin._getfuncdict(func))
diff --git a/lib/pypy/_pytest/monkeypatch.py b/lib/pypy/_pytest/monkeypatch.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/monkeypatch.py
@@ -0,0 +1,103 @@
+""" monkeypatching and mocking functionality. """
+
+import os, sys
+
+def pytest_funcarg__monkeypatch(request):
+ """The returned ``monkeypatch`` funcarg provides these
+ helper methods to modify objects, dictionaries or os.environ::
+
+ monkeypatch.setattr(obj, name, value, raising=True)
+ monkeypatch.delattr(obj, name, raising=True)
+ monkeypatch.setitem(mapping, name, value)
+ monkeypatch.delitem(obj, name, raising=True)
+ monkeypatch.setenv(name, value, prepend=False)
+ monkeypatch.delenv(name, value, raising=True)
+ monkeypatch.syspath_prepend(path)
+
+ All modifications will be undone after the requesting
+ test function has finished. The ``raising``
+ parameter determines if a KeyError or AttributeError
+ will be raised if the set/deletion operation has no target.
+ """
+ mpatch = monkeypatch()
+ request.addfinalizer(mpatch.undo)
+ return mpatch
+
+notset = object()
+
+class monkeypatch:
+ """ object keeping a record of setattr/item/env/syspath changes. """
+ def __init__(self):
+ self._setattr = []
+ self._setitem = []
+
+ def setattr(self, obj, name, value, raising=True):
+ """ set attribute ``name`` on ``obj`` to ``value``, by default
+ raise AttributeError if the attribute did not exist. """
+ oldval = getattr(obj, name, notset)
+ if raising and oldval is notset:
+ raise AttributeError("%r has no attribute %r" %(obj, name))
+ self._setattr.insert(0, (obj, name, oldval))
+ setattr(obj, name, value)
+
+ def delattr(self, obj, name, raising=True):
+ """ delete attribute ``name`` from ``obj``, by default raise
+ AttributeError if the attribute did not previously exist. """
+ if not hasattr(obj, name):
+ if raising:
+ raise AttributeError(name)
+ else:
+ self._setattr.insert(0, (obj, name, getattr(obj, name, notset)))
+ delattr(obj, name)
+
+ def setitem(self, dic, name, value):
+ """ set dictionary entry ``name`` to value. """
+ self._setitem.insert(0, (dic, name, dic.get(name, notset)))
+ dic[name] = value
+
+ def delitem(self, dic, name, raising=True):
+ """ delete ``name`` from dict, raise KeyError if it doesn't exist."""
+ if name not in dic:
+ if raising:
+ raise KeyError(name)
+ else:
+ self._setitem.insert(0, (dic, name, dic.get(name, notset)))
+ del dic[name]
+
+ def setenv(self, name, value, prepend=None):
+ """ set environment variable ``name`` to ``value``. if ``prepend``
+ is a character, read the current environment variable value
+ and prepend the ``value`` adjoined with the ``prepend`` character."""
+ value = str(value)
+ if prepend and name in os.environ:
+ value = value + prepend + os.environ[name]
+ self.setitem(os.environ, name, value)
+
+ def delenv(self, name, raising=True):
+ """ delete ``name`` from environment, raise KeyError if it does not exist."""
+ self.delitem(os.environ, name, raising=raising)
+
+ def syspath_prepend(self, path):
+ """ prepend ``path`` to ``sys.path`` list of import locations. """
+ if not hasattr(self, '_savesyspath'):
+ self._savesyspath = sys.path[:]
+ sys.path.insert(0, str(path))
+
+ def undo(self):
+ """ undo previous changes. This call consumes the
+ undo stack. Calling it a second time has no effect unless
+ you do more monkeypatching after the undo call."""
+ for obj, name, value in self._setattr:
+ if value is not notset:
+ setattr(obj, name, value)
+ else:
+ delattr(obj, name)
+ self._setattr[:] = []
+ for dictionary, name, value in self._setitem:
+ if value is notset:
+ del dictionary[name]
+ else:
+ dictionary[name] = value
+ self._setitem[:] = []
+ if hasattr(self, '_savesyspath'):
+ sys.path[:] = self._savesyspath
diff --git a/lib/pypy/_pytest/nose.py b/lib/pypy/_pytest/nose.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/nose.py
@@ -0,0 +1,47 @@
+""" run test suites written for nose. """
+
+import pytest, py
+import inspect
+import sys
+
+def pytest_runtest_makereport(__multicall__, item, call):
+ SkipTest = getattr(sys.modules.get('nose', None), 'SkipTest', None)
+ if SkipTest:
+ if call.excinfo and call.excinfo.errisinstance(SkipTest):
+ # let's substitute the excinfo with a py.test.skip one
+ call2 = call.__class__(lambda: py.test.skip(str(call.excinfo.value)), call.when)
+ call.excinfo = call2.excinfo
+
+
+def pytest_runtest_setup(item):
+ if isinstance(item, (pytest.Function)):
+ if isinstance(item.parent, pytest.Generator):
+ gen = item.parent
+ if not hasattr(gen, '_nosegensetup'):
+ call_optional(gen.obj, 'setup')
+ if isinstance(gen.parent, pytest.Instance):
+ call_optional(gen.parent.obj, 'setup')
+ gen._nosegensetup = True
+ if not call_optional(item.obj, 'setup'):
+ # call module level setup if there is no object level one
+ call_optional(item.parent.obj, 'setup')
+
+def pytest_runtest_teardown(item):
+ if isinstance(item, pytest.Function):
+ if not call_optional(item.obj, 'teardown'):
+ call_optional(item.parent.obj, 'teardown')
+ #if hasattr(item.parent, '_nosegensetup'):
+ # #call_optional(item._nosegensetup, 'teardown')
+ # del item.parent._nosegensetup
+
+def pytest_make_collect_report(collector):
+ if isinstance(collector, pytest.Generator):
+ call_optional(collector.obj, 'setup')
+
+def call_optional(obj, name):
+ method = getattr(obj, name, None)
+ if method:
+ # If there's any problems allow the exception to raise rather than
+ # silently ignoring them
+ method()
+ return True
diff --git a/lib/pypy/_pytest/pastebin.py b/lib/pypy/_pytest/pastebin.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/pastebin.py
@@ -0,0 +1,63 @@
+""" submit failure or test session information to a pastebin service. """
+import py, sys
+
+class url:
+ base = "http://paste.pocoo.org"
+ xmlrpc = base + "/xmlrpc/"
+ show = base + "/show/"
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting")
+ group._addoption('--pastebin', metavar="mode",
+ action='store', dest="pastebin", default=None,
+ type="choice", choices=['failed', 'all'],
+ help="send failed|all info to Pocoo pastebin service.")
+
+def pytest_configure(__multicall__, config):
+ import tempfile
+ __multicall__.execute()
+ if config.option.pastebin == "all":
+ config._pastebinfile = tempfile.TemporaryFile('w+')
+ tr = config.pluginmanager.getplugin('terminalreporter')
+ oldwrite = tr._tw.write
+ def tee_write(s, **kwargs):
+ oldwrite(s, **kwargs)
+ config._pastebinfile.write(str(s))
+ tr._tw.write = tee_write
+
+def pytest_unconfigure(config):
+ if hasattr(config, '_pastebinfile'):
+ config._pastebinfile.seek(0)
+ sessionlog = config._pastebinfile.read()
+ config._pastebinfile.close()
+ del config._pastebinfile
+ proxyid = getproxy().newPaste("python", sessionlog)
+ pastebinurl = "%s%s" % (url.show, proxyid)
+ sys.stderr.write("pastebin session-log: %s\n" % pastebinurl)
+ tr = config.pluginmanager.getplugin('terminalreporter')
+ del tr._tw.__dict__['write']
+
+def getproxy():
+ return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes
+
+def pytest_terminal_summary(terminalreporter):
+ if terminalreporter.config.option.pastebin != "failed":
+ return
+ tr = terminalreporter
+ if 'failed' in tr.stats:
+ terminalreporter.write_sep("=", "Sending information to Paste Service")
+ if tr.config.option.debug:
+ terminalreporter.write_line("xmlrpcurl: %s" %(url.xmlrpc,))
+ serverproxy = getproxy()
+ for rep in terminalreporter.stats.get('failed'):
+ try:
+ msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
+ except AttributeError:
+ msg = tr._getfailureheadline(rep)
+ tw = py.io.TerminalWriter(stringio=True)
+ rep.toterminal(tw)
+ s = tw.stringio.getvalue()
+ assert len(s)
+ proxyid = serverproxy.newPaste("python", s)
+ pastebinurl = "%s%s" % (url.show, proxyid)
+ tr.write_line("%s --> %s" %(msg, pastebinurl))
diff --git a/lib/pypy/_pytest/pdb.py b/lib/pypy/_pytest/pdb.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/pdb.py
@@ -0,0 +1,79 @@
+""" interactive debugging with PDB, the Python Debugger. """
+
+import pytest, py
+import sys
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group._addoption('--pdb',
+ action="store_true", dest="usepdb", default=False,
+ help="start the interactive Python debugger on errors.")
+
+def pytest_namespace():
+ return {'set_trace': pytestPDB().set_trace}
+
+def pytest_configure(config):
+ if config.getvalue("usepdb"):
+ config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
+
+class pytestPDB:
+ """ Pseudo PDB that defers to the real pdb. """
+ item = None
+
+ def set_trace(self):
+ """ invoke PDB set_trace debugging, dropping any IO capturing. """
+ frame = sys._getframe().f_back
+ item = getattr(self, 'item', None)
+ if item is not None:
+ capman = item.config.pluginmanager.getplugin("capturemanager")
+ out, err = capman.suspendcapture()
+ if hasattr(item, 'outerr'):
+ item.outerr = (item.outerr[0] + out, item.outerr[1] + err)
+ tw = py.io.TerminalWriter()
+ tw.line()
+ tw.sep(">", "PDB set_trace (IO-capturing turned off)")
+ py.std.pdb.Pdb().set_trace(frame)
+
+def pdbitem(item):
+ pytestPDB.item = item
+pytest_runtest_setup = pytest_runtest_call = pytest_runtest_teardown = pdbitem
+
+def pytest_runtest_makereport():
+ pytestPDB.item = None
+
+class PdbInvoke:
+ @pytest.mark.tryfirst
+ def pytest_runtest_makereport(self, item, call, __multicall__):
+ rep = __multicall__.execute()
+ if not call.excinfo or \
+ call.excinfo.errisinstance(pytest.skip.Exception) or \
+ call.excinfo.errisinstance(py.std.bdb.BdbQuit):
+ return rep
+ if "xfail" in rep.keywords:
+ return rep
+ # we assume that the above execute() suspended capturing
+ # XXX we re-use the TerminalReporter's terminalwriter
+ # because this seems to avoid some encoding related troubles
+ # for not completely clear reasons.
+ tw = item.config.pluginmanager.getplugin("terminalreporter")._tw
+ tw.line()
+ tw.sep(">", "traceback")
+ rep.toterminal(tw)
+ tw.sep(">", "entering PDB")
+ post_mortem(call.excinfo._excinfo[2])
+ rep._pdbshown = True
+ return rep
+
+def post_mortem(t):
+ pdb = py.std.pdb
+ class Pdb(pdb.Pdb):
+ def get_stack(self, f, t):
+ stack, i = pdb.Pdb.get_stack(self, f, t)
+ if f is None:
+ i = max(0, len(stack) - 1)
+ while i and stack[i][0].f_locals.get("__tracebackhide__", False):
+ i-=1
+ return stack, i
+ p = Pdb()
+ p.reset()
+ p.interaction(None, t)
diff --git a/lib/pypy/_pytest/pytester.py b/lib/pypy/_pytest/pytester.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/pytester.py
@@ -0,0 +1,685 @@
+""" (disabled by default) support for testing py.test and py.test plugins. """
+
+import py, pytest
+import sys, os
+import re
+import inspect
+import time
+from fnmatch import fnmatch
+from _pytest.main import Session, EXIT_OK
+from py.builtin import print_
+from _pytest.core import HookRelay
+
+def pytest_addoption(parser):
+ group = parser.getgroup("pylib")
+ group.addoption('--no-tools-on-path',
+ action="store_true", dest="notoolsonpath", default=False,
+ help=("discover tools on PATH instead of going through py.cmdline.")
+ )
+
+def pytest_configure(config):
+ # This might be called multiple times. Only take the first.
+ global _pytest_fullpath
+ import pytest
+ try:
+ _pytest_fullpath
+ except NameError:
+ _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+
+def pytest_funcarg___pytest(request):
+ return PytestArg(request)
+
+class PytestArg:
+ def __init__(self, request):
+ self.request = request
+
+ def gethookrecorder(self, hook):
+ hookrecorder = HookRecorder(hook._pm)
+ hookrecorder.start_recording(hook._hookspecs)
+ self.request.addfinalizer(hookrecorder.finish_recording)
+ return hookrecorder
+
+class ParsedCall:
+ def __init__(self, name, locals):
+ assert '_name' not in locals
+ self.__dict__.update(locals)
+ self.__dict__.pop('self')
+ self._name = name
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ del d['_name']
+ return "<ParsedCall %r(**%r)>" %(self._name, d)
+
+class HookRecorder:
+ def __init__(self, pluginmanager):
+ self._pluginmanager = pluginmanager
+ self.calls = []
+ self._recorders = {}
+
+ def start_recording(self, hookspecs):
+ if not isinstance(hookspecs, (list, tuple)):
+ hookspecs = [hookspecs]
+ for hookspec in hookspecs:
+ assert hookspec not in self._recorders
+ class RecordCalls:
+ _recorder = self
+ for name, method in vars(hookspec).items():
+ if name[0] != "_":
+ setattr(RecordCalls, name, self._makecallparser(method))
+ recorder = RecordCalls()
+ self._recorders[hookspec] = recorder
+ self._pluginmanager.register(recorder)
+ self.hook = HookRelay(hookspecs, pm=self._pluginmanager,
+ prefix="pytest_")
+
+ def finish_recording(self):
+ for recorder in self._recorders.values():
+ self._pluginmanager.unregister(recorder)
+ self._recorders.clear()
+
+ def _makecallparser(self, method):
+ name = method.__name__
+ args, varargs, varkw, default = py.std.inspect.getargspec(method)
+ if not args or args[0] != "self":
+ args.insert(0, 'self')
+ fspec = py.std.inspect.formatargspec(args, varargs, varkw, default)
+ # we use exec because we want to have early type
+ # errors on wrong input arguments, using
+ # *args/**kwargs delays this and gives errors
+ # elsewhere
+ exec (py.code.compile("""
+ def %(name)s%(fspec)s:
+ self._recorder.calls.append(
+ ParsedCall(%(name)r, locals()))
+ """ % locals()))
+ return locals()[name]
+
+ def getcalls(self, names):
+ if isinstance(names, str):
+ names = names.split()
+ for name in names:
+ for cls in self._recorders:
+ if name in vars(cls):
+ break
+ else:
+ raise ValueError("callname %r not found in %r" %(
+ name, self._recorders.keys()))
+ l = []
+ for call in self.calls:
+ if call._name in names:
+ l.append(call)
+ return l
+
+ def contains(self, entries):
+ __tracebackhide__ = True
+ from py.builtin import print_
+ i = 0
+ entries = list(entries)
+ backlocals = py.std.sys._getframe(1).f_locals
+ while entries:
+ name, check = entries.pop(0)
+ for ind, call in enumerate(self.calls[i:]):
+ if call._name == name:
+ print_("NAMEMATCH", name, call)
+ if eval(check, backlocals, call.__dict__):
+ print_("CHECKERMATCH", repr(check), "->", call)
+ else:
+ print_("NOCHECKERMATCH", repr(check), "-", call)
+ continue
+ i += ind + 1
+ break
+ print_("NONAMEMATCH", name, "with", call)
+ else:
+ py.test.fail("could not find %r check %r" % (name, check))
+
+ def popcall(self, name):
+ __tracebackhide__ = True
+ for i, call in enumerate(self.calls):
+ if call._name == name:
+ del self.calls[i]
+ return call
+ lines = ["could not find call %r, in:" % (name,)]
+ lines.extend([" %s" % str(x) for x in self.calls])
+ py.test.fail("\n".join(lines))
+
+ def getcall(self, name):
+ l = self.getcalls(name)
+ assert len(l) == 1, (name, l)
+ return l[0]
+
+
+def pytest_funcarg__linecomp(request):
+ return LineComp()
+
+def pytest_funcarg__LineMatcher(request):
+ return LineMatcher
+
+def pytest_funcarg__testdir(request):
+ tmptestdir = TmpTestdir(request)
+ return tmptestdir
+
+rex_outcome = re.compile("(\d+) (\w+)")
+class RunResult:
+ def __init__(self, ret, outlines, errlines, duration):
+ self.ret = ret
+ self.outlines = outlines
+ self.errlines = errlines
+ self.stdout = LineMatcher(outlines)
+ self.stderr = LineMatcher(errlines)
+ self.duration = duration
+
+ def parseoutcomes(self):
+ for line in reversed(self.outlines):
+ if 'seconds' in line:
+ outcomes = rex_outcome.findall(line)
+ if outcomes:
+ d = {}
+ for num, cat in outcomes:
+ d[cat] = int(num)
+ return d
+
+class TmpTestdir:
+ def __init__(self, request):
+ self.request = request
+ self.Config = request.config.__class__
+ self._pytest = request.getfuncargvalue("_pytest")
+ # XXX remove duplication with tmpdir plugin
+ basetmp = request.config._tmpdirhandler.ensuretemp("testdir")
+ name = request.function.__name__
+ for i in range(100):
+ try:
+ tmpdir = basetmp.mkdir(name + str(i))
+ except py.error.EEXIST:
+ continue
+ break
+ # we need to create another subdir
+ # because Directory.collect() currently loads
+ # conftest.py from sibling directories
+ self.tmpdir = tmpdir.mkdir(name)
+ self.plugins = []
+ self._syspathremove = []
+ self.chdir() # always chdir
+ self.request.addfinalizer(self.finalize)
+
+ def __repr__(self):
+ return "<TmpTestdir %r>" % (self.tmpdir,)
+
+ def finalize(self):
+ for p in self._syspathremove:
+ py.std.sys.path.remove(p)
+ if hasattr(self, '_olddir'):
+ self._olddir.chdir()
+ # delete modules that have been loaded from tmpdir
+ for name, mod in list(sys.modules.items()):
+ if mod:
+ fn = getattr(mod, '__file__', None)
+ if fn and fn.startswith(str(self.tmpdir)):
+ del sys.modules[name]
+
+ def getreportrecorder(self, obj):
+ if hasattr(obj, 'config'):
+ obj = obj.config
+ if hasattr(obj, 'hook'):
+ obj = obj.hook
+ assert hasattr(obj, '_hookspecs'), obj
+ reprec = ReportRecorder(obj)
+ reprec.hookrecorder = self._pytest.gethookrecorder(obj)
+ reprec.hook = reprec.hookrecorder.hook
+ return reprec
+
+ def chdir(self):
+ old = self.tmpdir.chdir()
+ if not hasattr(self, '_olddir'):
+ self._olddir = old
+
+ def _makefile(self, ext, args, kwargs):
+ items = list(kwargs.items())
+ if args:
+ source = py.builtin._totext("\n").join(
+ map(py.builtin._totext, args)) + py.builtin._totext("\n")
+ basename = self.request.function.__name__
+ items.insert(0, (basename, source))
+ ret = None
+ for name, value in items:
+ p = self.tmpdir.join(name).new(ext=ext)
+ source = py.builtin._totext(py.code.Source(value)).lstrip()
+ p.write(source.encode("utf-8"), "wb")
+ if ret is None:
+ ret = p
+ return ret
+
+
+ def makefile(self, ext, *args, **kwargs):
+ return self._makefile(ext, args, kwargs)
+
+ def makeini(self, source):
+ return self.makefile('cfg', setup=source)
+
+ def makeconftest(self, source):
+ return self.makepyfile(conftest=source)
+
+ def makeini(self, source):
+ return self.makefile('.ini', tox=source)
+
+ def getinicfg(self, source):
+ p = self.makeini(source)
+ return py.iniconfig.IniConfig(p)['pytest']
+
+ def makepyfile(self, *args, **kwargs):
+ return self._makefile('.py', args, kwargs)
+
+ def maketxtfile(self, *args, **kwargs):
+ return self._makefile('.txt', args, kwargs)
+
+ def syspathinsert(self, path=None):
+ if path is None:
+ path = self.tmpdir
+ py.std.sys.path.insert(0, str(path))
+ self._syspathremove.append(str(path))
+
+ def mkdir(self, name):
+ return self.tmpdir.mkdir(name)
+
+ def mkpydir(self, name):
+ p = self.mkdir(name)
+ p.ensure("__init__.py")
+ return p
+
+ Session = Session
+ def getnode(self, config, arg):
+ session = Session(config)
+ assert '::' not in str(arg)
+ p = py.path.local(arg)
+ x = session.fspath.bestrelpath(p)
+ config.hook.pytest_sessionstart(session=session)
+ res = session.perform_collect([x], genitems=False)[0]
+ config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+ return res
+
+ def getpathnode(self, path):
+ config = self.parseconfigure(path)
+ session = Session(config)
+ x = session.fspath.bestrelpath(path)
+ config.hook.pytest_sessionstart(session=session)
+ res = session.perform_collect([x], genitems=False)[0]
+ config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+ return res
+
+ def genitems(self, colitems):
+ session = colitems[0].session
+ result = []
+ for colitem in colitems:
+ result.extend(session.genitems(colitem))
+ return result
+
+ def inline_genitems(self, *args):
+ #config = self.parseconfig(*args)
+ config = self.parseconfigure(*args)
+ rec = self.getreportrecorder(config)
+ session = Session(config)
+ config.hook.pytest_sessionstart(session=session)
+ session.perform_collect()
+ config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+ return session.items, rec
+
+ def runitem(self, source):
+ # used from runner functional tests
+ item = self.getitem(source)
+ # the test class where we are called from wants to provide the runner
+ testclassinstance = py.builtin._getimself(self.request.function)
+ runner = testclassinstance.getrunner()
+ return runner(item)
+
+ def inline_runsource(self, source, *cmdlineargs):
+ p = self.makepyfile(source)
+ l = list(cmdlineargs) + [p]
+ return self.inline_run(*l)
+
+ def inline_runsource1(self, *args):
+ args = list(args)
+ source = args.pop()
+ p = self.makepyfile(source)
+ l = list(args) + [p]
+ reprec = self.inline_run(*l)
+ reports = reprec.getreports("pytest_runtest_logreport")
+ assert len(reports) == 1, reports
+ return reports[0]
+
+ def inline_run(self, *args):
+ args = ("-s", ) + args # otherwise FD leakage
+ config = self.parseconfig(*args)
+ reprec = self.getreportrecorder(config)
+ #config.pluginmanager.do_configure(config)
+ config.hook.pytest_cmdline_main(config=config)
+ #config.pluginmanager.do_unconfigure(config)
+ return reprec
+
+ def config_preparse(self):
+ config = self.Config()
+ for plugin in self.plugins:
+ if isinstance(plugin, str):
+ config.pluginmanager.import_plugin(plugin)
+ else:
+ if isinstance(plugin, dict):
+ plugin = PseudoPlugin(plugin)
+ if not config.pluginmanager.isregistered(plugin):
+ config.pluginmanager.register(plugin)
+ return config
+
+ def parseconfig(self, *args):
+ if not args:
+ args = (self.tmpdir,)
+ config = self.config_preparse()
+ args = list(args)
+ for x in args:
+ if str(x).startswith('--basetemp'):
+ break
+ else:
+ args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
+ config.parse(args)
+ return config
+
+ def reparseconfig(self, args=None):
+ """ this is used from tests that want to re-invoke parse(). """
+ if not args:
+ args = [self.tmpdir]
+ oldconfig = getattr(py.test, 'config', None)
+ try:
+ c = py.test.config = self.Config()
+ c.basetemp = py.path.local.make_numbered_dir(prefix="reparse",
+ keep=0, rootdir=self.tmpdir, lock_timeout=None)
+ c.parse(args)
+ c.pluginmanager.do_configure(c)
+ self.request.addfinalizer(lambda: c.pluginmanager.do_unconfigure(c))
+ return c
+ finally:
+ py.test.config = oldconfig
+
+ def parseconfigure(self, *args):
+ config = self.parseconfig(*args)
+ config.pluginmanager.do_configure(config)
+ self.request.addfinalizer(lambda:
+ config.pluginmanager.do_unconfigure(config))
+ return config
+
+ def getitem(self, source, funcname="test_func"):
+ for item in self.getitems(source):
+ if item.name == funcname:
+ return item
+ assert 0, "%r item not found in module:\n%s" %(funcname, source)
+
+ def getitems(self, source):
+ modcol = self.getmodulecol(source)
+ return self.genitems([modcol])
+
+ def getmodulecol(self, source, configargs=(), withinit=False):
+ kw = {self.request.function.__name__: py.code.Source(source).strip()}
+ path = self.makepyfile(**kw)
+ if withinit:
+ self.makepyfile(__init__ = "#")
+ self.config = config = self.parseconfigure(path, *configargs)
+ node = self.getnode(config, path)
+ #config.pluginmanager.do_unconfigure(config)
+ return node
+
+ def collect_by_name(self, modcol, name):
+ for colitem in modcol._memocollect():
+ if colitem.name == name:
+ return colitem
+
+ def popen(self, cmdargs, stdout, stderr, **kw):
+ env = os.environ.copy()
+ env['PYTHONPATH'] = os.pathsep.join(filter(None, [
+ str(os.getcwd()), env.get('PYTHONPATH', '')]))
+ kw['env'] = env
+ #print "env", env
+ return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
+
+ def pytestmain(self, *args, **kwargs):
+ ret = pytest.main(*args, **kwargs)
+ if ret == 2:
+ raise KeyboardInterrupt()
+ def run(self, *cmdargs):
+ return self._run(*cmdargs)
+
+ def _run(self, *cmdargs):
+ cmdargs = [str(x) for x in cmdargs]
+ p1 = self.tmpdir.join("stdout")
+ p2 = self.tmpdir.join("stderr")
+ print_("running", cmdargs, "curdir=", py.path.local())
+ f1 = p1.open("wb")
+ f2 = p2.open("wb")
+ now = time.time()
+ popen = self.popen(cmdargs, stdout=f1, stderr=f2,
+ close_fds=(sys.platform != "win32"))
+ ret = popen.wait()
+ f1.close()
+ f2.close()
+ out = p1.read("rb")
+ out = getdecoded(out).splitlines()
+ err = p2.read("rb")
+ err = getdecoded(err).splitlines()
+ def dump_lines(lines, fp):
+ try:
+ for line in lines:
+ py.builtin.print_(line, file=fp)
+ except UnicodeEncodeError:
+ print("couldn't print to %s because of encoding" % (fp,))
+ dump_lines(out, sys.stdout)
+ dump_lines(err, sys.stderr)
+ return RunResult(ret, out, err, time.time()-now)
+
+ def runpybin(self, scriptname, *args):
+ fullargs = self._getpybinargs(scriptname) + args
+ return self.run(*fullargs)
+
+ def _getpybinargs(self, scriptname):
+ if not self.request.config.getvalue("notoolsonpath"):
+ # XXX we rely on script refering to the correct environment
+ # we cannot use "(py.std.sys.executable,script)"
+ # becaue on windows the script is e.g. a py.test.exe
+ return (py.std.sys.executable, _pytest_fullpath,)
+ else:
+ py.test.skip("cannot run %r with --no-tools-on-path" % scriptname)
+
+ def runpython(self, script, prepend=True):
+ if prepend:
+ s = self._getsysprepend()
+ if s:
+ script.write(s + "\n" + script.read())
+ return self.run(sys.executable, script)
+
+ def _getsysprepend(self):
+ if self.request.config.getvalue("notoolsonpath"):
+ s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath())
+ else:
+ s = ""
+ return s
+
+ def runpython_c(self, command):
+ command = self._getsysprepend() + command
+ return self.run(py.std.sys.executable, "-c", command)
+
+ def runpytest(self, *args):
+ p = py.path.local.make_numbered_dir(prefix="runpytest-",
+ keep=None, rootdir=self.tmpdir)
+ args = ('--basetemp=%s' % p, ) + args
+ #for x in args:
+ # if '--confcutdir' in str(x):
+ # break
+ #else:
+ # pass
+ # args = ('--confcutdir=.',) + args
+ plugins = [x for x in self.plugins if isinstance(x, str)]
+ if plugins:
+ args = ('-p', plugins[0]) + args
+ return self.runpybin("py.test", *args)
+
+ def spawn_pytest(self, string, expect_timeout=10.0):
+ if self.request.config.getvalue("notoolsonpath"):
+ py.test.skip("--no-tools-on-path prevents running pexpect-spawn tests")
+ basetemp = self.tmpdir.mkdir("pexpect")
+ invoke = " ".join(map(str, self._getpybinargs("py.test")))
+ cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
+ return self.spawn(cmd, expect_timeout=expect_timeout)
+
+ def spawn(self, cmd, expect_timeout=10.0):
+ pexpect = py.test.importorskip("pexpect", "2.4")
+ if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine():
+ pytest.skip("pypy-64 bit not supported")
+ logfile = self.tmpdir.join("spawn.out")
+ child = pexpect.spawn(cmd, logfile=logfile.open("w"))
+ child.timeout = expect_timeout
+ return child
+
+def getdecoded(out):
+ try:
+ return out.decode("utf-8")
+ except UnicodeDecodeError:
+ return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
+ py.io.saferepr(out),)
+
+class PseudoPlugin:
+ def __init__(self, vars):
+ self.__dict__.update(vars)
+
+class ReportRecorder(object):
+ def __init__(self, hook):
+ self.hook = hook
+ self.pluginmanager = hook._pm
+ self.pluginmanager.register(self)
+
+ def getcall(self, name):
+ return self.hookrecorder.getcall(name)
+
+ def popcall(self, name):
+ return self.hookrecorder.popcall(name)
+
+ def getcalls(self, names):
+ """ return list of ParsedCall instances matching the given eventname. """
+ return self.hookrecorder.getcalls(names)
+
+ # functionality for test reports
+
+ def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
+ return [x.report for x in self.getcalls(names)]
+
+ def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport", when=None):
+ """ return a testreport whose dotted import path matches """
+ l = []
+ for rep in self.getreports(names=names):
+ if when and getattr(rep, 'when', None) != when:
+ continue
+ if not inamepart or inamepart in rep.nodeid.split("::"):
+ l.append(rep)
+ if not l:
+ raise ValueError("could not find test report matching %r: no test reports at all!" %
+ (inamepart,))
+ if len(l) > 1:
+ raise ValueError("found more than one testreport matching %r: %s" %(
+ inamepart, l))
+ return l[0]
+
+ def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'):
+ return [rep for rep in self.getreports(names) if rep.failed]
+
+ def getfailedcollections(self):
+ return self.getfailures('pytest_collectreport')
+
+ def listoutcomes(self):
+ passed = []
+ skipped = []
+ failed = []
+ for rep in self.getreports("pytest_runtest_logreport"):
+ if rep.passed:
+ if rep.when == "call":
+ passed.append(rep)
+ elif rep.skipped:
+ skipped.append(rep)
+ elif rep.failed:
+ failed.append(rep)
+ return passed, skipped, failed
+
+ def countoutcomes(self):
+ return [len(x) for x in self.listoutcomes()]
+
+ def assertoutcome(self, passed=0, skipped=0, failed=0):
+ realpassed, realskipped, realfailed = self.listoutcomes()
+ assert passed == len(realpassed)
+ assert skipped == len(realskipped)
+ assert failed == len(realfailed)
+
+ def clear(self):
+ self.hookrecorder.calls[:] = []
+
+ def unregister(self):
+ self.pluginmanager.unregister(self)
+ self.hookrecorder.finish_recording()
+
+class LineComp:
+ def __init__(self):
+ self.stringio = py.io.TextIO()
+
+ def assert_contains_lines(self, lines2):
+ """ assert that lines2 are contained (linearly) in lines1.
+ return a list of extralines found.
+ """
+ __tracebackhide__ = True
+ val = self.stringio.getvalue()
+ self.stringio.truncate(0)
+ self.stringio.seek(0)
+ lines1 = val.split("\n")
+ return LineMatcher(lines1).fnmatch_lines(lines2)
+
+class LineMatcher:
+ def __init__(self, lines):
+ self.lines = lines
+
+ def str(self):
+ return "\n".join(self.lines)
+
+ def _getlines(self, lines2):
+ if isinstance(lines2, str):
+ lines2 = py.code.Source(lines2)
+ if isinstance(lines2, py.code.Source):
+ lines2 = lines2.strip().lines
+ return lines2
+
+ def fnmatch_lines_random(self, lines2):
+ lines2 = self._getlines(lines2)
+ for line in lines2:
+ for x in self.lines:
+ if line == x or fnmatch(x, line):
+ print_("matched: ", repr(line))
+ break
+ else:
+ raise ValueError("line %r not found in output" % line)
+
+ def fnmatch_lines(self, lines2):
+ def show(arg1, arg2):
+ py.builtin.print_(arg1, arg2, file=py.std.sys.stderr)
+ lines2 = self._getlines(lines2)
+ lines1 = self.lines[:]
+ nextline = None
+ extralines = []
+ __tracebackhide__ = True
+ for line in lines2:
+ nomatchprinted = False
+ while lines1:
+ nextline = lines1.pop(0)
+ if line == nextline:
+ show("exact match:", repr(line))
+ break
+ elif fnmatch(nextline, line):
+ show("fnmatch:", repr(line))
+ show(" with:", repr(nextline))
+ break
+ else:
+ if not nomatchprinted:
+ show("nomatch:", repr(line))
+ nomatchprinted = True
+ show(" and:", repr(nextline))
+ extralines.append(nextline)
+ else:
+ py.test.fail("remains unmatched: %r, see stderr" % (line,))
diff --git a/lib/pypy/_pytest/python.py b/lib/pypy/_pytest/python.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/python.py
@@ -0,0 +1,871 @@
+""" Python test discovery, setup and run of test functions. """
+import py
+import inspect
+import sys
+import pytest
+from py._code.code import TerminalRepr
+
+import _pytest
+cutdir = py.path.local(_pytest.__file__).dirpath()
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group.addoption('--funcargs',
+ action="store_true", dest="showfuncargs", default=False,
+ help="show available function arguments, sorted by plugin")
+ parser.addini("python_files", type="args",
+ default=('test_*.py', '*_test.py'),
+ help="glob-style file patterns for Python test module discovery")
+ parser.addini("python_classes", type="args", default=("Test",),
+ help="prefixes for Python test class discovery")
+ parser.addini("python_functions", type="args", default=("test",),
+ help="prefixes for Python test function and method discovery")
+
+def pytest_cmdline_main(config):
+ if config.option.showfuncargs:
+ showfuncargs(config)
+ return 0
+
+ at pytest.mark.trylast
+def pytest_namespace():
+ raises.Exception = pytest.fail.Exception
+ return {
+ 'raises' : raises,
+ 'collect': {
+ 'Module': Module, 'Class': Class, 'Instance': Instance,
+ 'Function': Function, 'Generator': Generator,
+ '_fillfuncargs': fillfuncargs}
+ }
+
+def pytest_funcarg__pytestconfig(request):
+ """ the pytest config object with access to command line opts."""
+ return request.config
+
+def pytest_pyfunc_call(__multicall__, pyfuncitem):
+ if not __multicall__.execute():
+ testfunction = pyfuncitem.obj
+ if pyfuncitem._isyieldedfunction():
+ testfunction(*pyfuncitem._args)
+ else:
+ funcargs = pyfuncitem.funcargs
+ testfunction(**funcargs)
+
+def pytest_collect_file(path, parent):
+ ext = path.ext
+ pb = path.purebasename
+ if ext == ".py":
+ if not parent.session.isinitpath(path):
+ for pat in parent.config.getini('python_files'):
+ if path.fnmatch(pat):
+ break
+ else:
+ return
+ return parent.ihook.pytest_pycollect_makemodule(
+ path=path, parent=parent)
+
+def pytest_pycollect_makemodule(path, parent):
+ return Module(path, parent)
+
+def pytest_pycollect_makeitem(__multicall__, collector, name, obj):
+ res = __multicall__.execute()
+ if res is not None:
+ return res
+ if inspect.isclass(obj):
+ #if hasattr(collector.obj, 'unittest'):
+ # return # we assume it's a mixin class for a TestCase derived one
+ if collector.classnamefilter(name):
+ if not hasinit(obj):
+ Class = collector._getcustomclass("Class")
+ return Class(name, parent=collector)
+ elif collector.funcnamefilter(name) and hasattr(obj, '__call__'):
+ if is_generator(obj):
+ return Generator(name, parent=collector)
+ else:
+ return collector._genfunctions(name, obj)
+
+def is_generator(func):
+ try:
+ return py.code.getrawcode(func).co_flags & 32 # generator function
+ except AttributeError: # builtin functions have no bytecode
+ # assume them to not be generators
+ return False
+
+class PyobjMixin(object):
+ def obj():
+ def fget(self):
+ try:
+ return self._obj
+ except AttributeError:
+ self._obj = obj = self._getobj()
+ return obj
+ def fset(self, value):
+ self._obj = value
+ return property(fget, fset, None, "underlying python object")
+ obj = obj()
+
+ def _getobj(self):
+ return getattr(self.parent.obj, self.name)
+
+ def getmodpath(self, stopatmodule=True, includemodule=False):
+ """ return python path relative to the containing module. """
+ chain = self.listchain()
+ chain.reverse()
+ parts = []
+ for node in chain:
+ if isinstance(node, Instance):
+ continue
+ name = node.name
+ if isinstance(node, Module):
+ assert name.endswith(".py")
+ name = name[:-3]
+ if stopatmodule:
+ if includemodule:
+ parts.append(name)
+ break
+ parts.append(name)
+ parts.reverse()
+ s = ".".join(parts)
+ return s.replace(".[", "[")
+
+ def _getfslineno(self):
+ try:
+ return self._fslineno
+ except AttributeError:
+ pass
+ obj = self.obj
+ # xxx let decorators etc specify a sane ordering
+ if hasattr(obj, 'place_as'):
+ obj = obj.place_as
+
+ self._fslineno = py.code.getfslineno(obj)
+ return self._fslineno
+
+ def reportinfo(self):
+ # XXX caching?
+ obj = self.obj
+ if hasattr(obj, 'compat_co_firstlineno'):
+ # nose compatibility
+ fspath = sys.modules[obj.__module__].__file__
+ if fspath.endswith(".pyc"):
+ fspath = fspath[:-1]
+ #assert 0
+ #fn = inspect.getsourcefile(obj) or inspect.getfile(obj)
+ lineno = obj.compat_co_firstlineno
+ modpath = obj.__module__
+ else:
+ fspath, lineno = self._getfslineno()
+ modpath = self.getmodpath()
+ return fspath, lineno, modpath
+
+class PyCollectorMixin(PyobjMixin, pytest.Collector):
+
+ def funcnamefilter(self, name):
+ for prefix in self.config.getini("python_functions"):
+ if name.startswith(prefix):
+ return True
+
+ def classnamefilter(self, name):
+ for prefix in self.config.getini("python_classes"):
+ if name.startswith(prefix):
+ return True
+
+ def collect(self):
+ # NB. we avoid random getattrs and peek in the __dict__ instead
+ # (XXX originally introduced from a PyPy need, still true?)
+ dicts = [getattr(self.obj, '__dict__', {})]
+ for basecls in inspect.getmro(self.obj.__class__):
+ dicts.append(basecls.__dict__)
+ seen = {}
+ l = []
+ for dic in dicts:
+ for name, obj in dic.items():
+ if name in seen:
+ continue
+ seen[name] = True
+ if name[0] != "_":
+ res = self.makeitem(name, obj)
+ if res is None:
+ continue
+ if not isinstance(res, list):
+ res = [res]
+ l.extend(res)
+ l.sort(key=lambda item: item.reportinfo()[:2])
+ return l
+
+ def makeitem(self, name, obj):
+ return self.ihook.pytest_pycollect_makeitem(
+ collector=self, name=name, obj=obj)
+
+ def _genfunctions(self, name, funcobj):
+ module = self.getparent(Module).obj
+ clscol = self.getparent(Class)
+ cls = clscol and clscol.obj or None
+ metafunc = Metafunc(funcobj, config=self.config,
+ cls=cls, module=module)
+ gentesthook = self.config.hook.pytest_generate_tests
+ extra = [module]
+ if cls is not None:
+ extra.append(cls())
+ plugins = self.getplugins() + extra
+ gentesthook.pcall(plugins, metafunc=metafunc)
+ Function = self._getcustomclass("Function")
+ if not metafunc._calls:
+ return Function(name, parent=self)
+ l = []
+ for callspec in metafunc._calls:
+ subname = "%s[%s]" %(name, callspec.id)
+ function = Function(name=subname, parent=self,
+ callspec=callspec, callobj=funcobj, keywords={callspec.id:True})
+ l.append(function)
+ return l
+
+
+class Module(pytest.File, PyCollectorMixin):
+ def _getobj(self):
+ return self._memoizedcall('_obj', self._importtestmodule)
+
+ def _importtestmodule(self):
+ # we assume we are only called once per module
+ from _pytest import assertion
+ assertion.before_module_import(self)
+ try:
+ try:
+ mod = self.fspath.pyimport(ensuresyspath=True)
+ finally:
+ assertion.after_module_import(self)
+ except SyntaxError:
+ excinfo = py.code.ExceptionInfo()
+ raise self.CollectError(excinfo.getrepr(style="short"))
+ except self.fspath.ImportMismatchError:
+ e = sys.exc_info()[1]
+ raise self.CollectError(
+ "import file mismatch:\n"
+ "imported module %r has this __file__ attribute:\n"
+ " %s\n"
+ "which is not the same as the test file we want to collect:\n"
+ " %s\n"
+ "HINT: use a unique basename for your test file modules"
+ % e.args
+ )
+ #print "imported test module", mod
+ self.config.pluginmanager.consider_module(mod)
+ return mod
+
+ def setup(self):
+ if hasattr(self.obj, 'setup_module'):
+ #XXX: nose compat hack, move to nose plugin
+ # if it takes a positional arg, its probably a pytest style one
+ # so we pass the current module object
+ if inspect.getargspec(self.obj.setup_module)[0]:
+ self.obj.setup_module(self.obj)
+ else:
+ self.obj.setup_module()
+
+ def teardown(self):
+ if hasattr(self.obj, 'teardown_module'):
+ #XXX: nose compat hack, move to nose plugin
+ # if it takes a positional arg, its probably a py.test style one
+ # so we pass the current module object
+ if inspect.getargspec(self.obj.teardown_module)[0]:
+ self.obj.teardown_module(self.obj)
+ else:
+ self.obj.teardown_module()
+
+class Class(PyCollectorMixin, pytest.Collector):
+
+ def collect(self):
+ return [self._getcustomclass("Instance")(name="()", parent=self)]
+
+ def setup(self):
+ setup_class = getattr(self.obj, 'setup_class', None)
+ if setup_class is not None:
+ setup_class = getattr(setup_class, 'im_func', setup_class)
+ setup_class(self.obj)
+
+ def teardown(self):
+ teardown_class = getattr(self.obj, 'teardown_class', None)
+ if teardown_class is not None:
+ teardown_class = getattr(teardown_class, 'im_func', teardown_class)
+ teardown_class(self.obj)
+
+class Instance(PyCollectorMixin, pytest.Collector):
+ def _getobj(self):
+ return self.parent.obj()
+
+ def newinstance(self):
+ self.obj = self._getobj()
+ return self.obj
+
+class FunctionMixin(PyobjMixin):
+ """ mixin for the code common to Function and Generator.
+ """
+ def setup(self):
+ """ perform setup for this test function. """
+ if hasattr(self, '_preservedparent'):
+ obj = self._preservedparent
+ elif isinstance(self.parent, Instance):
+ obj = self.parent.newinstance()
+ self.obj = self._getobj()
+ else:
+ obj = self.parent.obj
+ if inspect.ismethod(self.obj):
+ name = 'setup_method'
+ else:
+ name = 'setup_function'
+ setup_func_or_method = getattr(obj, name, None)
+ if setup_func_or_method is not None:
+ setup_func_or_method(self.obj)
+
+ def teardown(self):
+ """ perform teardown for this test function. """
+ if inspect.ismethod(self.obj):
+ name = 'teardown_method'
+ else:
+ name = 'teardown_function'
+ obj = self.parent.obj
+ teardown_func_or_meth = getattr(obj, name, None)
+ if teardown_func_or_meth is not None:
+ teardown_func_or_meth(self.obj)
+
+ def _prunetraceback(self, excinfo):
+ if hasattr(self, '_obj') and not self.config.option.fulltrace:
+ code = py.code.Code(self.obj)
+ path, firstlineno = code.path, code.firstlineno
+ traceback = excinfo.traceback
+ ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
+ if ntraceback == traceback:
+ ntraceback = ntraceback.cut(path=path)
+ if ntraceback == traceback:
+ ntraceback = ntraceback.cut(excludepath=cutdir)
+ excinfo.traceback = ntraceback.filter()
+
+ def _repr_failure_py(self, excinfo, style="long"):
+ if excinfo.errisinstance(FuncargRequest.LookupError):
+ fspath, lineno, msg = self.reportinfo()
+ lines, _ = inspect.getsourcelines(self.obj)
+ for i, line in enumerate(lines):
+ if line.strip().startswith('def'):
+ return FuncargLookupErrorRepr(fspath, lineno,
+ lines[:i+1], str(excinfo.value))
+ if excinfo.errisinstance(pytest.fail.Exception):
+ if not excinfo.value.pytrace:
+ return str(excinfo.value)
+ return super(FunctionMixin, self)._repr_failure_py(excinfo,
+ style=style)
+
+ def repr_failure(self, excinfo, outerr=None):
+ assert outerr is None, "XXX outerr usage is deprecated"
+ return self._repr_failure_py(excinfo,
+ style=self.config.option.tbstyle)
+
+class FuncargLookupErrorRepr(TerminalRepr):
+ def __init__(self, filename, firstlineno, deflines, errorstring):
+ self.deflines = deflines
+ self.errorstring = errorstring
+ self.filename = filename
+ self.firstlineno = firstlineno
+
+ def toterminal(self, tw):
+ tw.line()
+ for line in self.deflines:
+ tw.line(" " + line.strip())
+ for line in self.errorstring.split("\n"):
+ tw.line(" " + line.strip(), red=True)
+ tw.line()
+ tw.line("%s:%d" % (self.filename, self.firstlineno+1))
+
+class Generator(FunctionMixin, PyCollectorMixin, pytest.Collector):
+ def collect(self):
+ # test generators are seen as collectors but they also
+ # invoke setup/teardown on popular request
+ # (induced by the common "test_*" naming shared with normal tests)
+ self.session._setupstate.prepare(self)
+ # see FunctionMixin.setup and test_setupstate_is_preserved_134
+ self._preservedparent = self.parent.obj
+ l = []
+ seen = {}
+ for i, x in enumerate(self.obj()):
+ name, call, args = self.getcallargs(x)
+ if not py.builtin.callable(call):
+ raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
+ if name is None:
+ name = "[%d]" % i
+ else:
+ name = "['%s']" % name
+ if name in seen:
+ raise ValueError("%r generated tests with non-unique name %r" %(self, name))
+ seen[name] = True
+ l.append(self.Function(name, self, args=args, callobj=call))
+ return l
+
+ def getcallargs(self, obj):
+ if not isinstance(obj, (tuple, list)):
+ obj = (obj,)
+ # explicit naming
+ if isinstance(obj[0], py.builtin._basestring):
+ name = obj[0]
+ obj = obj[1:]
+ else:
+ name = None
+ call, args = obj[0], obj[1:]
+ return name, call, args
+
+
+#
+# Test Items
+#
+_dummy = object()
+class Function(FunctionMixin, pytest.Item):
+ """ a Function Item is responsible for setting up
+ and executing a Python callable test object.
+ """
+ _genid = None
+ def __init__(self, name, parent=None, args=None, config=None,
+ callspec=None, callobj=_dummy, keywords=None, session=None):
+ super(Function, self).__init__(name, parent,
+ config=config, session=session)
+ self._args = args
+ if self._isyieldedfunction():
+ assert not callspec, (
+ "yielded functions (deprecated) cannot have funcargs")
+ else:
+ if callspec is not None:
+ self.funcargs = callspec.funcargs or {}
+ self._genid = callspec.id
+ if hasattr(callspec, "param"):
+ self._requestparam = callspec.param
+ else:
+ self.funcargs = {}
+ if callobj is not _dummy:
+ self._obj = callobj
+ self.function = getattr(self.obj, 'im_func', self.obj)
+ self.keywords.update(py.builtin._getfuncdict(self.obj) or {})
+ if keywords:
+ self.keywords.update(keywords)
+
+ def _getobj(self):
+ name = self.name
+ i = name.find("[") # parametrization
+ if i != -1:
+ name = name[:i]
+ return getattr(self.parent.obj, name)
+
+ def _isyieldedfunction(self):
+ return self._args is not None
+
+ def runtest(self):
+ """ execute the underlying test function. """
+ self.ihook.pytest_pyfunc_call(pyfuncitem=self)
+
+ def setup(self):
+ super(Function, self).setup()
+ if hasattr(self, 'funcargs'):
+ fillfuncargs(self)
+
+ def __eq__(self, other):
+ try:
+ return (self.name == other.name and
+ self._args == other._args and
+ self.parent == other.parent and
+ self.obj == other.obj and
+ getattr(self, '_genid', None) ==
+ getattr(other, '_genid', None)
+ )
+ except AttributeError:
+ pass
+ return False
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash((self.parent, self.name))
+
+def hasinit(obj):
+ init = getattr(obj, '__init__', None)
+ if init:
+ if init != object.__init__:
+ return True
+
+
+def getfuncargnames(function, startindex=None):
+ # XXX merge with main.py's varnames
+ argnames = py.std.inspect.getargs(py.code.getrawcode(function))[0]
+ if startindex is None:
+ startindex = py.std.inspect.ismethod(function) and 1 or 0
+ defaults = getattr(function, 'func_defaults',
+ getattr(function, '__defaults__', None)) or ()
+ numdefaults = len(defaults)
+ if numdefaults:
+ return argnames[startindex:-numdefaults]
+ return argnames[startindex:]
+
+def fillfuncargs(function):
+ """ fill missing funcargs. """
+ request = FuncargRequest(pyfuncitem=function)
+ request._fillfuncargs()
+
+_notexists = object()
+class CallSpec:
+ def __init__(self, funcargs, id, param):
+ self.funcargs = funcargs
+ self.id = id
+ if param is not _notexists:
+ self.param = param
+ def __repr__(self):
+ return "<CallSpec id=%r param=%r funcargs=%r>" %(
+ self.id, getattr(self, 'param', '?'), self.funcargs)
+
+class Metafunc:
+ def __init__(self, function, config=None, cls=None, module=None):
+ self.config = config
+ self.module = module
+ self.function = function
+ self.funcargnames = getfuncargnames(function,
+ startindex=int(cls is not None))
+ self.cls = cls
+ self.module = module
+ self._calls = []
+ self._ids = py.builtin.set()
+
+ def addcall(self, funcargs=None, id=_notexists, param=_notexists):
+ """ add a new call to the underlying test function during the
+ collection phase of a test run. Note that request.addcall() is
+ called during the test collection phase prior and independently
+ to actual test execution. Therefore you should perform setup
+ of resources in a funcarg factory which can be instrumented
+ with the ``param``.
+
+ :arg funcargs: argument keyword dictionary used when invoking
+ the test function.
+
+ :arg id: used for reporting and identification purposes. If you
+ don't supply an `id` the length of the current
+ list of calls to the test function will be used.
+
+ :arg param: will be exposed to a later funcarg factory invocation
+ through the ``request.param`` attribute. It allows one to
+ defer test fixture setup activities to when an actual
+ test is run.
+ """
+ assert funcargs is None or isinstance(funcargs, dict)
+ if funcargs is not None:
+ for name in funcargs:
+ if name not in self.funcargnames:
+ pytest.fail("funcarg %r not used in this function." % name)
+ if id is None:
+ raise ValueError("id=None not allowed")
+ if id is _notexists:
+ id = len(self._calls)
+ id = str(id)
+ if id in self._ids:
+ raise ValueError("duplicate id %r" % id)
+ self._ids.add(id)
+ self._calls.append(CallSpec(funcargs, id, param))
+
+class FuncargRequest:
+ """ A request for function arguments from a test function.
+
+ Note that there is an optional ``param`` attribute in case
+ there was an invocation to metafunc.addcall(param=...).
+ If no such call was done in a ``pytest_generate_tests``
+ hook, the attribute will not be present.
+ """
+ _argprefix = "pytest_funcarg__"
+ _argname = None
+
+ class LookupError(LookupError):
+ """ error on performing funcarg request. """
+
+ def __init__(self, pyfuncitem):
+ self._pyfuncitem = pyfuncitem
+ if hasattr(pyfuncitem, '_requestparam'):
+ self.param = pyfuncitem._requestparam
+ extra = [obj for obj in (self.module, self.instance) if obj]
+ self._plugins = pyfuncitem.getplugins() + extra
+ self._funcargs = self._pyfuncitem.funcargs.copy()
+ self._name2factory = {}
+ self._currentarg = None
+
+ @property
+ def function(self):
+ """ function object of the test invocation. """
+ return self._pyfuncitem.obj
+
+ @property
+ def keywords(self):
+ """ keywords of the test function item.
+
+ .. versionadded:: 2.0
+ """
+ return self._pyfuncitem.keywords
+
+ @property
+ def module(self):
+ """ module where the test function was collected. """
+ return self._pyfuncitem.getparent(pytest.Module).obj
+
+ @property
+ def cls(self):
+ """ class (can be None) where the test function was collected. """
+ clscol = self._pyfuncitem.getparent(pytest.Class)
+ if clscol:
+ return clscol.obj
+ @property
+ def instance(self):
+ """ instance (can be None) on which test function was collected. """
+ return py.builtin._getimself(self.function)
+
+ @property
+ def config(self):
+ """ the pytest config object associated with this request. """
+ return self._pyfuncitem.config
+
+ @property
+ def fspath(self):
+ """ the file system path of the test module which collected this test. """
+ return self._pyfuncitem.fspath
+
+ def _fillfuncargs(self):
+ argnames = getfuncargnames(self.function)
+ if argnames:
+ assert not getattr(self._pyfuncitem, '_args', None), (
+ "yielded functions cannot have funcargs")
+ for argname in argnames:
+ if argname not in self._pyfuncitem.funcargs:
+ self._pyfuncitem.funcargs[argname] = self.getfuncargvalue(argname)
+
+
+ def applymarker(self, marker):
+ """ apply a marker to a single test function invocation.
+ This method is useful if you don't want to have a keyword/marker
+ on all function invocations.
+
+ :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
+ created by a call to ``py.test.mark.NAME(...)``.
+ """
+ if not isinstance(marker, py.test.mark.XYZ.__class__):
+ raise ValueError("%r is not a py.test.mark.* object")
+ self._pyfuncitem.keywords[marker.markname] = marker
+
+ def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
+ """ return a testing resource managed by ``setup`` &
+ ``teardown`` calls. ``scope`` and ``extrakey`` determine when the
+ ``teardown`` function will be called so that subsequent calls to
+ ``setup`` would recreate the resource.
+
+ :arg teardown: function receiving a previously setup resource.
+ :arg setup: a no-argument function creating a resource.
+ :arg scope: a string value out of ``function``, ``class``, ``module``
+ or ``session`` indicating the caching lifecycle of the resource.
+ :arg extrakey: added to internal caching key of (funcargname, scope).
+ """
+ if not hasattr(self.config, '_setupcache'):
+ self.config._setupcache = {} # XXX weakref?
+ cachekey = (self._currentarg, self._getscopeitem(scope), extrakey)
+ cache = self.config._setupcache
+ try:
+ val = cache[cachekey]
+ except KeyError:
+ val = setup()
+ cache[cachekey] = val
+ if teardown is not None:
+ def finalizer():
+ del cache[cachekey]
+ teardown(val)
+ self._addfinalizer(finalizer, scope=scope)
+ return val
+
+ def getfuncargvalue(self, argname):
+ """ Retrieve a function argument by name for this test
+ function invocation. This allows one function argument factory
+ to call another function argument factory. If there are two
+ funcarg factories for the same test function argument the first
+ factory may use ``getfuncargvalue`` to call the second one and
+ do something additional with the resource.
+ """
+ try:
+ return self._funcargs[argname]
+ except KeyError:
+ pass
+ if argname not in self._name2factory:
+ self._name2factory[argname] = self.config.pluginmanager.listattr(
+ plugins=self._plugins,
+ attrname=self._argprefix + str(argname)
+ )
+ #else: we are called recursively
+ if not self._name2factory[argname]:
+ self._raiselookupfailed(argname)
+ funcargfactory = self._name2factory[argname].pop()
+ oldarg = self._currentarg
+ self._currentarg = argname
+ try:
+ self._funcargs[argname] = res = funcargfactory(request=self)
+ finally:
+ self._currentarg = oldarg
+ return res
+
+ def _getscopeitem(self, scope):
+ if scope == "function":
+ return self._pyfuncitem
+ elif scope == "session":
+ return None
+ elif scope == "class":
+ x = self._pyfuncitem.getparent(pytest.Class)
+ if x is not None:
+ return x
+ scope = "module"
+ if scope == "module":
+ return self._pyfuncitem.getparent(pytest.Module)
+ raise ValueError("unknown finalization scope %r" %(scope,))
+
+ def addfinalizer(self, finalizer):
+ """add finalizer function to be called after test function
+ finished execution. """
+ self._addfinalizer(finalizer, scope="function")
+
+ def _addfinalizer(self, finalizer, scope):
+ colitem = self._getscopeitem(scope)
+ self._pyfuncitem.session._setupstate.addfinalizer(
+ finalizer=finalizer, colitem=colitem)
+
+ def __repr__(self):
+ return "<FuncargRequest for %r>" %(self._pyfuncitem)
+
+ def _raiselookupfailed(self, argname):
+ available = []
+ for plugin in self._plugins:
+ for name in vars(plugin):
+ if name.startswith(self._argprefix):
+ name = name[len(self._argprefix):]
+ if name not in available:
+ available.append(name)
+ fspath, lineno, msg = self._pyfuncitem.reportinfo()
+ msg = "LookupError: no factory found for function argument %r" % (argname,)
+ msg += "\n available funcargs: %s" %(", ".join(available),)
+ msg += "\n use 'py.test --funcargs [testpath]' for help on them."
+ raise self.LookupError(msg)
+
+def showfuncargs(config):
+ from _pytest.main import wrap_session
+ return wrap_session(config, _showfuncargs_main)
+
+def _showfuncargs_main(config, session):
+ session.perform_collect()
+ if session.items:
+ plugins = session.items[0].getplugins()
+ else:
+ plugins = session.getplugins()
+ curdir = py.path.local()
+ tw = py.io.TerminalWriter()
+ verbose = config.getvalue("verbose")
+ for plugin in plugins:
+ available = []
+ for name, factory in vars(plugin).items():
+ if name.startswith(FuncargRequest._argprefix):
+ name = name[len(FuncargRequest._argprefix):]
+ if name not in available:
+ available.append([name, factory])
+ if available:
+ pluginname = plugin.__name__
+ for name, factory in available:
+ loc = getlocation(factory, curdir)
+ if verbose:
+ funcargspec = "%s -- %s" %(name, loc,)
+ else:
+ funcargspec = name
+ tw.line(funcargspec, green=True)
+ doc = factory.__doc__ or ""
+ if doc:
+ for line in doc.split("\n"):
+ tw.line(" " + line.strip())
+ else:
+ tw.line(" %s: no docstring available" %(loc,),
+ red=True)
+
+def getlocation(function, curdir):
+ import inspect
+ fn = py.path.local(inspect.getfile(function))
+ lineno = py.builtin._getcode(function).co_firstlineno
+ if fn.relto(curdir):
+ fn = fn.relto(curdir)
+ return "%s:%d" %(fn, lineno+1)
+
+# builtin pytest.raises helper
+
+def raises(ExpectedException, *args, **kwargs):
+ """ assert that a code block/function call raises @ExpectedException
+ and raise a failure exception otherwise.
+
+ If using Python 2.5 or above, you may use this function as a
+ context manager::
+
+ >>> with raises(ZeroDivisionError):
+ ... 1/0
+
+ Or you can specify a callable by passing a to-be-called lambda::
+
+ >>> raises(ZeroDivisionError, lambda: 1/0)
+ <ExceptionInfo ...>
+
+ or you can specify an arbitrary callable with arguments::
+
+ >>> def f(x): return 1/x
+ ...
+ >>> raises(ZeroDivisionError, f, 0)
+ <ExceptionInfo ...>
+ >>> raises(ZeroDivisionError, f, x=0)
+ <ExceptionInfo ...>
+
+ A third possibility is to use a string which will
+ be executed::
+
+ >>> raises(ZeroDivisionError, "f(0)")
+ <ExceptionInfo ...>
+ """
+ __tracebackhide__ = True
+
+ if not args:
+ return RaisesContext(ExpectedException)
+ elif isinstance(args[0], str):
+ code, = args
+ assert isinstance(code, str)
+ frame = sys._getframe(1)
+ loc = frame.f_locals.copy()
+ loc.update(kwargs)
+ #print "raises frame scope: %r" % frame.f_locals
+ try:
+ code = py.code.Source(code).compile()
+ py.builtin.exec_(code, frame.f_globals, loc)
+ # XXX didn't f_globals == f_locals mean something special?
+ # this is destroyed here ...
+ except ExpectedException:
+ return py.code.ExceptionInfo()
+ else:
+ func = args[0]
+ try:
+ func(*args[1:], **kwargs)
+ except ExpectedException:
+ return py.code.ExceptionInfo()
+ k = ", ".join(["%s=%r" % x for x in kwargs.items()])
+ if k:
+ k = ', ' + k
+ expr = '%s(%r%s)' %(getattr(func, '__name__', func), args, k)
+ pytest.fail("DID NOT RAISE")
+
+class RaisesContext(object):
+ def __init__(self, ExpectedException):
+ self.ExpectedException = ExpectedException
+ self.excinfo = None
+
+ def __enter__(self):
+ self.excinfo = object.__new__(py.code.ExceptionInfo)
+ return self.excinfo
+
+ def __exit__(self, *tp):
+ __tracebackhide__ = True
+ if tp[0] is None:
+ pytest.fail("DID NOT RAISE")
+ self.excinfo.__init__(tp)
+ return issubclass(self.excinfo.type, self.ExpectedException)
+
diff --git a/lib/pypy/_pytest/recwarn.py b/lib/pypy/_pytest/recwarn.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/recwarn.py
@@ -0,0 +1,99 @@
+""" recording warnings during test function execution. """
+
+import py
+import sys, os
+
+def pytest_funcarg__recwarn(request):
+ """Return a WarningsRecorder instance that provides these methods:
+
+ * ``pop(category=None)``: return the first warning matching the category.
+ * ``clear()``: clear list of warnings
+
+ See http://docs.python.org/library/warnings.html for information
+ on warning categories.
+ """
+ if sys.version_info >= (2,7):
+ import warnings
+ oldfilters = warnings.filters[:]
+ warnings.simplefilter('default')
+ def reset_filters():
+ warnings.filters[:] = oldfilters
+ request.addfinalizer(reset_filters)
+ wrec = WarningsRecorder()
+ request.addfinalizer(wrec.finalize)
+ return wrec
+
+def pytest_namespace():
+ return {'deprecated_call': deprecated_call}
+
+def deprecated_call(func, *args, **kwargs):
+ """ assert that calling ``func(*args, **kwargs)``
+ triggers a DeprecationWarning.
+ """
+ warningmodule = py.std.warnings
+ l = []
+ oldwarn_explicit = getattr(warningmodule, 'warn_explicit')
+ def warn_explicit(*args, **kwargs):
+ l.append(args)
+ oldwarn_explicit(*args, **kwargs)
+ oldwarn = getattr(warningmodule, 'warn')
+ def warn(*args, **kwargs):
+ l.append(args)
+ oldwarn(*args, **kwargs)
+
+ warningmodule.warn_explicit = warn_explicit
+ warningmodule.warn = warn
+ try:
+ ret = func(*args, **kwargs)
+ finally:
+ warningmodule.warn_explicit = warn_explicit
+ warningmodule.warn = warn
+ if not l:
+ #print warningmodule
+ __tracebackhide__ = True
+ raise AssertionError("%r did not produce DeprecationWarning" %(func,))
+ return ret
+
+
+class RecordedWarning:
+ def __init__(self, message, category, filename, lineno, line):
+ self.message = message
+ self.category = category
+ self.filename = filename
+ self.lineno = lineno
+ self.line = line
+
+class WarningsRecorder:
+ def __init__(self):
+ warningmodule = py.std.warnings
+ self.list = []
+ def showwarning(message, category, filename, lineno, line=0):
+ self.list.append(RecordedWarning(
+ message, category, filename, lineno, line))
+ try:
+ self.old_showwarning(message, category,
+ filename, lineno, line=line)
+ except TypeError:
+ # < python2.6
+ self.old_showwarning(message, category, filename, lineno)
+ self.old_showwarning = warningmodule.showwarning
+ warningmodule.showwarning = showwarning
+
+ def pop(self, cls=Warning):
+ """ pop the first recorded warning, raise an exception if none exists."""
+ for i, w in enumerate(self.list):
+ if issubclass(w.category, cls):
+ return self.list.pop(i)
+ __tracebackhide__ = True
+ assert 0, "%r not found in %r" %(cls, self.list)
+
+ #def resetregistry(self):
+ # import warnings
+ # warnings.onceregistry.clear()
+ # warnings.__warningregistry__.clear()
+
+ def clear(self):
+ self.list[:] = []
+
+ def finalize(self):
+ py.std.warnings.showwarning = self.old_showwarning
diff --git a/lib/pypy/_pytest/resultlog.py b/lib/pypy/_pytest/resultlog.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/resultlog.py
@@ -0,0 +1,93 @@
+""" (disabled by default) create result information in a plain text file. """
+
+import py
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting", "resultlog plugin options")
+ group.addoption('--resultlog', action="store", dest="resultlog",
+ metavar="path", default=None,
+ help="path for machine-readable result log.")
+
+def pytest_configure(config):
+ resultlog = config.option.resultlog
+ # prevent opening resultlog on slave nodes (xdist)
+ if resultlog and not hasattr(config, 'slaveinput'):
+ logfile = open(resultlog, 'w', 1) # line buffered
+ config._resultlog = ResultLog(config, logfile)
+ config.pluginmanager.register(config._resultlog)
+
+def pytest_unconfigure(config):
+ resultlog = getattr(config, '_resultlog', None)
+ if resultlog:
+ resultlog.logfile.close()
+ del config._resultlog
+ config.pluginmanager.unregister(resultlog)
+
+def generic_path(item):
+ chain = item.listchain()
+ gpath = [chain[0].name]
+ fspath = chain[0].fspath
+ fspart = False
+ for node in chain[1:]:
+ newfspath = node.fspath
+ if newfspath == fspath:
+ if fspart:
+ gpath.append(':')
+ fspart = False
+ else:
+ gpath.append('.')
+ else:
+ gpath.append('/')
+ fspart = True
+ name = node.name
+ if name[0] in '([':
+ gpath.pop()
+ gpath.append(name)
+ fspath = newfspath
+ return ''.join(gpath)
+
+class ResultLog(object):
+ def __init__(self, config, logfile):
+ self.config = config
+ self.logfile = logfile # preferably line buffered
+
+ def write_log_entry(self, testpath, lettercode, longrepr):
+ py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile)
+ for line in longrepr.splitlines():
+ py.builtin.print_(" %s" % line, file=self.logfile)
+
+ def log_outcome(self, report, lettercode, longrepr):
+ testpath = getattr(report, 'nodeid', None)
+ if testpath is None:
+ testpath = report.fspath
+ self.write_log_entry(testpath, lettercode, longrepr)
+
+ def pytest_runtest_logreport(self, report):
+ res = self.config.hook.pytest_report_teststatus(report=report)
+ code = res[1]
+ if code == 'x':
+ longrepr = str(report.longrepr)
+ elif code == 'X':
+ longrepr = ''
+ elif report.passed:
+ longrepr = ""
+ elif report.failed:
+ longrepr = str(report.longrepr)
+ elif report.skipped:
+ longrepr = str(report.longrepr[2])
+ self.log_outcome(report, code, longrepr)
+
+ def pytest_collectreport(self, report):
+ if not report.passed:
+ if report.failed:
+ code = "F"
+ longrepr = str(report.longrepr.reprcrash)
+ else:
+ assert report.skipped
+ code = "S"
+ longrepr = "%s:%d: %s" % report.longrepr
+ self.log_outcome(report, code, longrepr)
+
+ def pytest_internalerror(self, excrepr):
+ path = excrepr.reprcrash.path
+ self.write_log_entry(path, '!', str(excrepr))
diff --git a/lib/pypy/_pytest/runner.py b/lib/pypy/_pytest/runner.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/runner.py
@@ -0,0 +1,388 @@
+""" basic collect and runtest protocol implementations """
+
+import py, sys
+from py._code.code import TerminalRepr
+
+def pytest_namespace():
+ return {
+ 'fail' : fail,
+ 'skip' : skip,
+ 'importorskip' : importorskip,
+ 'exit' : exit,
+ }
+
+#
+# pytest plugin hooks
+
+def pytest_sessionstart(session):
+ session._setupstate = SetupState()
+
+def pytest_sessionfinish(session, exitstatus):
+ hook = session.config.hook
+ rep = hook.pytest__teardown_final(session=session)
+ if rep:
+ hook.pytest__teardown_final_logerror(session=session, report=rep)
+ session.exitstatus = 1
+
+class NodeInfo:
+ def __init__(self, location):
+ self.location = location
+
+def pytest_runtest_protocol(item):
+ item.ihook.pytest_runtest_logstart(
+ nodeid=item.nodeid, location=item.location,
+ )
+ runtestprotocol(item)
+ return True
+
+def runtestprotocol(item, log=True):
+ rep = call_and_report(item, "setup", log)
+ reports = [rep]
+ if rep.passed:
+ reports.append(call_and_report(item, "call", log))
+ reports.append(call_and_report(item, "teardown", log))
+ return reports
+
+def pytest_runtest_setup(item):
+ item.session._setupstate.prepare(item)
+
+def pytest_runtest_call(item):
+ item.runtest()
+
+def pytest_runtest_teardown(item):
+ item.session._setupstate.teardown_exact(item)
+
+def pytest__teardown_final(session):
+ call = CallInfo(session._setupstate.teardown_all, when="teardown")
+ if call.excinfo:
+ ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir)
+ call.excinfo.traceback = ntraceback.filter()
+ longrepr = call.excinfo.getrepr(funcargs=True)
+ return TeardownErrorReport(longrepr)
+
+def pytest_report_teststatus(report):
+ if report.when in ("setup", "teardown"):
+ if report.failed:
+ # category, shortletter, verbose-word
+ return "error", "E", "ERROR"
+ elif report.skipped:
+ return "skipped", "s", "SKIPPED"
+ else:
+ return "", "", ""
+
+
+#
+# Implementation
+
+def call_and_report(item, when, log=True):
+ call = call_runtest_hook(item, when)
+ hook = item.ihook
+ report = hook.pytest_runtest_makereport(item=item, call=call)
+ if log and (when == "call" or not report.passed):
+ hook.pytest_runtest_logreport(report=report)
+ return report
+
+def call_runtest_hook(item, when):
+ hookname = "pytest_runtest_" + when
+ ihook = getattr(item.ihook, hookname)
+ return CallInfo(lambda: ihook(item=item), when=when)
+
+class CallInfo:
+ """ Result/Exception info of a function invocation. """
+ #: None or ExceptionInfo object.
+ excinfo = None
+ def __init__(self, func, when):
+ #: context of invocation: one of "setup", "call",
+ #: "teardown", "memocollect"
+ self.when = when
+ try:
+ self.result = func()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.excinfo = py.code.ExceptionInfo()
+
+ def __repr__(self):
+ if self.excinfo:
+ status = "exception: %s" % str(self.excinfo.value)
+ else:
+ status = "result: %r" % (self.result,)
+ return "<CallInfo when=%r %s>" % (self.when, status)
+
+def getslaveinfoline(node):
+ try:
+ return node._slaveinfocache
+ except AttributeError:
+ d = node.slaveinfo
+ ver = "%s.%s.%s" % d['version_info'][:3]
+ node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
+ d['id'], d['sysplatform'], ver, d['executable'])
+ return s
+
+class BaseReport(object):
+ def toterminal(self, out):
+ longrepr = self.longrepr
+ if hasattr(self, 'node'):
+ out.line(getslaveinfoline(self.node))
+ if hasattr(longrepr, 'toterminal'):
+ longrepr.toterminal(out)
+ else:
+ out.line(str(longrepr))
+
+ passed = property(lambda x: x.outcome == "passed")
+ failed = property(lambda x: x.outcome == "failed")
+ skipped = property(lambda x: x.outcome == "skipped")
+
+ @property
+ def fspath(self):
+ return self.nodeid.split("::")[0]
+
+def pytest_runtest_makereport(item, call):
+ when = call.when
+ keywords = dict([(x,1) for x in item.keywords])
+ excinfo = call.excinfo
+ if not call.excinfo:
+ outcome = "passed"
+ longrepr = None
+ else:
+ excinfo = call.excinfo
+ if not isinstance(excinfo, py.code.ExceptionInfo):
+ outcome = "failed"
+ longrepr = excinfo
+ elif excinfo.errisinstance(py.test.skip.Exception):
+ outcome = "skipped"
+ r = excinfo._getreprcrash()
+ longrepr = (str(r.path), r.lineno, r.message)
+ else:
+ outcome = "failed"
+ if call.when == "call":
+ longrepr = item.repr_failure(excinfo)
+ else: # exception in setup or teardown
+ longrepr = item._repr_failure_py(excinfo)
+ return TestReport(item.nodeid, item.location,
+ keywords, outcome, longrepr, when)
+
+class TestReport(BaseReport):
+ """ Basic test report object (also used for setup and teardown calls if
+ they fail).
+ """
+ def __init__(self, nodeid, location,
+ keywords, outcome, longrepr, when):
+ #: normalized collection node id
+ self.nodeid = nodeid
+
+ #: a (filesystempath, lineno, domaininfo) tuple indicating the
+ #: actual location of a test item - it might be different from the
+ #: collected one e.g. if a method is inherited from a different module.
+ self.location = location
+
+ #: a name -> value dictionary containing all keywords and
+ #: markers associated with a test invocation.
+ self.keywords = keywords
+
+ #: test outcome, always one of "passed", "failed", "skipped".
+ self.outcome = outcome
+
+ #: None or a failure representation.
+ self.longrepr = longrepr
+
+ #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
+ self.when = when
+
+ def __repr__(self):
+ return "<TestReport %r when=%r outcome=%r>" % (
+ self.nodeid, self.when, self.outcome)
+
+class TeardownErrorReport(BaseReport):
+ outcome = "failed"
+ when = "teardown"
+ def __init__(self, longrepr):
+ self.longrepr = longrepr
+
+def pytest_make_collect_report(collector):
+ call = CallInfo(collector._memocollect, "memocollect")
+ longrepr = None
+ if not call.excinfo:
+ outcome = "passed"
+ else:
+ if call.excinfo.errisinstance(py.test.skip.Exception):
+ outcome = "skipped"
+ r = collector._repr_failure_py(call.excinfo, "line").reprcrash
+ longrepr = (str(r.path), r.lineno, r.message)
+ else:
+ outcome = "failed"
+ errorinfo = collector.repr_failure(call.excinfo)
+ if not hasattr(errorinfo, "toterminal"):
+ errorinfo = CollectErrorRepr(errorinfo)
+ longrepr = errorinfo
+ return CollectReport(collector.nodeid, outcome, longrepr,
+ getattr(call, 'result', None))
+
+class CollectReport(BaseReport):
+ def __init__(self, nodeid, outcome, longrepr, result):
+ self.nodeid = nodeid
+ self.outcome = outcome
+ self.longrepr = longrepr
+ self.result = result or []
+
+ @property
+ def location(self):
+ return (self.fspath, None, self.fspath)
+
+ def __repr__(self):
+ return "<CollectReport %r lenresult=%s outcome=%r>" % (
+ self.nodeid, len(self.result), self.outcome)
+
+class CollectErrorRepr(TerminalRepr):
+ def __init__(self, msg):
+ self.longrepr = msg
+ def toterminal(self, out):
+ out.line(str(self.longrepr), red=True)
+
+class SetupState(object):
+ """ shared state for setting up/tearing down test items or collectors. """
+ def __init__(self):
+ self.stack = []
+ self._finalizers = {}
+
+ def addfinalizer(self, finalizer, colitem):
+ """ attach a finalizer to the given colitem.
+ if colitem is None, this will add a finalizer that
+ is called at the end of teardown_all().
+ """
+ assert hasattr(finalizer, '__call__')
+ #assert colitem in self.stack
+ self._finalizers.setdefault(colitem, []).append(finalizer)
+
+ def _pop_and_teardown(self):
+ colitem = self.stack.pop()
+ self._teardown_with_finalization(colitem)
+
+ def _callfinalizers(self, colitem):
+ finalizers = self._finalizers.pop(colitem, None)
+ while finalizers:
+ fin = finalizers.pop()
+ fin()
+
+ def _teardown_with_finalization(self, colitem):
+ self._callfinalizers(colitem)
+ if colitem:
+ colitem.teardown()
+ for colitem in self._finalizers:
+ assert colitem is None or colitem in self.stack
+
+ def teardown_all(self):
+ while self.stack:
+ self._pop_and_teardown()
+ self._teardown_with_finalization(None)
+ assert not self._finalizers
+
+ def teardown_exact(self, item):
+ if self.stack and item == self.stack[-1]:
+ self._pop_and_teardown()
+ else:
+ self._callfinalizers(item)
+
+ def prepare(self, colitem):
+ """ setup objects along the collector chain to the test-method
+ and teardown previously setup objects."""
+ needed_collectors = colitem.listchain()
+ while self.stack:
+ if self.stack == needed_collectors[:len(self.stack)]:
+ break
+ self._pop_and_teardown()
+ # check if the last collection node has raised an error
+ for col in self.stack:
+ if hasattr(col, '_prepare_exc'):
+ py.builtin._reraise(*col._prepare_exc)
+ for col in needed_collectors[len(self.stack):]:
+ self.stack.append(col)
+ try:
+ col.setup()
+ except Exception:
+ col._prepare_exc = sys.exc_info()
+ raise
+
+# =============================================================
+# Test OutcomeExceptions and helpers for creating them.
+
+
+class OutcomeException(Exception):
+ """ OutcomeException and its subclass instances indicate and
+ contain info about test and collection outcomes.
+ """
+ def __init__(self, msg=None, pytrace=True):
+ self.msg = msg
+ self.pytrace = pytrace
+
+ def __repr__(self):
+ if self.msg:
+ return str(self.msg)
+ return "<%s instance>" %(self.__class__.__name__,)
+ __str__ = __repr__
+
+class Skipped(OutcomeException):
+ # XXX hackish: on 3k we fake to live in the builtins
+ # in order to have Skipped exception printing shorter/nicer
+ __module__ = 'builtins'
+
+class Failed(OutcomeException):
+ """ raised from an explicit call to py.test.fail() """
+ __module__ = 'builtins'
+
+class Exit(KeyboardInterrupt):
+ """ raised for immediate program exits (no tracebacks/summaries)"""
+ def __init__(self, msg="unknown reason"):
+ self.msg = msg
+ KeyboardInterrupt.__init__(self, msg)
+
+# exposed helper methods
+
+def exit(msg):
+ """ exit testing process as if KeyboardInterrupt was triggered. """
+ __tracebackhide__ = True
+ raise Exit(msg)
+
+exit.Exception = Exit
+
+def skip(msg=""):
+ """ skip an executing test with the given message. Note: it's usually
+ better to use the py.test.mark.skipif marker to declare a test to be
+ skipped under certain conditions like mismatching platforms or
+ dependencies. See the pytest_skipping plugin for details.
+ """
+ __tracebackhide__ = True
+ raise Skipped(msg=msg)
+skip.Exception = Skipped
+
+def fail(msg="", pytrace=True):
+ """ explicitly fail a currently-executing test with the given message.
+ if @pytrace is not True the msg represents the full failure information.
+ """
+ __tracebackhide__ = True
+ raise Failed(msg=msg, pytrace=pytrace)
+fail.Exception = Failed
+
+
+def importorskip(modname, minversion=None):
+ """ return imported module if it has a higher __version__ than the
+ optionally specified 'minversion' - otherwise call py.test.skip()
+ with a message detailing the mismatch.
+ """
+ __tracebackhide__ = True
+ compile(modname, '', 'eval') # to catch syntaxerrors
+ try:
+ mod = __import__(modname, None, None, ['__doc__'])
+ except ImportError:
+ py.test.skip("could not import %r" %(modname,))
+ if minversion is None:
+ return mod
+ verattr = getattr(mod, '__version__', None)
+ if isinstance(minversion, str):
+ minver = minversion.split(".")
+ else:
+ minver = list(minversion)
+ if verattr is None or verattr.split(".") < minver:
+ py.test.skip("module %r has __version__ %r, required is: %r" %(
+ modname, verattr, minversion))
+ return mod
diff --git a/lib/pypy/_pytest/skipping.py b/lib/pypy/_pytest/skipping.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/skipping.py
@@ -0,0 +1,246 @@
+""" support for skip/xfail functions and markers. """
+
+import py, pytest
+import sys
+
def pytest_addoption(parser):
    """Register the --runxfail command line option in the 'general' group."""
    general = parser.getgroup("general")
    general.addoption('--runxfail',
        action="store_true", dest="runxfail", default=False,
        help="run tests even if they are marked xfail")
+
def pytest_namespace():
    """Expose the xfail helper as py.test.xfail."""
    return {'xfail': xfail}
+
class XFailed(pytest.fail.Exception):
    """ raised from an explicit call to py.test.xfail();
    a specialized failure marking the outcome as expected-to-fail. """
+
def xfail(reason=""):
    """Imperatively xfail the executing test or setup function,
    recording *reason* as the explanation."""
    __tracebackhide__ = True
    raise XFailed(reason)

xfail.Exception = XFailed
+
class MarkEvaluator:
    """Evaluate a skipif/xfail marker expression attached to a test item.

    Truthiness of the evaluator itself reflects whether the marker is
    present at all; istrue() evaluates the marker's condition strings.
    """
    def __init__(self, item, name):
        self.item = item
        self.name = name  # marker name, e.g. 'skipif' or 'xfail'

    @property
    def holder(self):
        # the marker object, or None if the item is not marked
        return self.item.keywords.get(self.name, None)
    def __bool__(self):
        return bool(self.holder)
    __nonzero__ = __bool__  # python2 spelling of __bool__

    def wasvalid(self):
        # False once istrue() hit an error evaluating the expression
        return not hasattr(self, 'exc')

    def istrue(self):
        """Return whether the marker condition holds; on evaluation
        errors, remember the exc_info and fail with a formatted message
        (pytrace=False: the message is the full failure information)."""
        try:
            return self._istrue()
        except KeyboardInterrupt:
            raise
        except:
            self.exc = sys.exc_info()
            if isinstance(self.exc[1], SyntaxError):
                # point a caret at the offending column of the expression
                msg = [" " * (self.exc[1].offset + 4) + "^",]
                msg.append("SyntaxError: invalid syntax")
            else:
                msg = py.std.traceback.format_exception_only(*self.exc[:2])
            pytest.fail("Error evaluating %r expression\n"
                        " %s\n"
                        "%s"
                        %(self.name, self.expr, "\n".join(msg)),
                        pytrace=False)

    def _getglobals(self):
        # namespace for condition eval: os/sys/config plus the globals
        # of the test function itself
        d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
        func = self.item.obj
        try:
            d.update(func.__globals__)
        except AttributeError:
            # python2 function attribute spelling
            d.update(func.func_globals)
        return d

    def _istrue(self):
        if self.holder:
            d = self._getglobals()
            if self.holder.args:
                # marker with condition args: true if ANY condition holds
                self.result = False
                for expr in self.holder.args:
                    self.expr = expr
                    if isinstance(expr, str):
                        result = cached_eval(self.item.config, expr, d)
                    else:
                        pytest.fail("expression is not a string")
                    if result:
                        self.result = True
                        self.expr = expr
                        break
            else:
                # bare marker without condition: unconditionally true
                self.result = True
        return getattr(self, 'result', False)

    def get(self, attr, default=None):
        """Return a keyword argument of the marker, e.g. get('run')."""
        return self.holder.kwargs.get(attr, default)

    def getexplanation(self):
        """Return the reason keyword, or a description of the condition
        that triggered, or "" when neither is available."""
        expl = self.get('reason', None)
        if not expl:
            if not hasattr(self, 'expr'):
                return ""
            else:
                return "condition: " + str(self.expr)
        return expl
+
+
def pytest_runtest_setup(item):
    """Before a test function runs: honor a true skipif marker, then
    attach the xfail evaluator and enforce xfail(run=False)."""
    if not isinstance(item, pytest.Function):
        return
    skip_eval = MarkEvaluator(item, 'skipif')
    if skip_eval.istrue():
        py.test.skip(skip_eval.getexplanation())
    item._evalxfail = MarkEvaluator(item, 'xfail')
    check_xfail_no_run(item)
+
def pytest_pyfunc_call(pyfuncitem):
    # NOTE(review): re-checks xfail(run=False) right before the call —
    # presumably because the marker may appear after setup; confirm.
    check_xfail_no_run(pyfuncitem)
+
def check_xfail_no_run(item):
    """Abort with an "[NOTRUN]" xfail when the item carries a true xfail
    marker with run=False (unless --runxfail was given)."""
    if item.config.option.runxfail:
        return
    evaluator = item._evalxfail
    if evaluator.istrue() and not evaluator.get('run', True):
        py.test.xfail("[NOTRUN] " + evaluator.getexplanation())
+
def pytest_runtest_makereport(__multicall__, item, call):
    """Rewrite the outcome of reports for xfail-related items.

    Explicit py.test.xfail() calls become "skipped" reports carrying an
    'xfail' keyword; tests with a true xfail marker become "skipped" on
    failure and "failed" (XPASS) on unexpected success.
    """
    if not isinstance(item, pytest.Function):
        return
    if not (call.excinfo and
        call.excinfo.errisinstance(py.test.xfail.Exception)):
        # no explicit xfail was raised: only act when an evaluator was
        # attached by pytest_runtest_setup
        evalxfail = getattr(item, '_evalxfail', None)
        if not evalxfail:
            return
    if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception):
        # explicit imperative xfail; with --runxfail it falls through and
        # is reported like a regular exception
        if not item.config.getvalue("runxfail"):
            rep = __multicall__.execute()
            rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg
            rep.outcome = "skipped"
            return rep
    rep = __multicall__.execute()
    evalxfail = item._evalxfail
    if not item.config.option.runxfail:
        if evalxfail.wasvalid() and evalxfail.istrue():
            if call.excinfo:
                # expected failure happened
                rep.outcome = "skipped"
                rep.keywords['xfail'] = evalxfail.getexplanation()
            elif call.when == "call":
                # marked xfail but passed: report as XPASS failure
                rep.outcome = "failed"
                rep.keywords['xfail'] = evalxfail.getexplanation()
            return rep
    # --runxfail, or no (valid, true) marker: strip any stale keyword
    if 'xfail' in rep.keywords:
        del rep.keywords['xfail']
    return rep
+
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
    """Map xfail-marked reports onto their (category, letter, word)
    terminal-progress triple."""
    if 'xfail' not in report.keywords:
        return None
    if report.skipped:
        return "xfailed", "x", "xfail"
    if report.failed:
        return "xpassed", "X", "XPASS"
+
# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
    """Emit the "short test summary info" section for the categories
    requested via -r characters (f/F, s/S, x, X)."""
    tr = terminalreporter
    if not tr.reportchars:
        return
    lines = []
    for char in tr.reportchars:
        if char == "x":
            show_xfailed(tr, lines)
        elif char == "X":
            show_xpassed(tr, lines)
        elif char in "fF":
            show_failed(tr, lines)
        elif char in "sS":
            show_skipped(tr, lines)
    if lines:
        tr._tw.sep("=", "short test summary info")
        for line in lines:
            tr._tw.line(line)
+
def show_failed(terminalreporter, lines):
    """Append one "FAIL <nodeid>" summary line per failed test report.

    Fix: dropped the unused local ``tw = terminalreporter._tw`` from the
    original (this helper only collects lines; it never writes).
    """
    failed = terminalreporter.stats.get("failed")
    if failed:
        for rep in failed:
            lines.append("FAIL %s" % (rep.nodeid,))
+
def show_xfailed(terminalreporter, lines):
    """Append "XFAIL <nodeid>" summary lines, each followed by an
    indented reason line when one was recorded."""
    for rep in terminalreporter.stats.get("xfailed") or ():
        lines.append("XFAIL %s" % (rep.nodeid,))
        reason = rep.keywords['xfail']
        if reason:
            lines.append(" " + str(reason))
+
def show_xpassed(terminalreporter, lines):
    """Append one "XPASS <nodeid> <reason>" summary line per
    unexpectedly-passing xfail-marked test."""
    xpassed = terminalreporter.stats.get("xpassed")
    if not xpassed:
        return
    for rep in xpassed:
        lines.append("XPASS %s %s" % (rep.nodeid, rep.keywords['xfail']))
+
def cached_eval(config, expr, d):
    """Evaluate the condition string *expr* in namespace *d*, memoizing
    the result on the config object keyed by the expression text."""
    cache = getattr(config, '_evalcache', None)
    if cache is None:
        cache = config._evalcache = {}
    if expr in cache:
        return cache[expr]
    # py.code.compile gives better error reporting than builtin compile
    exprcode = py.code.compile(expr, mode="eval")
    cache[expr] = x = eval(exprcode, d)
    return x
+
+
def folded_skips(skipped):
    """Fold skip reports sharing the same longrepr key and return a list
    of ``(count, fspath, lineno, reason)`` tuples."""
    grouped = {}
    for event in skipped:
        key = event.longrepr
        assert len(key) == 3, (event, key)
        grouped.setdefault(key, []).append(event)
    return [(len(events),) + key for key, events in grouped.items()]
+
def show_skipped(terminalreporter, lines):
    """Append one folded "SKIP [count] fspath:lineno: reason" summary
    line per distinct skip location/reason."""
    skipped = terminalreporter.stats.get('skipped', [])
    if not skipped:
        return
    for num, fspath, lineno, reason in folded_skips(skipped):
        # strip the exception-class prefix from the rendered reason
        if reason.startswith("Skipped: "):
            reason = reason[9:]
        lines.append("SKIP [%d] %s:%d: %s" % (num, fspath, lineno, reason))
diff --git a/lib/pypy/_pytest/standalonetemplate.py b/lib/pypy/_pytest/standalonetemplate.py
new file mode 100755
--- /dev/null
+++ b/lib/pypy/_pytest/standalonetemplate.py
@@ -0,0 +1,63 @@
+#! /usr/bin/env python
+
# Template placeholder: the generator replaces @SOURCES@ with the
# base64/zlib-compressed pickle of the bundled module sources.
# (The mailing-list archive had mangled the leading "@" into " at ".)
sources = """
@SOURCES@"""
+
+import sys
+import base64
+import zlib
+import imp
+
class DictImporter(object):
    """A sys.meta_path importer serving modules from an in-memory
    mapping of dotted names to source strings; package sources are
    stored under the key ``<name>.__init__``."""

    def __init__(self, sources):
        self.sources = sources

    def find_module(self, fullname, path=None):
        """Return self when *fullname* names a bundled module/package."""
        if fullname in self.sources or fullname + '.__init__' in self.sources:
            return self
        return None

    def load_module(self, fullname):
        """Compile and execute the bundled source, registering and
        returning the resulting module object."""
        from types import ModuleType
        try:
            source = self.sources[fullname]
            is_pkg = False
        except KeyError:
            source = self.sources[fullname + '.__init__']
            is_pkg = True

        code = compile(source, fullname, 'exec')
        module = sys.modules.setdefault(fullname, ModuleType(fullname))
        module.__file__ = "%s/%s" % (__file__, fullname)
        module.__loader__ = self
        if is_pkg:
            module.__path__ = [fullname]

        do_exec(code, module.__dict__)
        return sys.modules[fullname]

    def get_source(self, name):
        """Return the bundled source for a module or package, or None."""
        source = self.sources.get(name)
        if source is None:
            source = self.sources.get(name + '.__init__')
        return source
+
if __name__ == "__main__":
    # define do_exec via exec() because the py2 statement form and the
    # py3 function form of exec cannot both be parsed by one interpreter
    if sys.version_info >= (3, 0):
        exec("def do_exec(co, loc): exec(co, loc)\n")
        import pickle
        sources = sources.encode("ascii") # ensure bytes
        sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
    else:
        import cPickle as pickle
        exec("def do_exec(co, loc): exec co in loc\n")
        sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))

    # serve the embedded modules through the import machinery
    importer = DictImporter(sources)
    sys.meta_path.append(importer)

    # @ENTRY@ is substituted with the startup code by the generator
    entry = "@ENTRY@"
    do_exec(entry, locals())
diff --git a/lib/pypy/_pytest/terminal.py b/lib/pypy/_pytest/terminal.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/terminal.py
@@ -0,0 +1,451 @@
+""" terminal reporting of the full testing process.
+
+This is a good source for looking at the various reporting hooks.
+"""
+import pytest, py
+import sys
+import os
+
def pytest_addoption(parser):
    """Register the terminal-reporting command line options.

    Fixes: corrected the user-visible typo "decreate" -> "decrease" in
    the -q help text, and removed the stray trailing commas after the
    -v/-q registrations (they built and discarded 1-tuples).
    """
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    group._addoption('-v', '--verbose', action="count",
        dest="verbose", default=0, help="increase verbosity.")
    group._addoption('-q', '--quiet', action="count",
        dest="quiet", default=0, help="decrease verbosity.")
    group._addoption('-r',
        action="store", dest="reportchars", default=None, metavar="chars",
        help="show extra test summary info as specified by chars (f)ailed, "
        "(s)skipped, (x)failed, (X)passed.")
    group._addoption('-l', '--showlocals',
        action="store_true", dest="showlocals", default=False,
        help="show locals in tracebacks (disabled by default).")
    group._addoption('--report',
        action="store", dest="report", default=None, metavar="opts",
        help="(deprecated, use -r)")
    group._addoption('--tb', metavar="style",
        action="store", dest="tbstyle", default='long',
        type="choice", choices=['long', 'short', 'no', 'line', 'native'],
        help="traceback print mode (long/short/line/native/no).")
    group._addoption('--fulltrace',
        action="store_true", dest="fulltrace", default=False,
        help="don't cut any tracebacks (default is to cut).")
+
def pytest_configure(config):
    """Create and register the TerminalReporter plugin instance."""
    # -q subtracts from -v so a single signed verbosity level remains
    config.option.verbose -= config.option.quiet
    # we try hard to make printing resilient against
    # later changes on FD level: duplicate stdout's file descriptor and
    # write through a private line-buffered stream
    stdout = py.std.sys.stdout
    if hasattr(os, 'dup') and hasattr(stdout, 'fileno'):
        try:
            newfd = os.dup(stdout.fileno())
            #print "got newfd", newfd
        except ValueError:
            # fileno() not backed by a real descriptor
            pass
        else:
            stdout = os.fdopen(newfd, stdout.mode, 1)
            config._toclose = stdout  # closed again in pytest_unconfigure
    reporter = TerminalReporter(config, stdout)
    config.pluginmanager.register(reporter, 'terminalreporter')
    if config.option.debug or config.option.traceconfig:
        # route internal config tracing through the reporter
        def mywriter(tags, args):
            msg = " ".join(map(str, args))
            reporter.write_line("[traceconfig] " + msg)
        config.trace.root.setprocessor("pytest:config", mywriter)
+
def pytest_unconfigure(config):
    """Close the duplicated stdout stream opened by pytest_configure."""
    toclose = getattr(config, '_toclose', None)
    if toclose is not None:
        toclose.close()
+
def getreportopt(config):
    """Combine the deprecated --report option and the -r option into a
    duplicate-free string of summary-report characters."""
    reportopts = ""
    optvalue = config.option.report
    if optvalue:
        py.builtin.print_("DEPRECATED: use -r instead of --report option.",
            file=py.std.sys.stderr)
        for setting in optvalue.split(","):
            setting = setting.strip()
            if setting == "skipped":
                reportopts += "s"
            elif setting == "xfailed":
                reportopts += "x"
    for char in config.option.reportchars or "":
        if char not in reportopts:
            reportopts += char
    return reportopts
+
def pytest_report_teststatus(report):
    """Default mapping of a report onto its (category, progress letter,
    verbose word) triple; setup/teardown failures get a lowercase 'f'."""
    if report.passed:
        letter = "."
    elif report.skipped:
        letter = "s"
    elif report.failed:
        letter = "F" if report.when == "call" else "f"
    return report.outcome, letter, report.outcome.upper()
+
class TerminalReporter:
    """Write test progress and summary information to the terminal.

    Registered by pytest_configure as the 'terminalreporter' plugin; all
    output goes through a py.io.TerminalWriter so markup (color, bold)
    degrades gracefully on dumb terminals.
    """
    def __init__(self, config, file=None):
        self.config = config
        self.verbosity = self.config.option.verbose
        self.showheader = self.verbosity >= 0
        self.showfspath = self.verbosity >= 0
        self.showlongtestinfo = self.verbosity > 0
        self._numcollected = 0

        # category name -> list of reports ('passed', 'failed', ...)
        self.stats = {}
        self.curdir = py.path.local()
        if file is None:
            file = py.std.sys.stdout
        self._tw = py.io.TerminalWriter(file)
        # last fspath/prefix written; None means "at start of line",
        # -2 is a sentinel meaning "extra text already follows a prefix"
        self.currentfspath = None
        self.reportchars = getreportopt(config)
        self.hasmarkup = self._tw.hasmarkup

    def hasopt(self, char):
        """Whether summary character *char* (or its long alias) is active."""
        char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
        return char in self.reportchars

    def write_fspath_result(self, fspath, res):
        # start a fresh line with the file path when it changes, then
        # append the single-character result
        if fspath != self.currentfspath:
            self.currentfspath = fspath
            #fspath = self.curdir.bestrelpath(fspath)
            self._tw.line()
            #relpath = self.curdir.bestrelpath(fspath)
            self._tw.write(fspath + " ")
        self._tw.write(res)

    def write_ensure_prefix(self, prefix, extra="", **kwargs):
        # write *prefix* on its own line once, then optional extra text
        if self.currentfspath != prefix:
            self._tw.line()
            self.currentfspath = prefix
            self._tw.write(prefix)
        if extra:
            self._tw.write(extra, **kwargs)
            self.currentfspath = -2

    def ensure_newline(self):
        if self.currentfspath:
            self._tw.line()
            self.currentfspath = None

    def write(self, content, **markup):
        self._tw.write(content, **markup)

    def write_line(self, line, **markup):
        line = str(line)
        self.ensure_newline()
        self._tw.line(line, **markup)

    def rewrite(self, line, **markup):
        # overwrite the current line in place (carriage return)
        line = str(line)
        self._tw.write("\r" + line, **markup)

    def write_sep(self, sep, title=None, **markup):
        self.ensure_newline()
        self._tw.sep(sep, title, **markup)

    def pytest_internalerror(self, excrepr):
        for line in str(excrepr).split("\n"):
            self.write_line("INTERNALERROR> " + line)
        return 1

    def pytest_plugin_registered(self, plugin):
        if self.config.option.traceconfig:
            msg = "PLUGIN registered: %s" %(plugin,)
            # XXX this event may happen during setup/teardown time
            # which unfortunately captures our output here
            # which garbles our output if we use self.write_line
            self.write_line(msg)

    def pytest_deselected(self, items):
        self.stats.setdefault('deselected', []).extend(items)

    def pytest__teardown_final_logerror(self, report):
        self.stats.setdefault("error", []).append(report)

    def pytest_runtest_logstart(self, nodeid, location):
        # ensure that the path is printed before the
        # 1st test of a module starts running
        fspath = nodeid.split("::")[0]
        if self.showlongtestinfo:
            line = self._locationline(fspath, *location)
            self.write_ensure_prefix(line, "")
        elif self.showfspath:
            self.write_fspath_result(fspath, "")

    def pytest_runtest_logreport(self, report):
        """Record the report under its status category and render
        progress output (single letter, or one line per test in -v)."""
        rep = report
        res = self.config.hook.pytest_report_teststatus(report=rep)
        cat, letter, word = res
        self.stats.setdefault(cat, []).append(rep)
        if not letter and not word:
            # probably passed setup/teardown
            return
        if self.verbosity <= 0:
            if not hasattr(rep, 'node') and self.showfspath:
                self.write_fspath_result(rep.fspath, letter)
            else:
                self._tw.write(letter)
        else:
            if isinstance(word, tuple):
                # a plugin supplied (word, markup) explicitly
                word, markup = word
            else:
                if rep.passed:
                    markup = {'green':True}
                elif rep.failed:
                    markup = {'red':True}
                elif rep.skipped:
                    markup = {'yellow':True}
            line = self._locationline(str(rep.fspath), *rep.location)
            if not hasattr(rep, 'node'):
                self.write_ensure_prefix(line, word, **markup)
                #self._tw.write(word, **markup)
            else:
                # distributed run: prefix with the sending node's id
                self.ensure_newline()
                if hasattr(rep, 'node'):
                    self._tw.write("[%s] " % rep.node.gateway.id)
                self._tw.write(word, **markup)
                self._tw.write(" " + line)
                self.currentfspath = -2

    def pytest_collection(self):
        if not self.hasmarkup:
            self.write("collecting ... ", bold=True)

    def pytest_collectreport(self, report):
        if report.failed:
            self.stats.setdefault("error", []).append(report)
        elif report.skipped:
            self.stats.setdefault("skipped", []).append(report)
        items = [x for x in report.result if isinstance(x, pytest.Item)]
        self._numcollected += len(items)
        if self.hasmarkup:
            #self.write_fspath_result(report.fspath, 'E')
            self.report_collect()

    def report_collect(self, final=False):
        """Render the live "collecting/collected N items" status line."""
        errors = len(self.stats.get('error', []))
        skipped = len(self.stats.get('skipped', []))
        if final:
            line = "collected "
        else:
            line = "collecting "
        line += str(self._numcollected) + " items"
        if errors:
            line += " / %d errors" % errors
        if skipped:
            line += " / %d skipped" % skipped
        if self.hasmarkup:
            if final:
                line += " \n"
            self.rewrite(line, bold=True)
        else:
            self.write_line(line)

    def pytest_collection_modifyitems(self):
        self.report_collect(True)

    def pytest_sessionstart(self, session):
        """Print the session header (platform, python, pytest versions)."""
        self._sessionstarttime = py.std.time.time()
        if not self.showheader:
            return
        self.write_sep("=", "test session starts", bold=True)
        verinfo = ".".join(map(str, sys.version_info[:3]))
        msg = "platform %s -- Python %s" % (sys.platform, verinfo)
        if hasattr(sys, 'pypy_version_info'):
            verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
            msg += "[pypy-%s]" % verinfo
        msg += " -- pytest-%s" % (py.test.__version__)
        if self.verbosity > 0 or self.config.option.debug or \
           getattr(self.config.option, 'pastebin', None):
            msg += " -- " + str(sys.executable)
        self.write_line(msg)
        lines = self.config.hook.pytest_report_header(config=self.config)
        lines.reverse()
        for line in flatten(lines):
            self.write_line(line)

    def pytest_collection_finish(self, session):
        if self.config.option.collectonly:
            self._printcollecteditems(session.items)
            if self.stats.get('failed'):
                self._tw.sep("!", "collection failures")
                for rep in self.stats.get('failed'):
                    rep.toterminal(self._tw)
                return 1
            return 0
        if not self.showheader:
            return
        #for i, testarg in enumerate(self.config.args):
        # self.write_line("test path %d: %s" %(i+1, testarg))

    def _printcollecteditems(self, items):
        # to print out items and their parent collectors
        # we take care to leave out Instances aka ()
        # because later versions are going to get rid of them anyway
        if self.config.option.verbose < 0:
            # -q: flat list of nodeids only
            for item in items:
                nodeid = item.nodeid
                nodeid = nodeid.replace("::()::", "::")
                self._tw.line(nodeid)
            return
        stack = []
        indent = ""
        for item in items:
            needed_collectors = item.listchain()[1:] # strip root node
            # pop collectors until the remaining stack is a prefix of
            # this item's chain, then print the newly-entered ones
            while stack:
                if stack == needed_collectors[:len(stack)]:
                    break
                stack.pop()
            for col in needed_collectors[len(stack):]:
                stack.append(col)
                #if col.name == "()":
                # continue
                indent = (len(stack)-1) * " "
                self._tw.line("%s%s" %(indent, col))

    def pytest_sessionfinish(self, exitstatus, __multicall__):
        __multicall__.execute()
        self._tw.line("")
        if exitstatus in (0, 1, 2):
            self.summary_errors()
            self.summary_failures()
            self.config.hook.pytest_terminal_summary(terminalreporter=self)
        if exitstatus == 2:
            self._report_keyboardinterrupt()
        self.summary_deselected()
        self.summary_stats()

    def pytest_keyboard_interrupt(self, excinfo):
        # remember the repr now; the frames are gone by summary time
        self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)

    def _report_keyboardinterrupt(self):
        excrepr = self._keyboardinterrupt_memo
        msg = excrepr.reprcrash.message
        self.write_sep("!", msg)
        if "KeyboardInterrupt" in msg:
            if self.config.option.fulltrace:
                excrepr.toterminal(self._tw)
            else:
                excrepr.reprcrash.toterminal(self._tw)

    def _locationline(self, collect_fspath, fspath, lineno, domain):
        """Build the "path:lineno: domain " prefix for a test line."""
        # collect_fspath comes from testid which has a "/"-normalized path
        if fspath and fspath.replace("\\", "/") != collect_fspath:
            fspath = "%s <- %s" % (collect_fspath, fspath)
        if fspath:
            line = str(fspath)
            if lineno is not None:
                lineno += 1  # stored 0-based; display 1-based
                line += ":" + str(lineno)
            if domain:
                line += ": " + str(domain)
        else:
            line = "[location]"
        return line + " "

    def _getfailureheadline(self, rep):
        if hasattr(rep, 'location'):
            fspath, lineno, domain = rep.location
            return domain
        else:
            return "test session" # XXX?

    def _getcrashline(self, rep):
        try:
            return str(rep.longrepr.reprcrash)
        except AttributeError:
            try:
                return str(rep.longrepr)[:50]
            except AttributeError:
                return ""

    #
    # summaries for sessionfinish
    #
    def getreports(self, name):
        # skip reports already shown interactively by the pdb plugin
        l = []
        for x in self.stats.get(name, []):
            if not hasattr(x, '_pdbshown'):
                l.append(x)
        return l

    def summary_failures(self):
        if self.config.option.tbstyle != "no":
            reports = self.getreports('failed')
            if not reports:
                return
            self.write_sep("=", "FAILURES")
            for rep in reports:
                if self.config.option.tbstyle == "line":
                    line = self._getcrashline(rep)
                    self.write_line(line)
                else:
                    msg = self._getfailureheadline(rep)
                    self.write_sep("_", msg)
                    rep.toterminal(self._tw)

    def summary_errors(self):
        if self.config.option.tbstyle != "no":
            reports = self.getreports('error')
            if not reports:
                return
            self.write_sep("=", "ERRORS")
            # NOTE(review): iterates stats['error'] rather than the
            # pdb-filtered `reports` used for the guard above — looks
            # inconsistent with summary_failures; confirm intent.
            for rep in self.stats['error']:
                msg = self._getfailureheadline(rep)
                if not hasattr(rep, 'when'):
                    # collect
                    msg = "ERROR collecting " + msg
                elif rep.when == "setup":
                    msg = "ERROR at setup of " + msg
                elif rep.when == "teardown":
                    msg = "ERROR at teardown of " + msg
                self.write_sep("_", msg)
                rep.toterminal(self._tw)

    def summary_stats(self):
        """Write the closing "N passed, M failed in X seconds" line."""
        session_duration = py.std.time.time() - self._sessionstarttime

        keys = "failed passed skipped deselected".split()
        for key in self.stats.keys():
            if key not in keys:
                keys.append(key)
        parts = []
        for key in keys:
            val = self.stats.get(key, None)
            if val:
                parts.append("%d %s" %(len(val), key))
        line = ", ".join(parts)
        # XXX coloring
        msg = "%s in %.2f seconds" %(line, session_duration)
        if self.verbosity >= 0:
            self.write_sep("=", msg, bold=True)
        else:
            self.write_line(msg, bold=True)

    def summary_deselected(self):
        if 'deselected' in self.stats:
            self.write_sep("=", "%d tests deselected by %r" %(
                len(self.stats['deselected']), self.config.option.keyword), bold=True)
+
def repr_pythonversion(v=None):
    """Render a sys.version_info-style 5-tuple as e.g. "2.7.1-final-0";
    values that do not fit the format fall back to str()."""
    version = sys.version_info if v is None else v
    try:
        return "%s.%s.%s-%s-%s" % version
    except (TypeError, ValueError):
        return str(version)
+
def flatten(l):
    """Recursively yield the non-list/tuple leaves of *l* in order."""
    for item in l:
        if not isinstance(item, (list, tuple)):
            yield item
        else:
            for leaf in flatten(item):
                yield leaf
+
diff --git a/lib/pypy/_pytest/tmpdir.py b/lib/pypy/_pytest/tmpdir.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/tmpdir.py
@@ -0,0 +1,68 @@
+""" support for providing temporary directories to test functions. """
+import pytest, py
+from _pytest.monkeypatch import monkeypatch
+
class TempdirHandler:
    """Create and hand out temporary directories below one per-session
    base directory (see getbasetemp)."""
    def __init__(self, config):
        self.config = config
        self.trace = config.trace.get("tmpdir")

    def ensuretemp(self, string, dir=1):
        """ (deprecated) return temporary directory path with
        the given string as the trailing part. It is usually
        better to use the 'tmpdir' function argument which
        provides an empty unique-per-test-invocation directory
        and is guaranteed to be empty.
        """
        #py.log._apiwarn(">1.1", "use tmpdir function argument")
        return self.getbasetemp().ensure(string, dir=dir)

    def mktemp(self, basename, numbered=True):
        """Create and return a new temporary directory below basetemp;
        when *numbered*, a unique numeric suffix is appended (old
        numbered dirs are not kept: keep=0)."""
        basetemp = self.getbasetemp()
        if not numbered:
            p = basetemp.mkdir(basename)
        else:
            p = py.path.local.make_numbered_dir(prefix=basename,
                keep=0, rootdir=basetemp, lock_timeout=None)
        self.trace("mktemp", p)
        return p

    def getbasetemp(self):
        """ return base temporary directory, creating it lazily on
        first use (honoring the --basetemp option when given). """
        try:
            return self._basetemp
        except AttributeError:
            basetemp = self.config.option.basetemp
            if basetemp:
                basetemp = py.path.local(basetemp)
                if basetemp.check():
                    # wipe a pre-existing user-supplied basetemp
                    basetemp.remove()
                basetemp.mkdir()
            else:
                basetemp = py.path.local.make_numbered_dir(prefix='pytest-')
            self._basetemp = t = basetemp
            self.trace("new basetemp", t)
            return t

    def finish(self):
        self.trace("finish")
+
def pytest_configure(config):
    """Install a TempdirHandler as config._tmpdirhandler and expose
    pytest.ensuretemp; both patches are undone at unconfigure time via
    config._cleanup."""
    patcher = monkeypatch()
    handler = TempdirHandler(config)
    config._cleanup.extend([patcher.undo, handler.finish])
    patcher.setattr(config, '_tmpdirhandler', handler, raising=False)
    patcher.setattr(pytest, 'ensuretemp', handler.ensuretemp, raising=False)
+
def pytest_funcarg__tmpdir(request):
    """return a temporary directory path object
    which is unique to each test function invocation,
    created as a sub directory of the base temporary
    directory. The returned object is a `py.path.local`_
    path object.
    """
    # sanitize the test name into something filesystem-safe
    name = py.std.re.sub("[\W]", "_", request._pyfuncitem.name)
    directory = request.config._tmpdirhandler.mktemp(name, numbered=True)
    return directory.realpath()
+
diff --git a/lib/pypy/_pytest/unittest.py b/lib/pypy/_pytest/unittest.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/_pytest/unittest.py
@@ -0,0 +1,143 @@
+""" discovery and running of std-library "unittest" style tests. """
+import pytest, py
+import sys, pdb
+
def pytest_pycollect_makeitem(collector, name, obj):
    """Collect classes deriving from unittest.TestCase as UnitTestCase
    collector nodes; leave everything else to other hooks."""
    unittest = sys.modules.get('unittest')
    if unittest is None:
        return # nobody can have derived unittest.TestCase
    try:
        isunit = issubclass(obj, unittest.TestCase)
    except KeyboardInterrupt:
        raise
    except Exception:
        # obj is not class-like; deliberately ignore and fall through
        pass
    else:
        if isunit:
            return UnitTestCase(name, parent=collector)
+
class UnitTestCase(pytest.Class):
    """Collector for a std-library unittest.TestCase subclass."""
    def collect(self):
        # one item per test method name found by the stdlib loader
        loader = py.std.unittest.TestLoader()
        for name in loader.getTestCaseNames(self.obj):
            yield TestCaseFunction(name, parent=self)

    def setup(self):
        # run the class-level setUpClass hook (if any) before pytest's
        # own class setup
        meth = getattr(self.obj, 'setUpClass', None)
        if meth is not None:
            meth()
        super(UnitTestCase, self).setup()

    def teardown(self):
        meth = getattr(self.obj, 'tearDownClass', None)
        if meth is not None:
            meth()
        super(UnitTestCase, self).teardown()
+
class TestCaseFunction(pytest.Function):
    """pytest item wrapping one unittest.TestCase test method.

    The item itself is passed to the testcase as its TestResult (see
    runtest), so the add*/start/stop callbacks below capture outcomes.
    """
    _excinfo = None  # class-level default; per-item list once captured

    def __init__(self, name, parent):
        super(TestCaseFunction, self).__init__(name, parent)
        if hasattr(self._obj, 'todo'):
            # translate twisted trial's "todo" (expected failure)
            # attribute into an xfail marker on the function
            getattr(self._obj, 'im_func', self._obj).xfail = \
                pytest.mark.xfail(reason=str(self._obj.todo))

    def setup(self):
        # instantiate the testcase bound to this single test method
        self._testcase = self.parent.obj(self.name)
        self._obj = getattr(self._testcase, self.name)
        if hasattr(self._testcase, 'setup_method'):
            self._testcase.setup_method(self._obj)

    def teardown(self):
        if hasattr(self._testcase, 'teardown_method'):
            self._testcase.teardown_method(self._obj)

    def startTest(self, testcase):
        pass

    def _addexcinfo(self, rawexcinfo):
        """Convert a raw exc_info from the unittest result API into a
        py.code.ExceptionInfo and remember it for report-making."""
        # unwrap potential exception info (see twisted trial support below)
        rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo)
        try:
            excinfo = py.code.ExceptionInfo(rawexcinfo)
        except TypeError:
            # not a real exc_info triple: render it natively via a
            # pytest.fail and capture THAT exception instead
            try:
                try:
                    l = py.std.traceback.format_exception(*rawexcinfo)
                    l.insert(0, "NOTE: Incompatible Exception Representation, "
                        "displaying natively:\n\n")
                    pytest.fail("".join(l), pytrace=False)
                except (pytest.fail.Exception, KeyboardInterrupt):
                    raise
                except:
                    pytest.fail("ERROR: Unknown Incompatible Exception "
                        "representation:\n%r" %(rawexcinfo,), pytrace=False)
            except KeyboardInterrupt:
                raise
            except pytest.fail.Exception:
                excinfo = py.code.ExceptionInfo()
        # setdefault on __dict__: shadow the class-level None lazily
        self.__dict__.setdefault('_excinfo', []).append(excinfo)

    def addError(self, testcase, rawexcinfo):
        self._addexcinfo(rawexcinfo)
    def addFailure(self, testcase, rawexcinfo):
        self._addexcinfo(rawexcinfo)
    def addSkip(self, testcase, reason):
        # raise/catch to capture a proper Skipped exc_info
        try:
            pytest.skip(reason)
        except pytest.skip.Exception:
            self._addexcinfo(sys.exc_info())
    def addExpectedFailure(self, testcase, rawexcinfo, reason):
        try:
            pytest.xfail(str(reason))
        except pytest.xfail.Exception:
            self._addexcinfo(sys.exc_info())
    def addUnexpectedSuccess(self, testcase, reason):
        pass
    def addSuccess(self, testcase):
        pass
    def stopTest(self, testcase):
        pass
    def runtest(self):
        # run the testcase with ourselves acting as the TestResult
        self._testcase(result=self)

    def _prunetraceback(self, excinfo):
        pytest.Function._prunetraceback(self, excinfo)
        # additionally hide frames from unittest-internal modules
        excinfo.traceback = excinfo.traceback.filter(lambda x:not x.frame.f_globals.get('__unittest'))
+
@pytest.mark.tryfirst
def pytest_runtest_makereport(item, call):
    """Move exception info captured through the unittest result API onto
    the CallInfo, so regular report-making renders it as a failure.

    Fix: restored the ``@pytest.mark.tryfirst`` decorator, which the
    mailing-list archive had mangled into " at pytest.mark.tryfirst".
    """
    if isinstance(item, TestCaseFunction):
        if item._excinfo:
            call.excinfo = item._excinfo.pop(0)
            del call.result
+
# twisted trial support
def pytest_runtest_protocol(item, __multicall__):
    # While a trial-based TestCase runs, monkeypatch twisted's
    # Failure.__init__ so the raw sys.exc_info() is remembered on the
    # Failure instance (_rawexcinfo); _addexcinfo unwraps it later to
    # rebuild a real traceback. The patch is reverted in the finally.
    if isinstance(item, TestCaseFunction):
        if 'twisted.trial.unittest' in sys.modules:
            ut = sys.modules['twisted.python.failure']
            Failure__init__ = ut.Failure.__init__.im_func
            check_testcase_implements_trial_reporter()
            def excstore(self, exc_value=None, exc_type=None, exc_tb=None):
                if exc_value is None:
                    self._rawexcinfo = sys.exc_info()
                else:
                    if exc_type is None:
                        exc_type = type(exc_value)
                    self._rawexcinfo = (exc_type, exc_value, exc_tb)
                Failure__init__(self, exc_value, exc_type, exc_tb)
            ut.Failure.__init__ = excstore
            try:
                return __multicall__.execute()
            finally:
                # always restore the original __init__
                ut.Failure.__init__ = Failure__init__
+
def check_testcase_implements_trial_reporter(done=[]):
    # declare (once per process) that TestCaseFunction provides
    # twisted's IReporter interface; the mutable default argument is a
    # deliberate memo so the zope declaration only happens once
    if done:
        return
    from zope.interface import classImplements
    from twisted.trial.itrial import IReporter
    classImplements(TestCaseFunction, IReporter)
    done.append(1)
diff --git a/lib/pypy/ctypes_configure/__init__.py b/lib/pypy/ctypes_configure/__init__.py
new file mode 100644
diff --git a/lib/pypy/ctypes_configure/cbuild.py b/lib/pypy/ctypes_configure/cbuild.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/ctypes_configure/cbuild.py
@@ -0,0 +1,456 @@
+
+import os, sys, inspect, re, imp, py
+from ctypes_configure import stdoutcapture
+import distutils
+
# Set to a true value to echo compiler invocations and module names.
debug = 0

# Fresh numbered scratch directory that holds all generated .c files.
configdir = py.path.local.make_numbered_dir(prefix='ctypes_configure-')
+
class ExternalCompilationInfo(object):
    # Immutable value object describing everything needed to compile
    # and link against some external C code.  All attributes are stored
    # as tuples, so instances are hashable and comparable by value.

    _ATTRIBUTES = ['pre_include_lines', 'includes', 'include_dirs',
                   'post_include_lines', 'libraries', 'library_dirs',
                   'separate_module_sources', 'separate_module_files']
    # Attributes deduplicated (order-preserving) when merge()ing.
    _AVOID_DUPLICATES = ['separate_module_files', 'libraries', 'includes',
                         'include_dirs', 'library_dirs', 'separate_module_sources']

    # NOTE: the [] defaults are safe here because each value is only
    # read and converted to a tuple, never mutated.
    def __init__(self,
                 pre_include_lines=[],
                 includes=[],
                 include_dirs=[],
                 post_include_lines=[],
                 libraries=[],
                 library_dirs=[],
                 separate_module_sources=[],
                 separate_module_files=[]):
        """
        pre_include_lines: list of lines that should be put at the top
        of the generated .c files, before any #include.  They shouldn't
        contain an #include themselves.

        includes: list of .h file names to be #include'd from the
        generated .c files.

        include_dirs: list of dir names that is passed to the C compiler

        post_include_lines: list of lines that should be put at the top
        of the generated .c files, after the #includes.

        libraries: list of library names that is passed to the linker

        library_dirs: list of dir names that is passed to the linker

        separate_module_sources: list of multiline strings that are
        each written to a .c file and compiled separately and linked
        later on.  (If function prototypes are needed for other .c files
        to access this, they can be put in post_include_lines.)

        separate_module_files: list of .c file names that are compiled
        separately and linked later on.  (If an .h file is needed for
        other .c files to access this, it can be put in includes.)
        """
        for name in self._ATTRIBUTES:
            value = locals()[name]
            assert isinstance(value, (list, tuple))
            setattr(self, name, tuple(value))

    def _value(self):
        # Tuple of all attributes, used for hashing and equality.
        return tuple([getattr(self, x) for x in self._ATTRIBUTES])

    def __hash__(self):
        return hash(self._value())

    def __eq__(self, other):
        return self.__class__ is other.__class__ and \
               self._value() == other._value()

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        info = []
        for attr in self._ATTRIBUTES:
            val = getattr(self, attr)
            info.append("%s=%s" % (attr, repr(val)))
        return "<ExternalCompilationInfo (%s)>" % ", ".join(info)

    def merge(self, *others):
        # Combine self with any number of other infos into a new one.
        # Attributes in _AVOID_DUPLICATES keep only the first occurrence
        # of each element (order preserved); the rest are concatenated.
        others = list(others)
        attrs = {}
        for name in self._ATTRIBUTES:
            if name not in self._AVOID_DUPLICATES:
                s = []
                for i in [self] + others:
                    s += getattr(i, name)
                attrs[name] = s
            else:
                s = set()
                attr = []
                for one in [self] + others:
                    for elem in getattr(one, name):
                        if elem not in s:
                            s.add(elem)
                            attr.append(elem)
                attrs[name] = attr
        return ExternalCompilationInfo(**attrs)

    def write_c_header(self, fileobj):
        # Emit pre-include lines, the #includes, then post-include lines.
        for line in self.pre_include_lines:
            print >> fileobj, line
        for path in self.includes:
            print >> fileobj, '#include <%s>' % (path,)
        for line in self.post_include_lines:
            print >> fileobj, line

    def _copy_attributes(self):
        d = {}
        for attr in self._ATTRIBUTES:
            d[attr] = getattr(self, attr)
        return d

    def convert_sources_to_files(self, cache_dir=None, being_main=False):
        # Materialize separate_module_sources into numbered .c files in
        # cache_dir and return a new info with separate_module_files
        # extended accordingly (self is returned unchanged when there is
        # nothing to write).
        if not self.separate_module_sources:
            return self
        if cache_dir is None:
            cache_dir = configdir.join('module_cache').ensure(dir=1)
        num = 0
        files = []
        for source in self.separate_module_sources:
            while 1:
                # find the first unused module_<n>.c name
                filename = cache_dir.join('module_%d.c' % num)
                num += 1
                if not filename.check():
                    break
            f = filename.open("w")
            if being_main:
                # NOTE(review): writes NOT_MAIN_FILE when being_main is
                # true -- looks inverted; confirm against callers.
                f.write("#define PYPY_NOT_MAIN_FILE\n")
            self.write_c_header(f)
            source = str(source)
            f.write(source)
            if not source.endswith('\n'):
                f.write('\n')
            f.close()
            files.append(str(filename))
        d = self._copy_attributes()
        d['separate_module_sources'] = ()
        d['separate_module_files'] += tuple(files)
        return ExternalCompilationInfo(**d)

    def compile_shared_lib(self):
        # Compile all separate module files into one shared library and
        # return a new info that links against it instead.
        self = self.convert_sources_to_files()
        if not self.separate_module_files:
            return self
        lib = compile_c_module([], 'externmod', self)
        d = self._copy_attributes()
        d['libraries'] += (lib,)
        d['separate_module_files'] = ()
        d['separate_module_sources'] = ()
        return ExternalCompilationInfo(**d)
+
# Platform-specific shared-library suffix used when naming built modules.
if sys.platform == 'win32':
    so_ext = '.dll'
else:
    so_ext = '.so'
+
def compiler_command():
    """Return the user-supplied compiler command template, or None.

    Read from the PYPY_CC environment variable; e.g. for tcc one might
    set it to "tcc -shared -o %s.so %s.c".
    """
    return os.environ.get('PYPY_CC')
+
def enable_fast_compilation():
    """Patch distutils' OPT config var so C files compile at -O0.

    Uses '/' flag syntax on MSVC and '-' elsewhere; any existing -O<n>
    flag is rewritten, otherwise -O0 is used as the whole OPT value.
    """
    from distutils import sysconfig
    dash = '/' if sys.platform == 'win32' else '-'
    config_vars = sysconfig.get_config_vars()
    existing = config_vars.get('OPT')   # may be missing entirely
    if existing:
        new_opt = re.sub('%sO\d+' % dash, '%sO0' % dash, existing)
    else:
        new_opt = '%sO0' % dash
    config_vars['OPT'] = new_opt
+
def ensure_correct_math():
    # On MSVC only: add /Op (improved floating-point consistency) to the
    # OPT flags.  No-op elsewhere.
    if sys.platform != 'win32':
        return # so far
    from distutils import sysconfig
    gcv = sysconfig.get_config_vars()
    opt = gcv.get('OPT') # not always existent
    if opt and '/Op' not in opt:
        opt += '/Op'
    # NOTE(review): indentation was lost in transit; this write-back is
    # assumed to be unconditional -- confirm against upstream cbuild.py.
    gcv['OPT'] = opt
+
+
def try_compile(c_files, eci):
    """Return True iff the given sources build into an executable."""
    try:
        build_executable(c_files, eci)
    except (distutils.errors.CompileError,
            distutils.errors.LinkError):
        return False
    return True
+
def compile_c_module(cfiles, modbasename, eci, tmpdir=None):
    # Compile the given .c files (plus eci.separate_module_files) into a
    # Python extension module named modbasename (suffixed _<n> if the
    # name is taken) inside tmpdir, and return the resulting path.
    #try:
    #    from distutils.log import set_threshold
    #    set_threshold(10000)
    #except ImportError:
    #    print "ERROR IMPORTING"
    #    pass
    cfiles = [py.path.local(f) for f in cfiles]
    if tmpdir is None:
        tmpdir = configdir.join("module_cache").ensure(dir=1)
    num = 0
    cfiles += eci.separate_module_files
    include_dirs = list(eci.include_dirs)
    library_dirs = list(eci.library_dirs)
    if sys.platform == 'darwin': # support Fink & Darwinports
        for s in ('/sw/', '/opt/local/'):
            if s + 'include' not in include_dirs and \
               os.path.exists(s + 'include'):
                include_dirs.append(s + 'include')
            if s + 'lib' not in library_dirs and \
               os.path.exists(s + 'lib'):
                library_dirs.append(s + 'lib')

    # Pick the first module name whose shared object does not exist yet.
    num = 0
    modname = modbasename
    while 1:
        if not tmpdir.join(modname + so_ext).check():
            break
        num += 1
        modname = '%s_%d' % (modbasename, num)

    # Build from inside tmpdir; the previous cwd is restored at the end.
    lastdir = tmpdir.chdir()
    libraries = eci.libraries
    ensure_correct_math()
    try:
        if debug: print "modname", modname
        c = stdoutcapture.Capture(mixed_out_err = True)
        try:
            try:
                if compiler_command():
                    # User-supplied command template (PYPY_CC).
                    # GCC-ish options only
                    from distutils import sysconfig
                    gcv = sysconfig.get_config_vars()
                    cmd = compiler_command().replace('%s',
                                                     str(tmpdir.join(modname)))
                    for dir in [gcv['INCLUDEPY']] + list(include_dirs):
                        cmd += ' -I%s' % dir
                    for dir in library_dirs:
                        cmd += ' -L%s' % dir
                    os.system(cmd)
                else:
                    from distutils.dist import Distribution
                    from distutils.extension import Extension
                    from distutils.ccompiler import get_default_compiler
                    saved_environ = os.environ.items()
                    try:
                        # distutils.core.setup() is really meant for end-user
                        # interactive usage, because it eats most exceptions and
                        # turn them into SystemExits.  Instead, we directly
                        # instantiate a Distribution, which also allows us to
                        # ignore unwanted features like config files.
                        extra_compile_args = []
                        # ensure correct math on windows
                        if sys.platform == 'win32':
                            extra_compile_args.append('/Op') # get extra precision
                        if get_default_compiler() == 'unix':
                            old_version = False
                            try:
                                g = os.popen('gcc --version', 'r')
                                verinfo = g.read()
                                g.close()
                            except (OSError, IOError):
                                pass
                            else:
                                old_version = verinfo.startswith('2')
                            if not old_version:
                                extra_compile_args.extend(["-Wno-unused-label",
                                                           "-Wno-unused-variable"])
                        attrs = {
                            'name': "testmodule",
                            'ext_modules': [
                                Extension(modname, [str(cfile) for cfile in cfiles],
                                    include_dirs=include_dirs,
                                    library_dirs=library_dirs,
                                    extra_compile_args=extra_compile_args,
                                    libraries=list(libraries),)
                                ],
                            'script_name': 'setup.py',
                            'script_args': ['-q', 'build_ext', '--inplace', '--force'],
                            }
                        dist = Distribution(attrs)
                        if not dist.parse_command_line():
                            raise ValueError, "distutils cmdline parse error"
                        dist.run_commands()
                    finally:
                        # distutils may mutate os.environ; restore it.
                        for key, value in saved_environ:
                            if os.environ.get(key) != value:
                                os.environ[key] = value
            finally:
                # NOTE(review): with mixed_out_err both values from
                # c.done() are presumably the same stream -- confirm
                # against stdoutcapture.Capture.
                foutput, foutput = c.done()
                data = foutput.read()
                if data:
                    # keep the captured compiler chatter for debugging
                    fdump = open("%s.errors" % modname, "w")
                    fdump.write(data)
                    fdump.close()
            # XXX do we need to do some check on fout/ferr?
            # XXX not a nice way to import a module
        except:
            print >>sys.stderr, data
            raise
    finally:
        lastdir.chdir()
    return str(tmpdir.join(modname) + so_ext)
+
def make_module_from_c(cfile, eci):
    """Compile a single .c file into an extension module and import it."""
    source = py.path.local(cfile)
    name = source.purebasename
    compile_c_module([source], name, eci)
    return import_module_from_directory(source.dirpath(), name)
+
def import_module_from_directory(dir, modname):
    """Locate *modname* inside *dir* and import it, always closing the
    file handle that imp.find_module may have opened."""
    handle, pathname, description = imp.find_module(modname, [str(dir)])
    try:
        return imp.load_module(modname, handle, pathname, description)
    finally:
        if handle:
            handle.close()
+
+
def log_spawned_cmd(spawn):
    # Wrap a distutils 'spawn' callable so that, when the module-level
    # debug flag is set, each command line is printed before running.
    def spawn_and_log(cmd, *args, **kwds):
        if debug:
            print ' '.join(cmd)
        return spawn(cmd, *args, **kwds)
    return spawn_and_log
+
+
class ProfOpt(object):
    # Drives a two-pass profile-guided build: compile with
    # -fprofile-generate, run the probe workload, then rebuild with
    # -fprofile-use.
    #XXX assuming gcc style flags for now
    name = "profopt"

    def __init__(self, compiler):
        self.compiler = compiler

    def first(self):
        # Pass 1: instrumented build.
        self.build('-fprofile-generate')

    def probe(self, exe, args):
        # 'args' is a single string typically containing spaces
        # and quotes, which represents several arguments.
        os.system("'%s' %s" % (exe, args))

    def after(self):
        # Pass 2: rebuild using the collected profile data.
        self.build('-fprofile-use')

    def build(self, option):
        # Temporarily add *option* to both compile and link flags.
        compiler = self.compiler
        compiler.compile_extra.append(option)
        compiler.link_extra.append(option)
        try:
            compiler._build()
        finally:
            compiler.compile_extra.pop()
            compiler.link_extra.pop()
+
class CCompiler:
    # Builds a standalone executable out of .c files via distutils'
    # low-level ccompiler (compile_c_module, by contrast, builds a
    # Python extension module).

    def __init__(self, cfilenames, eci, outputfilename=None,
                 compiler_exe=None, profbased=None):
        self.cfilenames = cfilenames
        ext = ''
        self.compile_extra = []
        self.link_extra = []
        self.libraries = list(eci.libraries)
        self.include_dirs = list(eci.include_dirs)
        self.library_dirs = list(eci.library_dirs)
        self.compiler_exe = compiler_exe
        self.profbased = profbased
        if not sys.platform in ('win32', 'darwin'): # xxx
            # generic unix: link libm/libpthread and optimize hard
            if 'm' not in self.libraries:
                self.libraries.append('m')
            if 'pthread' not in self.libraries:
                self.libraries.append('pthread')
            self.compile_extra += ['-O3', '-fomit-frame-pointer', '-pthread']
            self.link_extra += ['-pthread']
        if sys.platform == 'win32':
            self.link_extra += ['/DEBUG'] # generate .pdb file
        if sys.platform == 'darwin':
            # support Fink & Darwinports
            for s in ('/sw/', '/opt/local/'):
                if s + 'include' not in self.include_dirs and \
                   os.path.exists(s + 'include'):
                    self.include_dirs.append(s + 'include')
                if s + 'lib' not in self.library_dirs and \
                   os.path.exists(s + 'lib'):
                    self.library_dirs.append(s + 'lib')
            self.compile_extra += ['-O3', '-fomit-frame-pointer']

        if outputfilename is None:
            # default: first source file's name without its extension
            self.outputfilename = py.path.local(cfilenames[0]).new(ext=ext)
        else:
            self.outputfilename = py.path.local(outputfilename)
        self.eci = eci

    def build(self, noerr=False):
        # Run _build() with stdout/stderr captured; captured output is
        # saved to <basename>.errors and echoed on failure unless noerr.
        basename = self.outputfilename.new(ext='')
        data = ''
        try:
            saved_environ = os.environ.copy()
            c = stdoutcapture.Capture(mixed_out_err = True)
            try:
                self._build()
            finally:
                # workaround for a distutils bugs where some env vars can
                # become longer and longer every time it is used
                for key, value in saved_environ.items():
                    if os.environ.get(key) != value:
                        os.environ[key] = value
                # NOTE(review): both values from c.done() are presumably
                # the same stream under mixed_out_err -- confirm.
                foutput, foutput = c.done()
                data = foutput.read()
                if data:
                    fdump = basename.new(ext='errors').open("w")
                    fdump.write(data)
                    fdump.close()
        except:
            if not noerr:
                print >>sys.stderr, data
            raise

    def _build(self):
        # Compile each source in its own directory, then link everything
        # into self.outputfilename.
        from distutils.ccompiler import new_compiler
        compiler = new_compiler(force=1)
        if self.compiler_exe is not None:
            # override the executable of every toolchain entry point
            for c in '''compiler compiler_so compiler_cxx
                        linker_exe linker_so'''.split():
                compiler.executables[c][0] = self.compiler_exe
        compiler.spawn = log_spawned_cmd(compiler.spawn)
        objects = []
        for cfile in self.cfilenames:
            cfile = py.path.local(cfile)
            old = cfile.dirpath().chdir()
            try:
                res = compiler.compile([cfile.basename],
                                       include_dirs=self.eci.include_dirs,
                                       extra_preargs=self.compile_extra)
                assert len(res) == 1
                cobjfile = py.path.local(res[0])
                assert cobjfile.check()
                objects.append(str(cobjfile))
            finally:
                old.chdir()
        compiler.link_executable(objects, str(self.outputfilename),
                                 libraries=self.eci.libraries,
                                 extra_preargs=self.link_extra,
                                 library_dirs=self.eci.library_dirs)
+
def build_executable(*args, **kwds):
    """Build an executable through CCompiler and return its path as str.

    The optional keyword 'noerr' suppresses echoing of captured compiler
    output on failure; all other arguments go to CCompiler().
    """
    quiet = kwds.pop('noerr', False)
    cc = CCompiler(*args, **kwds)
    cc.build(noerr=quiet)
    return str(cc.outputfilename)
diff --git a/lib/pypy/ctypes_configure/configure.py b/lib/pypy/ctypes_configure/configure.py
new file mode 100755
--- /dev/null
+++ b/lib/pypy/ctypes_configure/configure.py
@@ -0,0 +1,619 @@
+#! /usr/bin/env python
+
+import os, py, sys
+import ctypes
+from ctypes_configure.cbuild import build_executable, configdir, try_compile
+from ctypes_configure.cbuild import ExternalCompilationInfo
+import distutils
+
+# ____________________________________________________________
+#
+# Helpers for simple cases
+
def eci_from_header(c_header_source):
    """Wrap a C header snippet into an ExternalCompilationInfo whose
    pre-include lines are the snippet's individual lines."""
    header_lines = c_header_source.split("\n")
    return ExternalCompilationInfo(pre_include_lines=header_lines)
+
+
def getstruct(name, c_header_source, interesting_fields):
    # One-shot helper: probe a single struct and return the generated
    # ctypes Structure class.
    class CConfig:
        _compilation_info_ = eci_from_header(c_header_source)
        STRUCT = Struct(name, interesting_fields)
    return configure(CConfig)['STRUCT']
+
def getsimpletype(name, c_header_source, ctype_hint=ctypes.c_int):
    # One-shot helper: resolve a typedef'd numeric type to a ctypes type.
    class CConfig:
        _compilation_info_ = eci_from_header(c_header_source)
        TYPE = SimpleType(name, ctype_hint)
    return configure(CConfig)['TYPE']
+
def getconstantinteger(name, c_header_source):
    # One-shot helper: read the value of an integer constant.
    class CConfig:
        _compilation_info_ = eci_from_header(c_header_source)
        CONST = ConstantInteger(name)
    return configure(CConfig)['CONST']
+
def getdefined(macro, c_header_source):
    # One-shot helper: True iff *macro* is #defined by the header.
    class CConfig:
        _compilation_info_ = eci_from_header(c_header_source)
        DEFINED = Defined(macro)
    return configure(CConfig)['DEFINED']
+
def has(name, c_header_source):
    # One-shot helper: True iff the expression *name* compiles and links
    # (e.g. an externally-defined function or variable exists).
    class CConfig:
        _compilation_info_ = eci_from_header(c_header_source)
        HAS = Has(name)
    return configure(CConfig)['HAS']
+
def check_eci(eci):
    """Check if a given ExternalCompilationInfo compiles and links."""
    class CConfig:
        _compilation_info_ = eci
        WORKS = Works()
    return configure(CConfig)['WORKS']
+
def sizeof(name, eci, **kwds):
    # One-shot helper: sizeof(name) in bytes.  Extra keyword arguments
    # become additional attributes on the throwaway CConfig class.
    class CConfig:
        _compilation_info_ = eci
        SIZE = SizeOf(name)
    for k, v in kwds.items():
        setattr(CConfig, k, v)
    return configure(CConfig)['SIZE']
+
def memory_alignment():
    """Return the alignment (in bytes) of memory allocations.
    This is enough to make sure a structure with pointers and 'double'
    fields is properly aligned."""
    global _memory_alignment
    if _memory_alignment is None:
        # Probe once via a struct containing the two "worst" members.
        S = getstruct('struct memory_alignment_test', """
            struct memory_alignment_test {
                double d;
                void* p;
            };
        """, [])
        result = ctypes.alignment(S)
        assert result & (result-1) == 0, "not a power of two??"
        _memory_alignment = result
    return _memory_alignment
# Module-level cache for memory_alignment().
_memory_alignment = None
+
+# ____________________________________________________________
+#
+# General interface
+
class ConfigResult:
    """Accumulates per-entry results while a CConfig class is processed.

    ``entries`` maps CConfigEntry instance -> attribute name; ``info``
    maps attribute name -> parsed dump-section dict; ``result`` caches
    the final build_result() value per entry.
    """
    def __init__(self, CConfig, info, entries):
        self.CConfig = CConfig
        self.result = {}
        self.info = info
        self.entries = entries

    def get_entry_result(self, entry):
        # Compute (and cache) the result for one entry.
        try:
            return self.result[entry]
        except KeyError:
            pass
        name = self.entries[entry]
        info = self.info[name]
        self.result[entry] = entry.build_result(info, self)
        # Bug fix: this used to fall off the end and return None on the
        # first (cache-miss) call; nested Struct fields rely on getting
        # the real ctypes type back.
        return self.result[entry]

    def get_result(self):
        # Final mapping: attribute name -> computed value.
        return dict([(name, self.result[entry])
                     for entry, name in self.entries.iteritems()])
+
+
class _CWriter(object):
    """ A simple class which aggregates config parts
    """
    def __init__(self, CConfig):
        self.path = uniquefilepath()
        self.f = self.path.open("w")
        self.config = CConfig

    def write_header(self):
        # Emit the CConfig's C header plus the common dump() helper.
        f = self.f
        CConfig = self.config
        CConfig._compilation_info_.write_c_header(f)
        print >> f, C_HEADER
        print >> f

    def write_entry(self, key, entry):
        # Emit one dump_section_<key>() function from the entry's
        # prepare_code() lines; non-preprocessor lines get tab-indented.
        f = self.f
        print >> f, 'void dump_section_%s(void) {' % (key,)
        for line in entry.prepare_code():
            if line and line[0] != '#':
                line = '\t' + line
            print >> f, line
        print >> f, '}'
        print >> f

    def write_entry_main(self, key):
        # Call one section from main(), bracketed by the "-+-"/"---"
        # markers that run_example_code() parses.
        print >> self.f, '\tprintf("-+- %s\\n");' % (key,)
        print >> self.f, '\tdump_section_%s();' % (key,)
        print >> self.f, '\tprintf("---\\n");'

    def start_main(self):
        print >> self.f, 'int main(int argc, char *argv[]) {'

    def close(self):
        f = self.f
        print >> f, '\treturn 0;'
        print >> f, '}'
        f.close()

    def ask_gcc(self, question):
        # Yes/no probe: put *question* inside main() and report whether
        # the whole file compiles and links.
        self.start_main()
        self.f.write(question + "\n")
        self.close()
        eci = self.config._compilation_info_
        return try_compile([self.path], eci)
+
+
def configure(CConfig, noerr=False):
    """Examine the local system by running the C compiler.
    The CConfig class contains CConfigEntry attributes that describe
    what should be inspected; configure() returns a dict mapping
    names to the results.
    """
    # Reject attribute names from the pre-ExternalCompilationInfo API.
    # (typo fix: the message used to say "attribut"/"attribues")
    for attr in ['_includes_', '_libraries_', '_sources_', '_library_dirs_',
                 '_include_dirs_', '_header_']:
        assert not hasattr(CConfig, attr), "Found legacy attribute %s on CConfig" % (attr,)
    entries = []
    for key in dir(CConfig):
        value = getattr(CConfig, key)
        if isinstance(value, CConfigEntry):
            entries.append((key, value))

    if entries:   # can be empty if there are only CConfigSingleEntries
        # Generate one C program containing every entry's dump section...
        writer = _CWriter(CConfig)
        writer.write_header()
        for key, entry in entries:
            writer.write_entry(key, entry)

        writer.start_main()
        for key, entry in entries:
            writer.write_entry_main(key)
        writer.close()

        # ...run it once, and parse one info dict back per entry.
        eci = CConfig._compilation_info_
        infolist = list(run_example_code(writer.path, eci, noerr=noerr))
        assert len(infolist) == len(entries)

        resultinfo = {}
        resultentries = {}
        for info, (key, entry) in zip(infolist, entries):
            resultinfo[key] = info
            resultentries[entry] = key

        result = ConfigResult(CConfig, resultinfo, resultentries)
        for name, entry in entries:
            result.get_entry_result(entry)
        res = result.get_result()
    else:
        res = {}

    # CConfigSingleEntries (Has/Works) each need their own compile run.
    for key in dir(CConfig):
        value = getattr(CConfig, key)
        if isinstance(value, CConfigSingleEntry):
            writer = _CWriter(CConfig)
            writer.write_header()
            res[key] = value.question(writer.ask_gcc)
    return res
+
+# ____________________________________________________________
+
+
class CConfigEntry(object):
    """Abstract base class for probes answered by parsing the output of
    a generated C program (see configure())."""
+
class Struct(CConfigEntry):
    """An entry in a CConfig class that stands for an externally
    defined structure.
    """
    def __init__(self, name, interesting_fields, ifdef=None):
        self.name = name
        self.interesting_fields = interesting_fields
        self.ifdef = ifdef

    def prepare_code(self):
        # C code that dumps the struct's size, alignment (via the
        # char+struct trick) and, per field, offset/size/signedness.
        if self.ifdef is not None:
            yield '#ifdef %s' % (self.ifdef,)
        yield 'typedef %s ctypesplatcheck_t;' % (self.name,)
        yield 'typedef struct {'
        yield '    char c;'
        yield '    ctypesplatcheck_t s;'
        yield '} ctypesplatcheck2_t;'
        yield ''
        yield 'ctypesplatcheck_t s;'
        if self.ifdef is not None:
            yield 'dump("defined", 1);'
        yield 'dump("align", offsetof(ctypesplatcheck2_t, s));'
        yield 'dump("size",  sizeof(ctypesplatcheck_t));'
        for fieldname, fieldtype in self.interesting_fields:
            yield 'dump("fldofs %s", offsetof(ctypesplatcheck_t, %s));'%(
                fieldname, fieldname)
            yield 'dump("fldsize %s",   sizeof(s.%s));' % (
                fieldname, fieldname)
            if fieldtype in integer_class:
                # all-ones pattern: positive iff the field is unsigned
                yield 's.%s = 0; s.%s = ~s.%s;' % (fieldname,
                                                   fieldname,
                                                   fieldname)
                yield 'dump("fldunsigned %s", s.%s > 0);' % (fieldname,
                                                             fieldname)
        if self.ifdef is not None:
            yield '#else'
            yield 'dump("defined", 0);'
            yield '#endif'

    def build_result(self, info, config_result):
        # Reconstruct a ctypes.Structure matching the probed layout.
        if self.ifdef is not None:
            if not info['defined']:
                return None
        alignment = 1
        # layout: one slot per byte, filled with Field objects
        layout = [None] * info['size']
        for fieldname, fieldtype in self.interesting_fields:
            if isinstance(fieldtype, Struct):
                # nested struct: use its own configured ctypes type
                offset = info['fldofs '  + fieldname]
                size   = info['fldsize ' + fieldname]
                c_fieldtype = config_result.get_entry_result(fieldtype)
                layout_addfield(layout, offset, c_fieldtype, fieldname)
                alignment = max(alignment, ctype_alignment(c_fieldtype))
            else:
                offset = info['fldofs '  + fieldname]
                size   = info['fldsize ' + fieldname]
                sign   = info.get('fldunsigned ' + fieldname, False)
                if (size, sign) != size_and_sign(fieldtype):
                    # hinted ctype doesn't match reality; pick one that does
                    fieldtype = fixup_ctype(fieldtype, fieldname, (size, sign))
                layout_addfield(layout, offset, fieldtype, fieldname)
                alignment = max(alignment, ctype_alignment(fieldtype))

        # try to enforce the same alignment as the one of the original
        # structure
        if alignment < info['align']:
            choices = [ctype for ctype in alignment_types
                       if ctype_alignment(ctype) == info['align']]
            assert choices, "unsupported alignment %d" % (info['align'],)
            choices = [(ctypes.sizeof(ctype), i, ctype)
                       for i, ctype in enumerate(choices)]
            csize, _, ctype = min(choices)
            for i in range(0, info['size'] - csize + 1, info['align']):
                if layout[i:i+csize] == [None] * csize:
                    layout_addfield(layout, i, ctype, '_alignment')
                    break
            else:
                raise AssertionError("unenforceable alignment %d" % (
                    info['align'],))

        # fill the remaining holes with one-byte padding fields
        n = 0
        for i, cell in enumerate(layout):
            if cell is not None:
                continue
            layout_addfield(layout, i, ctypes.c_char, '_pad%d' % (n,))
            n += 1

        # build the ctypes Structure
        seen = {}
        fields = []
        for cell in layout:
            if cell in seen:
                continue
            fields.append((cell.name, cell.ctype))
            seen[cell] = True

        class S(ctypes.Structure):
            _fields_ = fields
        name = self.name
        if name.startswith('struct '):
            name = name[7:]
        S.__name__ = name
        return S
+
class SimpleType(CConfigEntry):
    """An entry in a CConfig class that stands for an externally
    defined simple numeric type.
    """
    def __init__(self, name, ctype_hint=ctypes.c_int, ifdef=None):
        self.name = name
        self.ctype_hint = ctype_hint
        self.ifdef = ifdef

    def prepare_code(self):
        # Dump the type's size and (for integer hints) its signedness.
        if self.ifdef is not None:
            yield '#ifdef %s' % (self.ifdef,)
        yield 'typedef %s ctypesplatcheck_t;' % (self.name,)
        yield ''
        yield 'ctypesplatcheck_t x;'
        if self.ifdef is not None:
            yield 'dump("defined", 1);'
        yield 'dump("size",  sizeof(ctypesplatcheck_t));'
        if self.ctype_hint in integer_class:
            # all-ones pattern: positive iff the type is unsigned
            yield 'x = 0; x = ~x;'
            yield 'dump("unsigned", x > 0);'
        if self.ifdef is not None:
            yield '#else'
            yield 'dump("defined", 0);'
            yield '#endif'

    def build_result(self, info, config_result):
        # Return the hint if it matches the probed size/sign, otherwise
        # a ctypes type that does (or None if the ifdef was not set).
        if self.ifdef is not None and not info['defined']:
            return None
        size = info['size']
        sign = info.get('unsigned', False)
        ctype = self.ctype_hint
        if (size, sign) != size_and_sign(ctype):
            ctype = fixup_ctype(ctype, self.name, (size, sign))
        return ctype
+
class ConstantInteger(CConfigEntry):
    """An entry in a CConfig class that stands for an externally
    defined integer constant.
    """
    def __init__(self, name):
        self.name = name

    def prepare_code(self):
        # Print the value as signed or unsigned long long depending on
        # its sign, so the full range of either is representable.
        yield 'if ((%s) < 0) {' % (self.name,)
        yield '    long long x = (long long)(%s);' % (self.name,)
        yield '    printf("value: %lld\\n", x);'
        yield '} else {'
        yield '    unsigned long long x = (unsigned long long)(%s);' % (
                self.name,)
        yield '    printf("value: %llu\\n", x);'
        yield '}'

    def build_result(self, info, config_result):
        return info['value']
+
class DefinedConstantInteger(CConfigEntry):
    """An entry in a CConfig class that stands for an externally
    defined integer constant.  If not #defined the value will be None.
    """
    def __init__(self, macro):
        self.name = self.macro = macro

    def prepare_code(self):
        # Same dump as ConstantInteger, but guarded by #ifdef.
        yield '#ifdef %s' % self.macro
        yield 'dump("defined", 1);'
        yield 'if ((%s) < 0) {' % (self.macro,)
        yield '    long long x = (long long)(%s);' % (self.macro,)
        yield '    printf("value: %lld\\n", x);'
        yield '} else {'
        yield '    unsigned long long x = (unsigned long long)(%s);' % (
                self.macro,)
        yield '    printf("value: %llu\\n", x);'
        yield '}'
        yield '#else'
        yield 'dump("defined", 0);'
        yield '#endif'

    def build_result(self, info, config_result):
        if info["defined"]:
            return info['value']
        return None
+
+
class DefinedConstantString(CConfigEntry):
    """An entry for a #defined string constant; the result is the
    string's value, or None when the macro is not #defined.
    """
    def __init__(self, macro):
        self.macro = macro
        self.name = macro

    def prepare_code(self):
        # Dump each character as its integer code ("value_<i>: <code>"),
        # since dump() itself only handles integers.
        yield '#ifdef %s' % self.macro
        yield 'int i;'
        yield 'char *p = %s;' % self.macro
        yield 'dump("defined", 1);'
        yield 'for (i = 0; p[i] != 0; i++ ) {'
        yield '  printf("value_%d: %d\\n", i, (int)(unsigned char)p[i]);'
        yield '}'
        yield '#else'
        yield 'dump("defined", 0);'
        yield '#endif'

    def build_result(self, info, config_result):
        # Reassemble the string from the per-character dumps.
        if info["defined"]:
            string = ''
            d = 0
            while info.has_key('value_%d' % d):
                string += chr(info['value_%d' % d])
                d += 1
            return string
        return None
+
+
class Defined(CConfigEntry):
    """A boolean, corresponding to an #ifdef.
    """
    def __init__(self, macro):
        self.macro = macro
        self.name = macro

    def prepare_code(self):
        yield '#ifdef %s' % (self.macro,)
        yield 'dump("defined", 1);'
        yield '#else'
        yield 'dump("defined", 0);'
        yield '#endif'

    def build_result(self, info, config_result):
        return bool(info['defined'])
+
class CConfigSingleEntry(object):
    """ An abstract class of type which requires
    gcc succeeding/failing instead of only asking
    """
    pass
+
class Has(CConfigSingleEntry):
    # True iff the expression *name* compiles and links as a statement,
    # i.e. the symbol exists.
    def __init__(self, name):
        self.name = name

    def question(self, ask_gcc):
        return ask_gcc(self.name + ';')
+
class Works(CConfigSingleEntry):
    # True iff the compilation info alone (headers, libs) builds cleanly.
    def question(self, ask_gcc):
        return ask_gcc("")
+
class SizeOf(CConfigEntry):
    """An entry in a CConfig class that stands for
    some external opaque type
    """
    def __init__(self, name):
        self.name = name

    def prepare_code(self):
        yield 'dump("size",  sizeof(%s));' % self.name

    def build_result(self, info, config_result):
        return info['size']
+
+# ____________________________________________________________
+#
+# internal helpers
+
def ctype_alignment(c_type):
    """Alignment in bytes of a ctypes type, recursing into Structures
    (a struct's alignment is the max of its fields' alignments)."""
    if issubclass(c_type, ctypes.Structure):
        field_aligns = [ctype_alignment(fld_type)
                        for fld_name, fld_type in c_type._fields_]
        return max(field_aligns)
    return ctypes.alignment(c_type)
+
def uniquefilepath(LAST=[0]):
    # Mutable-default counter: each call yields a fresh numbered
    # ctypesplatcheck_<n>.c path inside configdir.
    i = LAST[0]
    LAST[0] += 1
    return configdir.join('ctypesplatcheck_%d.c' % i)
+
# Candidate ctypes types tried when a reconstructed struct needs an
# extra member to reach the probed alignment.
alignment_types = [
    ctypes.c_short,
    ctypes.c_int,
    ctypes.c_long,
    ctypes.c_float,
    ctypes.c_double,
    ctypes.c_char_p,
    ctypes.c_void_p,
    ctypes.c_longlong,
    ctypes.c_wchar,
    ctypes.c_wchar_p,
    ]

# Integer ctypes, used for signedness probing and size/sign fixups.
integer_class = [ctypes.c_byte,     ctypes.c_ubyte,
                 ctypes.c_short,    ctypes.c_ushort,
                 ctypes.c_int,      ctypes.c_uint,
                 ctypes.c_long,     ctypes.c_ulong,
                 ctypes.c_longlong, ctypes.c_ulonglong,
                 ]
# Floating-point ctypes, used for size fixups.
float_class = [ctypes.c_float, ctypes.c_double]
+
class Field(object):
    """A named slot (name + ctypes type) in a reconstructed layout."""
    def __init__(self, name, ctype):
        self.name, self.ctype = name, ctype

    def __repr__(self):
        return '<field ' + str(self.name) + ': ' + str(self.ctype) + '>'
+
def layout_addfield(layout, offset, ctype, prefix):
    """Place a new Field of type *ctype* at byte *offset* of *layout*
    (a per-byte list), naming it *prefix* (suffixed _<i> on collision).

    Returns the Field.  Asserts that the occupied byte range was free.
    """
    size = ctypes.sizeof(ctype)
    name = prefix
    i = 0
    while name in layout:
        i += 1
        name = '%s_%d' % (prefix, i)
    field = Field(name, ctype)
    for i in range(offset, offset+size):
        # Bug fix: the assert message referenced the undefined name
        # 'fieldname', so any overlap raised NameError instead of the
        # intended AssertionError.
        assert layout[i] is None, "%s overlaps %r" % (name, layout[i])
        layout[i] = field
    return field
+
def size_and_sign(ctype):
    """Return (sizeof(ctype), unsigned?); the unsigned flag is only
    meaningful for the types listed in integer_class."""
    is_unsigned = ctype in integer_class and ctype(-1).value > 0
    return (ctypes.sizeof(ctype), is_unsigned)
+
def fixup_ctype(fieldtype, fieldname, expected_size_and_sign):
    # Replace *fieldtype* with a ctypes type whose (size, sign) matches
    # what the C probe reported; char/ubyte arrays are resized in place.
    for typeclass in [integer_class, float_class]:
        if fieldtype in typeclass:
            for ctype in typeclass:
                if size_and_sign(ctype) == expected_size_and_sign:
                    return ctype
    if (hasattr(fieldtype, '_length_')
        and getattr(fieldtype, '_type_', None) == ctypes.c_char):
        # for now, assume it is an array of chars; otherwise we'd also
        # have to check the exact integer type of the elements of the array
        size, sign = expected_size_and_sign
        return ctypes.c_char * size
    if (hasattr(fieldtype, '_length_')
        and getattr(fieldtype, '_type_', None) == ctypes.c_ubyte):
        # grumble, fields of type 'c_char array' have automatic cast-to-
        # Python-string behavior in ctypes, which may not be what you
        # want, so here is the same with c_ubytes instead...
        size, sign = expected_size_and_sign
        return ctypes.c_ubyte * size
    raise TypeError("conflicting field type %r for %r" % (fieldtype,
                                                          fieldname))
+
+
+C_HEADER = """
+#include <stdio.h>
+#include <stddef.h> /* for offsetof() */
+#include <stdint.h> /* FreeBSD: for uint64_t */
+
+void dump(char* key, int value) {
+ printf("%s: %d\\n", key, value);
+}
+"""
+
def run_example_code(filepath, eci, noerr=False):
    # Build and run the generated checker program; a generator yielding
    # one {key: int} dict per "-+-"/"---" delimited output section.
    executable = build_executable([filepath], eci, noerr=noerr)
    output = py.process.cmdexec(executable)
    section = None
    for line in output.splitlines():
        line = line.strip()
        if line.startswith('-+- '):      # start of a new section
            section = {}
        elif line == '---':              # section end
            assert section is not None
            yield section
            section = None
        elif line:
            assert section is not None
            key, value = line.split(': ')
            section[key] = int(value)
+
+# ____________________________________________________________
+
def get_python_include_dir():
    """Return the directory that contains Python.h (INCLUDEPY)."""
    from distutils import sysconfig
    config_vars = sysconfig.get_config_vars()
    return config_vars['INCLUDEPY']
+
if __name__ == '__main__':
    # Tiny command-line front-end: -h adds a header, then the struct
    # name followed by (fieldname, ctypes-type-name) pairs.
    doc = """Example:

    ctypes_platform.py -h sys/types.h -h netinet/in.h
                       'struct sockaddr_in'
                       sin_port c_int
    """
    import sys, getopt
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'h:')
    if not args:
        print >> sys.stderr, doc
    else:
        # one struct name plus name/type pairs => odd argument count
        assert len(args) % 2 == 1
        headers = []
        for opt, value in opts:
            if opt == '-h':
                headers.append('#include <%s>' % (value,))
        name = args[0]
        fields = []
        for i in range(1, len(args), 2):
            # resolve the type name against the ctypes module
            ctype = getattr(ctypes, args[i+1])
            fields.append((args[i], ctype))

        S = getstruct(name, '\n'.join(headers), fields)

        for key, value in S._fields_:
            print key, value
diff --git a/lib/pypy/ctypes_configure/doc/configure.html b/lib/pypy/ctypes_configure/doc/configure.html
new file mode 100644
--- /dev/null
+++ b/lib/pypy/ctypes_configure/doc/configure.html
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=latin1" />
+<meta name="generator" content="Docutils 0.4.1: http://docutils.sourceforge.net/" />
+<title>ctypes configure</title>
+</head>
+<body>
+<div class="document" id="ctypes-configure">
+<h1 class="title">ctypes configure</h1>
+<div class="section">
+<h1><a id="idea" name="idea">idea</a></h1>
+<p>One of ctypes' problems is that ctypes programs are usually not very
+platform-independent. We created ctypes_configure, which invokes gcc
+to discover various platform-dependent details such as the
+exact sizes of types (for example size_t), #defines, and the exact layout
+of structures. In this regard it replaces the h2py code generator.</p>
+</div>
+<div class="section">
+<h1><a id="installation" name="installation">installation</a></h1>
+<p><tt class="docutils literal"><span class="pre">easy_install</span> <span class="pre">ctypes_configure</span></tt></p>
+</div>
+<div class="section">
+<h1><a id="usage" name="usage">usage</a></h1>
+<p><a class="reference" href="http://codespeak.net/svn/pypy/dist/ctypes_configure/doc/sample.py">sample.py</a> explains in details how to use it.</p>
+</div>
+</div>
+</body>
+</html>
diff --git a/lib/pypy/ctypes_configure/doc/configure.txt b/lib/pypy/ctypes_configure/doc/configure.txt
new file mode 100644
--- /dev/null
+++ b/lib/pypy/ctypes_configure/doc/configure.txt
@@ -0,0 +1,24 @@
+=================
+ctypes configure
+=================
+
+idea
+====
+
+One of ctypes' problems is that ctypes programs are usually not very
+platform-independent. We created ctypes_configure, which invokes gcc
+for various platform-dependent details like
+exact sizes of types (for example size\_t), #defines, exact outline
+of structures etc. It replaces in this regard code generator (h2py).
+
+installation
+============
+
+``easy_install ctypes_configure``
+
+usage
+=====
+
+`sample.py`_ explains in detail how to use it.
+
+.. _`sample.py`: http://codespeak.net/svn/pypy/dist/ctypes_configure/doc/sample.py
diff --git a/lib/pypy/ctypes_configure/doc/sample.py b/lib/pypy/ctypes_configure/doc/sample.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/ctypes_configure/doc/sample.py
@@ -0,0 +1,72 @@
+
+from ctypes_configure import configure
+import ctypes
+
+class CConfigure:
+ _compilation_info_ = configure.ExternalCompilationInfo(
+
+ # all lines landing in C header before includes
+ pre_include_lines = [],
+
+ # list of .h files to include
+ includes = ['time.h', 'sys/time.h', 'unistd.h'],
+
+ # list of directories to search for include files
+ include_dirs = [],
+
+ # all lines landing in C header after includes
+ post_include_lines = [],
+
+ # libraries to link with
+ libraries = [],
+
+ # library directories
+ library_dirs = [],
+
+ # additional C sources to compile with (that go to
+ # created .c files)
+ separate_module_sources = [],
+
+ # additional existing C source file names
+ separate_module_files = [],
+ )
+
+ # get real int type out of hint and name
+ size_t = configure.SimpleType('size_t', ctypes.c_int)
+
+ # grab value of numerical #define
+ NULL = configure.ConstantInteger('NULL')
+
+ # grab #define, whether it's defined or not
+ EXISTANT = configure.Defined('NULL')
+ NOT_EXISTANT = configure.Defined('XXXNOTNULL')
+
+ # check for existance of C functions
+ has_write = configure.Has('write')
+ no_xxxwrite = configure.Has('xxxwrite')
+
+ # check for size of type
+ sizeof_size_t = configure.SizeOf('size_t')
+
+ # structure, with given hints for interesting fields,
+ # types does not need to be too specific.
+ # all interesting fields would end up with right offset
+ # size and order
+ struct_timeval = configure.Struct('struct timeval',[
+ ('tv_sec', ctypes.c_int),
+ ('tv_usec', ctypes.c_int)])
+
+info = configure.configure(CConfigure)
+
+assert info['has_write']
+assert not info['no_xxxwrite']
+assert info['NULL'] == 0
+size_t = info['size_t']
+print "size_t in ctypes is ", size_t
+assert ctypes.sizeof(size_t) == info['sizeof_size_t']
+assert info['EXISTANT']
+assert not info['NOT_EXISTANT']
+print
+print "fields of struct timeval are "
+for name, value in info['struct_timeval']._fields_:
+ print " ", name, " ", value
diff --git a/lib/pypy/ctypes_configure/dumpcache.py b/lib/pypy/ctypes_configure/dumpcache.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/ctypes_configure/dumpcache.py
@@ -0,0 +1,46 @@
+import os, sys
+import ctypes
+
+
+def dumpcache(referencefilename, filename, config):
+ dirname = os.path.dirname(referencefilename)
+ filename = os.path.join(dirname, filename)
+ f = open(filename, 'w')
+ print >> f, 'import ctypes'
+ print >> f
+ names = config.keys()
+ names.sort()
+ print >> f, '__all__ = %r' % (tuple(names),)
+ print >> f
+ for key in names:
+ val = config[key]
+ if isinstance(val, (int, long)):
+ f.write("%s = %d\n" % (key, val))
+ elif val is None:
+ f.write("%s = None\n" % key)
+ elif isinstance(val, ctypes.Structure.__class__):
+ f.write("class %s(ctypes.Structure):\n" % key)
+ f.write(" _fields_ = [\n")
+ for k, v in val._fields_:
+ f.write(" ('%s', %s),\n" % (k, ctypes_repr(v)))
+ f.write(" ]\n")
+ elif isinstance(val, (tuple, list)):
+ for x in val:
+ assert isinstance(x, (int, long, str)), \
+ "lists of integers or strings only"
+ f.write("%s = %r\n" % (key, val))
+ else:
+ # a simple type, hopefully
+ f.write("%s = %s\n" % (key, ctypes_repr(val)))
+ f.close()
+ print 'Wrote %s.' % (filename,)
+ sys.stdout.flush()
+
+def ctypes_repr(cls):
+ # ctypes_configure does not support nested structs so far
+ # so let's ignore it
+ if isinstance(cls, ctypes._SimpleCData.__class__):
+ return "ctypes." + cls.__name__
+ if hasattr(cls, '_length_') and hasattr(cls, '_type_'): # assume an array
+ return '%s*%d' % (ctypes_repr(cls._type_), cls._length_)
+ raise NotImplementedError("saving of object with type %r" % type(cls))
diff --git a/lib/pypy/ctypes_configure/stdoutcapture.py b/lib/pypy/ctypes_configure/stdoutcapture.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/ctypes_configure/stdoutcapture.py
@@ -0,0 +1,73 @@
+"""
+A quick hack to capture stdout/stderr.
+"""
+
+import os, sys
+
+
+class Capture:
+
+ def __init__(self, mixed_out_err = False):
+ "Start capture of the Unix-level stdout and stderr."
+ if (not hasattr(os, 'tmpfile') or
+ not hasattr(os, 'dup') or
+ not hasattr(os, 'dup2') or
+ not hasattr(os, 'fdopen')):
+ self.dummy = 1
+ else:
+ self.dummy = 0
+ # make new stdout/stderr files if needed
+ self.localoutfd = os.dup(1)
+ self.localerrfd = os.dup(2)
+ if hasattr(sys.stdout, 'fileno') and sys.stdout.fileno() == 1:
+ self.saved_stdout = sys.stdout
+ sys.stdout = os.fdopen(self.localoutfd, 'w', 1)
+ else:
+ self.saved_stdout = None
+ if hasattr(sys.stderr, 'fileno') and sys.stderr.fileno() == 2:
+ self.saved_stderr = sys.stderr
+ sys.stderr = os.fdopen(self.localerrfd, 'w', 0)
+ else:
+ self.saved_stderr = None
+ self.tmpout = os.tmpfile()
+ if mixed_out_err:
+ self.tmperr = self.tmpout
+ else:
+ self.tmperr = os.tmpfile()
+ os.dup2(self.tmpout.fileno(), 1)
+ os.dup2(self.tmperr.fileno(), 2)
+
+ def done(self):
+ "End capture and return the captured text (stdoutfile, stderrfile)."
+ if self.dummy:
+ import cStringIO
+ return cStringIO.StringIO(), cStringIO.StringIO()
+ else:
+ os.dup2(self.localoutfd, 1)
+ os.dup2(self.localerrfd, 2)
+ if self.saved_stdout is not None:
+ f = sys.stdout
+ sys.stdout = self.saved_stdout
+ f.close()
+ else:
+ os.close(self.localoutfd)
+ if self.saved_stderr is not None:
+ f = sys.stderr
+ sys.stderr = self.saved_stderr
+ f.close()
+ else:
+ os.close(self.localerrfd)
+ self.tmpout.seek(0)
+ self.tmperr.seek(0)
+ return self.tmpout, self.tmperr
+
+
+if __name__ == '__main__':
+ # test
+ c = Capture()
+ try:
+ os.system('echo hello')
+ finally:
+ fout, ferr = c.done()
+ print 'Output:', `fout.read()`
+ print 'Error:', `ferr.read()`
diff --git a/lib/pypy/ctypes_configure/test/__init__.py b/lib/pypy/ctypes_configure/test/__init__.py
new file mode 100644
diff --git a/lib/pypy/ctypes_configure/test/test_configure.py b/lib/pypy/ctypes_configure/test/test_configure.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/ctypes_configure/test/test_configure.py
@@ -0,0 +1,212 @@
+import py, sys, struct
+from ctypes_configure import configure
+from ctypes_configure.cbuild import ExternalCompilationInfo
+import ctypes
+
+def test_dirent():
+ dirent = configure.getstruct("struct dirent",
+ """
+ struct dirent /* for this example only, not the exact dirent */
+ {
+ long d_ino;
+ int d_off;
+ unsigned short d_reclen;
+ char d_name[32];
+ };
+ """,
+ [("d_reclen", ctypes.c_ushort)])
+ assert issubclass(dirent, ctypes.Structure)
+ ssize = (ctypes.sizeof(ctypes.c_long) +
+ ctypes.sizeof(ctypes.c_int) +
+ ctypes.sizeof(ctypes.c_ushort) +
+ 32)
+ extra_padding = (-ssize) % ctypes.alignment(ctypes.c_long)
+
+ assert dirent._fields_ == [('_alignment', ctypes.c_long),
+ ('_pad0', ctypes.c_char),
+ ('_pad1', ctypes.c_char),
+ ('_pad2', ctypes.c_char),
+ ('_pad3', ctypes.c_char),
+ ('d_reclen', ctypes.c_ushort),
+ ] + [
+ ('_pad%d' % n, ctypes.c_char)
+ for n in range(4, 4+32+extra_padding)]
+ assert ctypes.sizeof(dirent) == ssize + extra_padding
+ assert ctypes.alignment(dirent) == ctypes.alignment(ctypes.c_long)
+
+def test_fit_type():
+ S = configure.getstruct("struct S",
+ """
+ struct S {
+ signed char c;
+ unsigned char uc;
+ short s;
+ unsigned short us;
+ int i;
+ unsigned int ui;
+ long l;
+ unsigned long ul;
+ long long ll;
+ unsigned long long ull;
+ float f;
+ double d;
+ };
+ """,
+ [("c", ctypes.c_int),
+ ("uc", ctypes.c_int),
+ ("s", ctypes.c_uint),
+ ("us", ctypes.c_int),
+ ("i", ctypes.c_int),
+ ("ui", ctypes.c_int),
+ ("l", ctypes.c_int),
+ ("ul", ctypes.c_int),
+ ("ll", ctypes.c_int),
+ ("ull", ctypes.c_int),
+ ("f", ctypes.c_double),
+ ("d", ctypes.c_float)])
+ assert issubclass(S, ctypes.Structure)
+ fields = dict(S._fields_)
+ assert fields["c"] == ctypes.c_byte
+ assert fields["uc"] == ctypes.c_ubyte
+ assert fields["s"] == ctypes.c_short
+ assert fields["us"] == ctypes.c_ushort
+ assert fields["i"] == ctypes.c_int
+ assert fields["ui"] == ctypes.c_uint
+ assert fields["l"] == ctypes.c_long
+ assert fields["ul"] == ctypes.c_ulong
+ assert fields["ll"] == ctypes.c_longlong
+ assert fields["ull"] == ctypes.c_ulonglong
+ assert fields["f"] == ctypes.c_float
+ assert fields["d"] == ctypes.c_double
+
+def test_simple_type():
+ ctype = configure.getsimpletype('test_t',
+ 'typedef unsigned short test_t;',
+ ctypes.c_int)
+ assert ctype == ctypes.c_ushort
+
+def test_constant_integer():
+ value = configure.getconstantinteger('BLAH',
+ '#define BLAH (6*7)')
+ assert value == 42
+ value = configure.getconstantinteger('BLAH',
+ '#define BLAH (-2147483648LL)')
+ assert value == -2147483648
+ value = configure.getconstantinteger('BLAH',
+ '#define BLAH (3333333333ULL)')
+ assert value == 3333333333
+
+def test_defined():
+ res = configure.getdefined('ALFKJLKJFLKJFKLEJDLKEWMECEE', '')
+ assert not res
+ res = configure.getdefined('ALFKJLKJFLKJFKLEJDLKEWMECEE',
+ '#define ALFKJLKJFLKJFKLEJDLKEWMECEE')
+ assert res
+
+def test_configure():
+ configdir = configure.configdir
+ test_h = configdir.join('test_ctypes_platform.h')
+ test_h.write('#define XYZZY 42\n')
+
+ class CConfig:
+ _compilation_info_ = ExternalCompilationInfo(
+ pre_include_lines = ["/* a C comment */",
+ "#include <stdio.h>",
+ "#include <test_ctypes_platform.h>"],
+ include_dirs = [str(configdir)]
+ )
+
+ FILE = configure.Struct('FILE', [])
+ ushort = configure.SimpleType('unsigned short')
+ XYZZY = configure.ConstantInteger('XYZZY')
+
+ res = configure.configure(CConfig)
+ assert issubclass(res['FILE'], ctypes.Structure)
+ assert res == {'FILE': res['FILE'],
+ 'ushort': ctypes.c_ushort,
+ 'XYZZY': 42}
+
+def test_ifdef():
+ class CConfig:
+ _compilation_info_ = ExternalCompilationInfo(
+ post_include_lines = ['/* a C comment */',
+ '#define XYZZY 42',
+ 'typedef int foo;',
+ 'struct s {',
+ 'int i;',
+ 'double f;'
+ '};'])
+
+
+ s = configure.Struct('struct s', [('i', ctypes.c_int)],
+ ifdef='XYZZY')
+ z = configure.Struct('struct z', [('i', ctypes.c_int)],
+ ifdef='FOOBAR')
+
+ foo = configure.SimpleType('foo', ifdef='XYZZY')
+ bar = configure.SimpleType('bar', ifdef='FOOBAR')
+
+ res = configure.configure(CConfig)
+ assert res['s'] is not None
+ assert res['z'] is None
+ assert res['foo'] is not None
+ assert res['bar'] is None
+
+def test_nested_structs():
+ class CConfig:
+ _compilation_info_ = ExternalCompilationInfo(
+ post_include_lines="""
+ struct x {
+ int foo;
+ unsigned long bar;
+ };
+ struct y {
+ char c;
+ struct x x;
+ };
+ """.split("\n"))
+
+ x = configure.Struct("struct x", [("bar", ctypes.c_short)])
+ y = configure.Struct("struct y", [("x", x)])
+
+ res = configure.configure(CConfig)
+ c_x = res["x"]
+ c_y = res["y"]
+ c_y_fields = dict(c_y._fields_)
+ assert issubclass(c_x , ctypes.Structure)
+ assert issubclass(c_y, ctypes.Structure)
+ assert c_y_fields["x"] is c_x
+
+def test_array():
+ dirent = configure.getstruct("struct dirent",
+ """
+ struct dirent /* for this example only, not the exact dirent */
+ {
+ long d_ino;
+ int d_off;
+ unsigned short d_reclen;
+ char d_name[32];
+ };
+ """,
+ [("d_name", ctypes.c_char * 0)])
+ assert dirent.d_name.size == 32
+
+def test_has():
+ assert configure.has("x", "int x = 3;")
+ assert not configure.has("x", "")
+ # has() should also not crash if it is given an invalid #include
+ assert not configure.has("x", "#include <some/path/which/cannot/exist>")
+
+def test_check_eci():
+ eci = ExternalCompilationInfo()
+ assert configure.check_eci(eci)
+ eci = ExternalCompilationInfo(libraries=['some_name_that_doesnt_exist_'])
+ assert not configure.check_eci(eci)
+
+def test_sizeof():
+ assert configure.sizeof("char", ExternalCompilationInfo()) == 1
+
+def test_memory_alignment():
+ a = configure.memory_alignment()
+ print a
+ assert a % struct.calcsize("P") == 0
diff --git a/lib/pypy/ctypes_configure/test/test_dumpcache.py b/lib/pypy/ctypes_configure/test/test_dumpcache.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/ctypes_configure/test/test_dumpcache.py
@@ -0,0 +1,61 @@
+import ctypes
+from ctypes_configure import configure, dumpcache
+from ctypes_configure.cbuild import ExternalCompilationInfo
+
+
+def test_cache():
+ configdir = configure.configdir
+ test_h = configdir.join('test_ctypes_platform2.h')
+ test_h.write('#define XYZZY 42\n'
+ "#define large 2147483648L\n")
+
+ class CConfig:
+ _compilation_info_ = ExternalCompilationInfo(
+ pre_include_lines = ["/* a C comment */",
+ "#include <stdio.h>",
+ "#include <test_ctypes_platform2.h>"],
+ include_dirs = [str(configdir)]
+ )
+
+ FILE = configure.Struct('FILE', [])
+ ushort = configure.SimpleType('unsigned short')
+ XYZZY = configure.ConstantInteger('XYZZY')
+ XUZ = configure.Has('XUZ')
+ large = configure.DefinedConstantInteger('large')
+ undef = configure.Defined('really_undefined')
+
+ res = configure.configure(CConfig)
+
+ cachefile = configdir.join('cache')
+ dumpcache.dumpcache('', str(cachefile), res)
+
+ d = {}
+ execfile(str(cachefile), d)
+ assert d['XYZZY'] == res['XYZZY']
+ assert d['ushort'] == res['ushort']
+ assert d['FILE']._fields_ == res['FILE']._fields_
+ assert d['FILE'].__mro__[1:] == res['FILE'].__mro__[1:]
+ assert d['undef'] == res['undef']
+ assert d['large'] == res['large']
+ assert d['XUZ'] == res['XUZ']
+
+
+def test_cache_array():
+ configdir = configure.configdir
+ res = {'foo': ctypes.c_short * 27}
+ cachefile = configdir.join('cache_array')
+ dumpcache.dumpcache('', str(cachefile), res)
+ #
+ d = {}
+ execfile(str(cachefile), d)
+ assert d['foo'] == res['foo']
+
+def test_cache_array_array():
+ configdir = configure.configdir
+ res = {'foo': (ctypes.c_int * 2) * 3}
+ cachefile = configdir.join('cache_array_array')
+ dumpcache.dumpcache('', str(cachefile), res)
+ #
+ d = {}
+ execfile(str(cachefile), d)
+ assert d['foo'] == res['foo']
diff --git a/lib/pypy/demo/autopath.py b/lib/pypy/demo/autopath.py
new file mode 100644
--- /dev/null
+++ b/lib/pypy/demo/autopath.py
@@ -0,0 +1,2 @@
+import sys, os
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
diff --git a/lib/pypy/demo/bpnn.py b/lib/pypy/demo/bpnn.py
new file mode 100755
--- /dev/null
+++ b/lib/pypy/demo/bpnn.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+"""
+ Translator Demo
+
+ To analyse and type-annotate the functions and class defined in
+ this module, starting from the entry point function demo(),
+ use the following command line:
+
+ ../pypy/translator/goal/translate.py bpnn.py
+
+ Insert '--help' before 'bpnn.py' for a list of translation options,
+ or see the Overview of Command Line Options for translation at
+ http://codespeak.net/pypy/dist/pypy/doc/config/commandline.html
+"""
+# Back-Propagation Neural Networks
+#
+# Written in Python. See http://www.python.org/
+#
+# Neil Schemenauer <nascheme at enme.ucalgary.ca>
+#
+# Modifications to the original (Armin Rigo):
+# * import random from PyPy's lib, which is Python 2.2's plain
+# Python implementation
+# * print a doc about how to start the Translator
+
+import sys
+import math
+import time
+
+import autopath
+from pypy.rlib import rrandom
+
+PRINT_IT = True
+
+random = rrandom.Random(1)
+
+# calculate a random number where: a <= rand < b
+def rand(a, b):
+ return (b-a)*random.random() + a
+
+# Make a matrix (we could use NumPy to speed this up)
+def makeMatrix(I, J, fill=0.0):
+ m = []
+ for i in range(I):
+ m.append([fill]*J)
+ return m
+
+class NN:
+
+ def __init__(self, ni, nh, no):
+ # number of input, hidden, and output nodes
+ self.ni = ni + 1 # +1 for bias node
+ self.nh = nh
+ self.no = no
+
+ # activations for nodes
+ self.ai = [1.0]*self.ni
+ self.ah = [1.0]*self.nh
+ self.ao = [1.0]*self.no
+
+ # create weights
+ self.wi = makeMatrix(self.ni, self.nh)
+ self.wo = makeMatrix(self.nh, self.no)
+ # set them to random vaules
+ for i in range(self.ni):
+ for j in range(self.nh):
+ self.wi[i][j] = rand(-2.0, 2.0)
+ for j in range(self.nh):
+ for k in range(self.no):
+ self.wo[j][k] = rand(-2.0, 2.0)
+
+ # last change in weights for momentum
+ self.ci = makeMatrix(self.ni, self.nh)
+ self.co = makeMatrix(self.nh, self.no)
+
+ def update(self, inputs):
+ if len(inputs) != self.ni-1:
+ raise ValueError, 'wrong number of inputs'
+
+ # input activations
+ for i in range(self.ni-1):
+ #self.ai[i] = 1.0/(1.0+math.exp(-inputs[i]))
+ self.ai[i] = inputs[i]
+
+ # hidden activations
+ for j in range(self.nh):
+ sum = 0.0
+ for i in range(self.ni):
+ sum = sum + self.ai[i] * self.wi[i][j]
+ self.ah[j] = 1.0/(1.0+math.exp(-sum))
+
+ # output activations
+ for k in range(self.no):
+ sum = 0.0
+ for j in range(self.nh):
+ sum = sum + self.ah[j] * self.wo[j][k]
+ self.ao[k] = 1.0/(1.0+math.exp(-sum))
+
+ return self.ao[:]
+
+
+ def backPropagate(self, targets, N, M):
+ if len(targets) != self.no:
+ raise ValueError, 'wrong number of target values'
+
+ # calculate error terms for output
+ output_deltas = [0.0] * self.no
+ for k in range(self.no):
+ ao = self.ao[k]
+ output_deltas[k] = ao*(1-ao)*(targets[k]-ao)
+
+ # calculate error terms for hidden
+ hidden_deltas = [0.0] * self.nh
+ for j in range(self.nh):
+ sum = 0.0
+ for k in range(self.no):
+ sum = sum + output_deltas[k]*self.wo[j][k]
More information about the pypy-commit
mailing list