[py-svn] pytest commit 755d6ca9b91b: also un-nest test directory

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Sat Nov 13 11:45:07 CET 2010


# HG changeset patch -- Bitbucket.org
# Project pytest
# URL http://bitbucket.org/hpk42/pytest/overview
# User holger krekel <holger at merlinux.eu>
# Date 1289644240 -3600
# Node ID 755d6ca9b91b6716f55663edee404269d107f1de
# Parent  4e2a0da2c7df834b6789db857af49440a11f98af
also un-nest test directory

--- /dev/null
+++ b/testing/test_pastebin.py
@@ -0,0 +1,47 @@
+
+class TestPasting:
+    def pytest_funcarg__pastebinlist(self, request):
+        mp = request.getfuncargvalue("monkeypatch")
+        pastebinlist = []
+        class MockProxy:
+            def newPaste(self, language, code):
+                pastebinlist.append((language, code))
+        plugin = request.config.pluginmanager.getplugin('pastebin')
+        mp.setattr(plugin, 'getproxy', MockProxy)
+        return pastebinlist
+
+    def test_failed(self, testdir, pastebinlist):
+        testpath = testdir.makepyfile("""
+            import py
+            def test_pass():
+                pass
+            def test_fail():
+                assert 0
+            def test_skip():
+                py.test.skip("")
+        """)
+        reprec = testdir.inline_run(testpath, "--paste=failed")
+        assert len(pastebinlist) == 1
+        assert pastebinlist[0][0] == "python"
+        s = pastebinlist[0][1]
+        assert s.find("def test_fail") != -1
+        assert reprec.countoutcomes() == [1,1,1]
+
+    def test_all(self, testdir, pastebinlist):
+        testpath = testdir.makepyfile("""
+            import py
+            def test_pass():
+                pass
+            def test_fail():
+                assert 0
+            def test_skip():
+                py.test.skip("")
+        """)
+        reprec = testdir.inline_run(testpath, "--pastebin=all")
+        assert reprec.countoutcomes() == [1,1,1]
+        assert len(pastebinlist) == 1
+        assert pastebinlist[0][0] == "python"
+        s = pastebinlist[0][1]
+        for x in 'test_fail test_skip skipped'.split():
+            assert x in s, (s, x)
+
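
The fixture setup in the hunk above uses the old-style funcarg factory convention: a function named pytest_funcarg__<name> provides the <name> argument to any test that requests it, and it can pull in other funcargs through request.getfuncargvalue(). A minimal illustrative sketch of the same pattern (fakecwd and test_uses_fakecwd are made-up names, not part of this patch):

    import os

    def pytest_funcarg__fakecwd(request):
        # Factory for a "fakecwd" funcarg; it requests the builtin
        # monkeypatch funcarg via request.getfuncargvalue() and patches
        # os.getcwd for the duration of the test.
        monkeypatch = request.getfuncargvalue("monkeypatch")
        monkeypatch.setattr(os, "getcwd", lambda: "/tmp")
        return "/tmp"

    def test_uses_fakecwd(fakecwd):
        assert os.getcwd() == fakecwd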

--- /dev/null
+++ b/testing/test_terminal.py
@@ -0,0 +1,639 @@
+"""
+terminal reporting of the full testing process.
+"""
+import pytest, py
+import sys
+
+from _pytest.terminal import TerminalReporter, \
+    CollectonlyReporter,  repr_pythonversion, getreportopt
+from _pytest import runner
+
+def basic_run_report(item):
+    runner.call_and_report(item, "setup", log=False)
+    return runner.call_and_report(item, "call", log=False)
+
+class Option:
+    def __init__(self, verbose=False, fulltrace=False):
+        self.verbose = verbose
+        self.fulltrace = fulltrace
+
+    @property
+    def args(self):
+        l = []
+        if self.verbose:
+            l.append('-v')
+        if self.fulltrace:
+            l.append('--fulltrace')
+        return l
+
+def pytest_generate_tests(metafunc):
+    if "option" in metafunc.funcargnames:
+        metafunc.addcall(id="default",
+                         funcargs={'option': Option(verbose=False)})
+        metafunc.addcall(id="verbose",
+                         funcargs={'option': Option(verbose=True)})
+        metafunc.addcall(id="quiet",
+                         funcargs={'option': Option(verbose=-1)})
+        metafunc.addcall(id="fulltrace",
+                         funcargs={'option': Option(fulltrace=True)})
+
+
+class TestTerminal:
+    def test_pass_skip_fail(self, testdir, option):
+        p = testdir.makepyfile("""
+            import py
+            def test_ok():
+                pass
+            def test_skip():
+                py.test.skip("xx")
+            def test_func():
+                assert 0
+        """)
+        result = testdir.runpytest(*option.args)
+        if option.verbose:
+            result.stdout.fnmatch_lines([
+                "*test_pass_skip_fail.py:2: *test_ok*PASS*",
+                "*test_pass_skip_fail.py:4: *test_skip*SKIP*",
+                "*test_pass_skip_fail.py:6: *test_func*FAIL*",
+            ])
+        else:
+            result.stdout.fnmatch_lines([
+            "*test_pass_skip_fail.py .sF"
+        ])
+        result.stdout.fnmatch_lines([
+            "    def test_func():",
+            ">       assert 0",
+            "E       assert 0",
+        ])
+
+    def test_internalerror(self, testdir, linecomp):
+        modcol = testdir.getmodulecol("def test_one(): pass")
+        rep = TerminalReporter(modcol.config, file=linecomp.stringio)
+        excinfo = py.test.raises(ValueError, "raise ValueError('hello')")
+        rep.pytest_internalerror(excinfo.getrepr())
+        linecomp.assert_contains_lines([
+            "INTERNALERROR> *ValueError*hello*"
+        ])
+
+    def test_writeline(self, testdir, linecomp):
+        modcol = testdir.getmodulecol("def test_one(): pass")
+        stringio = py.io.TextIO()
+        rep = TerminalReporter(modcol.config, file=linecomp.stringio)
+        rep.write_fspath_result(py.path.local("xy.py"), '.')
+        rep.write_line("hello world")
+        lines = linecomp.stringio.getvalue().split('\n')
+        assert not lines[0]
+        assert lines[1].endswith("xy.py .")
+        assert lines[2] == "hello world"
+
+    def test_show_runtest_logstart(self, testdir, linecomp):
+        item = testdir.getitem("def test_func(): pass")
+        tr = TerminalReporter(item.config, file=linecomp.stringio)
+        item.config.pluginmanager.register(tr)
+        location = item.reportinfo()
+        tr.config.hook.pytest_runtest_logstart(nodeid=item.nodeid,
+            location=location, fspath=str(item.fspath))
+        linecomp.assert_contains_lines([
+            "*test_show_runtest_logstart.py*"
+        ])
+
+    def test_runtest_location_shown_before_test_starts(self, testdir):
+        p1 = testdir.makepyfile("""
+            def test_1():
+                import time
+                time.sleep(20)
+        """)
+        child = testdir.spawn_pytest("")
+        child.expect(".*test_runtest_location.*py")
+        child.sendeof()
+        child.kill(15)
+
+    def test_itemreport_subclasses_show_subclassed_file(self, testdir):
+        p1 = testdir.makepyfile(test_p1="""
+            class BaseTests:
+                def test_p1(self):
+                    pass
+            class TestClass(BaseTests):
+                pass
+        """)
+        p2 = testdir.makepyfile(test_p2="""
+            from test_p1 import BaseTests
+            class TestMore(BaseTests):
+                pass
+        """)
+        result = testdir.runpytest(p2)
+        result.stdout.fnmatch_lines([
+            "*test_p2.py .",
+            "*1 passed*",
+        ])
+        result = testdir.runpytest("-v", p2)
+        result.stdout.fnmatch_lines([
+            "*test_p2.py <- *test_p1.py:2: TestMore.test_p1*",
+        ])
+
+    def test_keyboard_interrupt(self, testdir, option):
+        p = testdir.makepyfile("""
+            def test_foobar():
+                assert 0
+            def test_spamegg():
+                import py; py.test.skip('skip me please!')
+            def test_interrupt_me():
+                raise KeyboardInterrupt   # simulating the user
+        """)
+
+        result = testdir.runpytest(*option.args)
+        result.stdout.fnmatch_lines([
+            "    def test_foobar():",
+            ">       assert 0",
+            "E       assert 0",
+            "*_keyboard_interrupt.py:6: KeyboardInterrupt*",
+        ])
+        if option.fulltrace:
+            result.stdout.fnmatch_lines([
+                "*raise KeyboardInterrupt   # simulating the user*",
+            ])
+        result.stdout.fnmatch_lines(['*KeyboardInterrupt*'])
+
+
+
+class TestCollectonly:
+    def test_collectonly_basic(self, testdir, linecomp):
+        modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
+            def test_func():
+                pass
+        """)
+        rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
+        modcol.config.pluginmanager.register(rep)
+        indent = rep.indent
+        rep.config.hook.pytest_collectstart(collector=modcol)
+        linecomp.assert_contains_lines([
+           "<Module 'test_collectonly_basic.py'>"
+        ])
+        item = modcol.collect()[0]
+        rep.config.hook.pytest_itemcollected(item=item)
+        linecomp.assert_contains_lines([
+           "  <Function 'test_func'>",
+        ])
+        report = rep.config.hook.pytest_make_collect_report(collector=modcol)
+        rep.config.hook.pytest_collectreport(report=report)
+        assert rep.indent == indent
+
+    def test_collectonly_skipped_module(self, testdir, linecomp):
+        modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
+            import py
+            py.test.skip("nomod")
+        """)
+        rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
+        modcol.config.pluginmanager.register(rep)
+        cols = list(testdir.genitems([modcol]))
+        assert len(cols) == 0
+        linecomp.assert_contains_lines("""
+            <Module 'test_collectonly_skipped_module.py'>
+              !!! Skipped: nomod !!!
+        """)
+
+    def test_collectonly_failed_module(self, testdir, linecomp):
+        modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
+            raise ValueError(0)
+        """)
+        rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
+        modcol.config.pluginmanager.register(rep)
+        cols = list(testdir.genitems([modcol]))
+        assert len(cols) == 0
+        linecomp.assert_contains_lines("""
+            <Module 'test_collectonly_failed_module.py'>
+              !!! ValueError: 0 !!!
+        """)
+
+    def test_collectonly_fatal(self, testdir):
+        p1 = testdir.makeconftest("""
+            def pytest_collectstart(collector):
+                assert 0, "urgs"
+        """)
+        result = testdir.runpytest("--collectonly")
+        result.stdout.fnmatch_lines([
+            "*INTERNAL*args*"
+        ])
+        assert result.ret == 3
+
+    def test_collectonly_simple(self, testdir):
+        p = testdir.makepyfile("""
+            def test_func1():
+                pass
+            class TestClass:
+                def test_method(self):
+                    pass
+        """)
+        result = testdir.runpytest("--collectonly", p)
+        stderr = result.stderr.str().strip()
+        #assert stderr.startswith("inserting into sys.path")
+        assert result.ret == 0
+        extra = result.stdout.fnmatch_lines([
+            "*<Module '*.py'>",
+            "* <Function 'test_func1'*>",
+            "* <Class 'TestClass'>",
+            "*  <Instance '()'>",
+            "*   <Function 'test_method'*>",
+        ])
+
+    def test_collectonly_error(self, testdir):
+        p = testdir.makepyfile("import Errlkjqweqwe")
+        result = testdir.runpytest("--collectonly", p)
+        stderr = result.stderr.str().strip()
+        assert result.ret == 1
+        extra = result.stdout.fnmatch_lines(py.code.Source("""
+            *<Module '*.py'>
+              *ImportError*
+            *!!!*failures*!!!
+            *test_collectonly_error.py:1*
+        """).strip())
+
+
+def test_repr_python_version(monkeypatch):
+    monkeypatch.setattr(sys, 'version_info', (2, 5, 1, 'final', 0))
+    assert repr_pythonversion() == "2.5.1-final-0"
+    py.std.sys.version_info = x = (2,3)
+    assert repr_pythonversion() == str(x)
+
+class TestFixtureReporting:
+    def test_setup_fixture_error(self, testdir):
+        p = testdir.makepyfile("""
+            def setup_function(function):
+                print ("setup func")
+                assert 0
+            def test_nada():
+                pass
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*ERROR at setup of test_nada*",
+            "*setup_function(function):*",
+            "*setup func*",
+            "*assert 0*",
+            "*1 error*",
+        ])
+        assert result.ret != 0
+
+    def test_teardown_fixture_error(self, testdir):
+        p = testdir.makepyfile("""
+            def test_nada():
+                pass
+            def teardown_function(function):
+                print ("teardown func")
+                assert 0
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*ERROR at teardown*",
+            "*teardown_function(function):*",
+            "*assert 0*",
+            "*Captured stdout*",
+            "*teardown func*",
+            "*1 passed*1 error*",
+        ])
+
+    def test_teardown_fixture_error_and_test_failure(self, testdir):
+        p = testdir.makepyfile("""
+            def test_fail():
+                assert 0, "failingfunc"
+
+            def teardown_function(function):
+                print ("teardown func")
+                assert False
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*ERROR at teardown of test_fail*",
+            "*teardown_function(function):*",
+            "*assert False*",
+            "*Captured stdout*",
+            "*teardown func*",
+
+            "*test_fail*",
+            "*def test_fail():",
+            "*failingfunc*",
+            "*1 failed*1 error*",
+         ])
+
+class TestTerminalFunctional:
+    def test_deselected(self, testdir):
+        testpath = testdir.makepyfile("""
+                def test_one():
+                    pass
+                def test_two():
+                    pass
+                def test_three():
+                    pass
+           """
+        )
+        result = testdir.runpytest("-k", "test_two:", testpath)
+        result.stdout.fnmatch_lines([
+            "*test_deselected.py ..",
+            "=* 1 test*deselected by 'test_two:'*=",
+        ])
+        assert result.ret == 0
+
+    def test_no_skip_summary_if_failure(self, testdir):
+        testdir.makepyfile("""
+            import py
+            def test_ok():
+                pass
+            def test_fail():
+                assert 0
+            def test_skip():
+                py.test.skip("dontshow")
+        """)
+        result = testdir.runpytest()
+        assert result.stdout.str().find("skip test summary") == -1
+        assert result.ret == 1
+
+    def test_passes(self, testdir):
+        p1 = testdir.makepyfile("""
+            def test_passes():
+                pass
+            class TestClass:
+                def test_method(self):
+                    pass
+        """)
+        old = p1.dirpath().chdir()
+        try:
+            result = testdir.runpytest()
+        finally:
+            old.chdir()
+        result.stdout.fnmatch_lines([
+            "test_passes.py ..",
+            "* 2 pass*",
+        ])
+        assert result.ret == 0
+
+    def test_header_trailer_info(self, testdir):
+        p1 = testdir.makepyfile("""
+            def test_passes():
+                pass
+        """)
+        result = testdir.runpytest()
+        verinfo = ".".join(map(str, py.std.sys.version_info[:3]))
+        result.stdout.fnmatch_lines([
+            "*===== test session starts ====*",
+            "platform %s -- Python %s*" %(
+                    py.std.sys.platform, verinfo), # , py.std.sys.executable),
+            "*test_header_trailer_info.py .",
+            "=* 1 passed in *.[0-9][0-9] seconds *=",
+        ])
+
+    def test_showlocals(self, testdir):
+        p1 = testdir.makepyfile("""
+            def test_showlocals():
+                x = 3
+                y = "x" * 5000
+                assert 0
+        """)
+        result = testdir.runpytest(p1, '-l')
+        result.stdout.fnmatch_lines([
+            #"_ _ * Locals *",
+            "x* = 3",
+            "y* = 'xxxxxx*"
+        ])
+
+    def test_verbose_reporting(self, testdir, pytestconfig):
+        p1 = testdir.makepyfile("""
+            import py
+            def test_fail():
+                raise ValueError()
+            def test_pass():
+                pass
+            class TestClass:
+                def test_skip(self):
+                    py.test.skip("hello")
+            def test_gen():
+                def check(x):
+                    assert x == 1
+                yield check, 0
+        """)
+        result = testdir.runpytest(p1, '-v')
+        result.stdout.fnmatch_lines([
+            "*test_verbose_reporting.py:2: test_fail*FAIL*",
+            "*test_verbose_reporting.py:4: test_pass*PASS*",
+            "*test_verbose_reporting.py:7: TestClass.test_skip*SKIP*",
+            "*test_verbose_reporting.py:10: test_gen*FAIL*",
+        ])
+        assert result.ret == 1
+        pytestconfig.pluginmanager.skipifmissing("xdist")
+        result = testdir.runpytest(p1, '-v', '-n 1')
+        result.stdout.fnmatch_lines([
+            "*FAIL*test_verbose_reporting.py:2: test_fail*",
+        ])
+        assert result.ret == 1
+
+    def test_quiet_reporting(self, testdir):
+        p1 = testdir.makepyfile("def test_pass(): pass")
+        result = testdir.runpytest(p1, '-q')
+        s = result.stdout.str()
+        assert 'test session starts' not in s
+        assert p1.basename not in s
+        assert "===" not in s
+
+def test_fail_extra_reporting(testdir):
+    p = testdir.makepyfile("def test_this(): assert 0")
+    result = testdir.runpytest(p)
+    assert 'short test summary' not in result.stdout.str()
+    result = testdir.runpytest(p, '-rf')
+    result.stdout.fnmatch_lines([
+        "*test summary*",
+        "FAIL*test_fail_extra_reporting*",
+    ])
+
+def test_fail_reporting_on_pass(testdir):
+    p = testdir.makepyfile("def test_this(): assert 1")
+    result = testdir.runpytest(p, '-rf')
+    assert 'short test summary' not in result.stdout.str()
+
+def test_getreportopt():
+    class config:
+        class option:
+            reportchars = ""
+    config.option.report = "xfailed"
+    assert getreportopt(config) == "x"
+
+    config.option.report = "xfailed,skipped"
+    assert getreportopt(config) == "xs"
+
+    config.option.report = "skipped,xfailed"
+    assert getreportopt(config) == "sx"
+
+    config.option.report = "skipped"
+    config.option.reportchars = "sf"
+    assert getreportopt(config) == "sf"
+
+    config.option.reportchars = "sfx"
+    assert getreportopt(config) == "sfx"
+
+def test_terminalreporter_reportopt_addopts(testdir):
+    testdir.makeini("[pytest]\naddopts=-rs")
+    p = testdir.makepyfile("""
+        def pytest_funcarg__tr(request):
+            tr = request.config.pluginmanager.getplugin("terminalreporter")
+            return tr
+        def test_opt(tr):
+            assert tr.hasopt('skipped')
+            assert not tr.hasopt('qwe')
+    """)
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines([
+        "*1 passed*"
+    ])
+
+def test_tbstyle_short(testdir):
+    p = testdir.makepyfile("""
+        def pytest_funcarg__arg(request):
+            return 42
+        def test_opt(arg):
+            x = 0
+            assert x
+    """)
+    result = testdir.runpytest("--tb=short")
+    s = result.stdout.str()
+    assert 'arg = 42' not in s
+    assert 'x = 0' not in s
+    result.stdout.fnmatch_lines([
+        "*%s:5*" % p.basename,
+        ">*assert x",
+        "E*assert*",
+    ])
+    result = testdir.runpytest()
+    s = result.stdout.str()
+    assert 'x = 0' in s
+    assert 'assert x' in s
+
+def test_traceconfig(testdir, monkeypatch):
+    result = testdir.runpytest("--traceconfig")
+    result.stdout.fnmatch_lines([
+        "*active plugins*"
+    ])
+    assert result.ret == 0
+
+def test_debug(testdir, monkeypatch):
+    result = testdir.runpytest("--debug")
+    result.stderr.fnmatch_lines([
+        "*pytest_sessionstart*session*",
+    ])
+    assert result.ret == 0
+
+def test_PYTEST_DEBUG(testdir, monkeypatch):
+    monkeypatch.setenv("PYTEST_DEBUG", "1")
+    result = testdir.runpytest()
+    assert result.ret == 0
+    result.stderr.fnmatch_lines([
+        "*registered*PluginManager*"
+    ])
+    
+
+class TestGenericReporting:
+    """ this test class can be subclassed with a different option
+        provider to run e.g. distributed tests.
+    """
+    def test_collect_fail(self, testdir, option):
+        p = testdir.makepyfile("import xyz\n")
+        result = testdir.runpytest(*option.args)
+        result.stdout.fnmatch_lines([
+            "*test_collect_fail.py E*",
+            ">   import xyz",
+            "E   ImportError: No module named xyz",
+            "*1 error*",
+        ])
+
+    def test_maxfailures(self, testdir, option):
+        p = testdir.makepyfile("""
+            def test_1():
+                assert 0
+            def test_2():
+                assert 0
+            def test_3():
+                assert 0
+        """)
+        result = testdir.runpytest("--maxfail=2", *option.args)
+        result.stdout.fnmatch_lines([
+            "*def test_1():*",
+            "*def test_2():*",
+            "*!! Interrupted: stopping after 2 failures*!!*",
+            "*2 failed*",
+        ])
+
+
+    def test_tb_option(self, testdir, option):
+        p = testdir.makepyfile("""
+            import py
+            def g():
+                raise IndexError
+            def test_func():
+                print (6*7)
+                g()  # --calling--
+        """)
+        for tbopt in ["long", "short", "no"]:
+            print('testing --tb=%s...' % tbopt)
+            result = testdir.runpytest('--tb=%s' % tbopt)
+            s = result.stdout.str()
+            if tbopt == "long":
+                assert 'print (6*7)' in s
+            else:
+                assert 'print (6*7)' not in s
+            if tbopt != "no":
+                assert '--calling--' in s
+                assert 'IndexError' in s
+            else:
+                assert 'FAILURES' not in s
+                assert '--calling--' not in s
+                assert 'IndexError' not in s
+
+    def test_tb_crashline(self, testdir, option):
+        p = testdir.makepyfile("""
+            import py
+            def g():
+                raise IndexError
+            def test_func1():
+                print (6*7)
+                g()  # --calling--
+            def test_func2():
+                assert 0, "hello"
+        """)
+        result = testdir.runpytest("--tb=line")
+        bn = p.basename
+        result.stdout.fnmatch_lines([
+            "*%s:3: IndexError*" % bn,
+            "*%s:8: AssertionError: hello*" % bn,
+        ])
+        s = result.stdout.str()
+        assert "def test_func2" not in s
+
+    def test_pytest_report_header(self, testdir, option):
+        testdir.makeconftest("""
+            def pytest_report_header(config):
+                return "hello: info"
+        """)
+        testdir.mkdir("a").join("conftest.py").write("""
+def pytest_report_header(config):
+    return ["line1", "line2"]""")
+        result = testdir.runpytest("a")
+        result.stdout.fnmatch_lines([
+            "line1",
+            "line2",
+            "*hello: info*",
+        ])
+
+@py.test.mark.xfail("not hasattr(os, 'dup')")
+def test_fdopen_kept_alive_issue124(testdir):
+    testdir.makepyfile("""
+        import os, sys
+        k = []
+        def test_open_file_and_keep_alive(capfd):
+            stdout = os.fdopen(1, 'w', 1)
+            k.append(stdout)
+
+        def test_close_kept_alive_file():
+            stdout = k.pop()
+            stdout.close()
+    """)
+    result = testdir.runpytest("-s")
+    result.stdout.fnmatch_lines([
+        "*2 passed*"
+    ])
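
test_getreportopt in the hunk above pins down how --report names and report characters combine. The following sketch is consistent with those assertions; it is reconstructed from the test only and is not the actual _pytest.terminal.getreportopt implementation:

    def getreportopt(config):
        # Translate --report names into report characters and merge in
        # any explicit reportchars (behaviour assumed from the
        # assertions in test_getreportopt).
        mapping = {"failed": "f", "skipped": "s", "xfailed": "x"}
        chars = ""
        report = getattr(config.option, "report", "") or ""
        for name in report.split(","):
            chars += mapping.get(name.strip(), "")
        for ch in getattr(config.option, "reportchars", "") or "":
            if ch not in chars:
                chars += ch
        return chars

With report set to "skipped,xfailed" and empty reportchars this returns "sx", and explicit reportchars are appended if not already present, matching the last two assertions of the test.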

--- a/testing/plugin/test_doctest.py
+++ /dev/null
@@ -1,114 +0,0 @@
-from _pytest.doctest import DoctestModule, DoctestTextfile
-import py
-
-pytest_plugins = ["pytest_doctest"]
-
-class TestDoctests:
-
-    def test_collect_testtextfile(self, testdir):
-        testdir.maketxtfile(whatever="")
-        checkfile = testdir.maketxtfile(test_something="""
-            alskdjalsdk
-            >>> i = 5
-            >>> i-1
-            4
-        """)
-        for x in (testdir.tmpdir, checkfile):
-            #print "checking that %s returns custom items" % (x,)
-            items, reprec = testdir.inline_genitems(x)
-            assert len(items) == 1
-            assert isinstance(items[0], DoctestTextfile)
-
-    def test_collect_module(self, testdir):
-        path = testdir.makepyfile(whatever="#")
-        for p in (path, testdir.tmpdir):
-            items, reprec = testdir.inline_genitems(p,
-                '--doctest-modules')
-            assert len(items) == 1
-            assert isinstance(items[0], DoctestModule)
-
-    def test_simple_doctestfile(self, testdir):
-        p = testdir.maketxtfile(test_doc="""
-            >>> x = 1
-            >>> x == 1
-            False
-        """)
-        reprec = testdir.inline_run(p, )
-        reprec.assertoutcome(failed=1)
-
-    def test_new_pattern(self, testdir):
-        p = testdir.maketxtfile(xdoc ="""
-            >>> x = 1
-            >>> x == 1
-            False
-        """)
-        reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
-        reprec.assertoutcome(failed=1)
-
-    def test_doctest_unexpected_exception(self, testdir):
-        p = testdir.maketxtfile("""
-            >>> i = 0
-            >>> i = 1
-            >>> x
-            2
-        """)
-        reprec = testdir.inline_run(p)
-        call = reprec.getcall("pytest_runtest_logreport")
-        assert call.report.failed
-        assert call.report.longrepr
-        # XXX
-        #testitem, = items
-        #excinfo = py.test.raises(Failed, "testitem.runtest()")
-        #repr = testitem.repr_failure(excinfo, ("", ""))
-        #assert repr.reprlocation
-
-    def test_doctestmodule(self, testdir):
-        p = testdir.makepyfile("""
-            '''
-                >>> x = 1
-                >>> x == 1
-                False
-
-            '''
-        """)
-        reprec = testdir.inline_run(p, "--doctest-modules")
-        reprec.assertoutcome(failed=1)
-
-    def test_doctestmodule_external_and_issue116(self, testdir):
-        p = testdir.mkpydir("hello")
-        p.join("__init__.py").write(py.code.Source("""
-            def somefunc():
-                '''
-                    >>> i = 0
-                    >>> i + 1
-                    2
-                '''
-        """))
-        result = testdir.runpytest(p, "--doctest-modules")
-        result.stdout.fnmatch_lines([
-            '004 *>>> i = 0',
-            '005 *>>> i + 1',
-            '*Expected:',
-            "*    2",
-            "*Got:",
-            "*    1",
-            "*:5: DocTestFailure"
-        ])
-
-
-    def test_txtfile_failing(self, testdir):
-        p = testdir.maketxtfile("""
-            >>> i = 0
-            >>> i + 1
-            2
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            '001 >>> i = 0',
-            '002 >>> i + 1',
-            'Expected:',
-            "    2",
-            "Got:",
-            "    1",
-            "*test_txtfile_failing.txt:2: DocTestFailure"
-        ])

--- a/testing/plugin/test_unittest.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import py
-
-def test_simple_unittest(testdir):
-    testpath = testdir.makepyfile("""
-        import unittest
-        pytest_plugins = "pytest_unittest"
-        class MyTestCase(unittest.TestCase):
-            def testpassing(self):
-                self.assertEquals('foo', 'foo')
-            def test_failing(self):
-                self.assertEquals('foo', 'bar')
-    """)
-    reprec = testdir.inline_run(testpath)
-    assert reprec.matchreport("testpassing").passed
-    assert reprec.matchreport("test_failing").failed
-
-def test_isclasscheck_issue53(testdir):
-    testpath = testdir.makepyfile("""
-        import unittest
-        class _E(object):
-            def __getattr__(self, tag):
-                pass
-        E = _E()
-    """)
-    result = testdir.runpytest(testpath)
-    assert result.ret == 0
-
-def test_setup(testdir):
-    testpath = testdir.makepyfile(test_two="""
-        import unittest
-        class MyTestCase(unittest.TestCase):
-            def setUp(self):
-                self.foo = 1
-            def test_setUp(self):
-                self.assertEquals(1, self.foo)
-    """)
-    reprec = testdir.inline_run(testpath)
-    rep = reprec.matchreport("test_setUp")
-    assert rep.passed
-
-def test_new_instances(testdir):
-    testpath = testdir.makepyfile("""
-        import unittest
-        class MyTestCase(unittest.TestCase):
-            def test_func1(self):
-                self.x = 2
-            def test_func2(self):
-                assert not hasattr(self, 'x')
-    """)
-    reprec = testdir.inline_run(testpath)
-    reprec.assertoutcome(passed=2)
-
-def test_teardown(testdir):
-    testpath = testdir.makepyfile("""
-        import unittest
-        pytest_plugins = "pytest_unittest" # XXX
-        class MyTestCase(unittest.TestCase):
-            l = []
-            def test_one(self):
-                pass
-            def tearDown(self):
-                self.l.append(None)
-        class Second(unittest.TestCase):
-            def test_check(self):
-                self.assertEquals(MyTestCase.l, [None])
-    """)
-    reprec = testdir.inline_run(testpath)
-    passed, skipped, failed = reprec.countoutcomes()
-    assert failed == 0, failed
-    assert passed == 2
-    assert passed + skipped + failed == 2
-
-def test_module_level_pytestmark(testdir):
-    testpath = testdir.makepyfile("""
-        import unittest
-        import py
-        pytestmark = py.test.mark.xfail
-        class MyTestCase(unittest.TestCase):
-            def test_func1(self):
-                assert 0
-    """)
-    reprec = testdir.inline_run(testpath, "-s")
-    reprec.assertoutcome(skipped=1)
-
-def test_class_setup(testdir):
-    testpath = testdir.makepyfile("""
-        import unittest
-        import py
-        class MyTestCase(unittest.TestCase):
-            x = 0
-            @classmethod
-            def setUpClass(cls):
-                cls.x += 1
-            def test_func1(self):
-                assert self.x == 1
-            def test_func2(self):
-                assert self.x == 1
-            @classmethod
-            def tearDownClass(cls):
-                cls.x -= 1
-        def test_teareddown():
-            assert MyTestCase.x == 0
-    """)
-    reprec = testdir.inline_run(testpath)
-    reprec.assertoutcome(passed=3)

--- a/testing/plugin/test_pdb.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import py
-import sys
-
-class TestPDB:
-    def pytest_funcarg__pdblist(self, request):
-        monkeypatch = request.getfuncargvalue("monkeypatch")
-        pdblist = []
-        def mypdb(*args):
-            pdblist.append(args)
-        plugin = request.config.pluginmanager.getplugin('pdb')
-        monkeypatch.setattr(plugin, 'post_mortem', mypdb)
-        return pdblist
-
-    def test_pdb_on_fail(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
-            def test_func():
-                assert 0
-        """)
-        assert rep.failed
-        assert len(pdblist) == 1
-        tb = py.code.Traceback(pdblist[0][0])
-        assert tb[-1].name == "test_func"
-
-    def test_pdb_on_xfail(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
-            import py
-            @py.test.mark.xfail
-            def test_func():
-                assert 0
-        """)
-        assert "xfail" in rep.keywords
-        assert not pdblist
-
-    def test_pdb_on_skip(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
-            import py
-            def test_func():
-                py.test.skip("hello")
-        """)
-        assert rep.skipped
-        assert len(pdblist) == 0
-
-    def test_pdb_on_BdbQuit(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
-            import py, bdb
-            def test_func():
-                raise bdb.BdbQuit
-        """)
-        assert rep.failed
-        assert len(pdblist) == 0
-
-    def test_pdb_interaction(self, testdir):
-        p1 = testdir.makepyfile("""
-            def test_1():
-                i = 0
-                assert i == 1
-        """)
-        child = testdir.spawn_pytest("--pdb %s" % p1)
-        child.expect(".*def test_1")
-        child.expect(".*i = 0")
-        child.expect("(Pdb)")
-        child.sendeof()
-        rest = child.read()
-        assert "1 failed" in rest
-        assert "def test_1" not in rest
-        if child.isalive():
-            child.wait()
-
-    def test_pdb_interaction_exception(self, testdir):
-        p1 = testdir.makepyfile("""
-            import py
-            def globalfunc():
-                pass
-            def test_1():
-                py.test.raises(ValueError, globalfunc)
-        """)
-        child = testdir.spawn_pytest("--pdb %s" % p1)
-        child.expect(".*def test_1")
-        child.expect(".*py.test.raises.*globalfunc")
-        child.expect("(Pdb)")
-        child.sendline("globalfunc")
-        child.expect(".*function")
-        child.sendeof()
-        child.expect("1 failed")
-        if child.isalive():
-            child.wait()
-
-    def test_pdb_interaction_capturing_simple(self, testdir):
-        p1 = testdir.makepyfile("""
-            import py
-            def test_1():
-                i = 0
-                print ("hello17")
-                py.test.set_trace()
-                x = 3
-        """)
-        child = testdir.spawn_pytest(str(p1))
-        child.expect("test_1")
-        child.expect("x = 3")
-        child.expect("(Pdb)")
-        child.sendeof()
-        rest = child.read()
-        assert "1 failed" in rest
-        assert "def test_1" in rest
-        assert "hello17" in rest # out is captured
-        if child.isalive():
-            child.wait()
-
-    def test_pdb_interaction_capturing_twice(self, testdir):
-        p1 = testdir.makepyfile("""
-            import py
-            def test_1():
-                i = 0
-                print ("hello17")
-                py.test.set_trace()
-                x = 3
-                print ("hello18")
-                py.test.set_trace()
-                x = 4
-        """)
-        child = testdir.spawn_pytest(str(p1))
-        child.expect("test_1")
-        child.expect("x = 3")
-        child.expect("(Pdb)")
-        child.sendline('c')
-        child.expect("x = 4")
-        child.sendeof()
-        rest = child.read()
-        assert "1 failed" in rest
-        assert "def test_1" in rest
-        assert "hello17" in rest # out is captured
-        assert "hello18" in rest # out is captured
-        if child.isalive():
-            child.wait()
-
-    def test_pdb_used_outside_test(self, testdir):
-        p1 = testdir.makepyfile("""
-            import py
-            py.test.set_trace()
-            x = 5
-        """)
-        child = testdir.spawn("%s %s" %(sys.executable, p1))
-        child.expect("x = 5")
-        child.sendeof()
-        child.wait()

--- /dev/null
+++ b/testing/test_helpconfig.py
@@ -0,0 +1,53 @@
+import py, pytest, os
+from _pytest.helpconfig import collectattr
+
+def test_version(testdir):
+    result = testdir.runpytest("--version")
+    assert result.ret == 0
+    #p = py.path.local(py.__file__).dirpath()
+    result.stderr.fnmatch_lines([
+        '*py.test*%s*imported from*' % (pytest.__version__, )
+    ])
+
+def test_help(testdir):
+    result = testdir.runpytest("--help")
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
+        "*-v*verbose*",
+        "*setup.cfg*",
+        "*minversion*",
+    ])
+
+def test_collectattr():
+    class A:
+        def pytest_hello(self):
+            pass
+    class B(A):
+        def pytest_world(self):
+            pass
+    methods = py.builtin.sorted(collectattr(B))
+    assert list(methods) == ['pytest_hello', 'pytest_world']
+    methods = py.builtin.sorted(collectattr(B()))
+    assert list(methods) == ['pytest_hello', 'pytest_world']
+
+def test_hookvalidation_unknown(testdir):
+    testdir.makeconftest("""
+        def pytest_hello(xyz):
+            pass
+    """)
+    result = testdir.runpytest()
+    assert result.ret != 0
+    result.stderr.fnmatch_lines([
+        '*unknown hook*pytest_hello*'
+    ])
+
+def test_hookvalidation_optional(testdir):
+    testdir.makeconftest("""
+        import py
+        @py.test.mark.optionalhook
+        def pytest_hello(xyz):
+            pass
+    """)
+    result = testdir.runpytest()
+    assert result.ret == 0
+
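
test_collectattr in the hunk above only fixes the observable behaviour of _pytest.helpconfig.collectattr: it yields the pytest_*-prefixed attribute names of a class or instance, including inherited ones. A hypothetical reconstruction with that behaviour:

    def collectattr(obj):
        # Gather all pytest_*-named attributes of a class or instance,
        # including inherited ones; iterating the result yields the
        # names, which is all test_collectattr relies on.
        hooks = {}
        for name in dir(obj):
            if name.startswith("pytest_"):
                hooks[name] = getattr(obj, name)
        return hooks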

--- a/testing/plugin/test_capture.py
+++ /dev/null
@@ -1,379 +0,0 @@
-import py, os, sys
-from _pytest.capture import CaptureManager
-
-needsosdup = py.test.mark.xfail("not hasattr(os, 'dup')")
-
-class TestCaptureManager:
-    def test_getmethod_default_no_fd(self, testdir, monkeypatch):
-        config = testdir.parseconfig(testdir.tmpdir)
-        assert config.getvalue("capture") is None
-        capman = CaptureManager()
-        monkeypatch.delattr(os, 'dup', raising=False)
-        try:
-            assert capman._getmethod(config, None) == "sys"
-        finally:
-            monkeypatch.undo()
-
-    def test_configure_per_fspath(self, testdir):
-        config = testdir.parseconfig(testdir.tmpdir)
-        assert config.getvalue("capture") is None
-        capman = CaptureManager()
-        hasfd = hasattr(os, 'dup')
-        if hasfd:
-            assert capman._getmethod(config, None) == "fd"
-        else:
-            assert capman._getmethod(config, None) == "sys"
-
-        for name in ('no', 'fd', 'sys'):
-            if not hasfd and name == 'fd':
-                continue
-            sub = testdir.tmpdir.mkdir("dir" + name)
-            sub.ensure("__init__.py")
-            sub.join("conftest.py").write('option_capture = %r' % name)
-            assert capman._getmethod(config, sub.join("test_hello.py")) == name
-
-    @needsosdup
-    @py.test.mark.multi(method=['no', 'fd', 'sys'])
-    def test_capturing_basic_api(self, method):
-        capouter = py.io.StdCaptureFD()
-        old = sys.stdout, sys.stderr, sys.stdin
-        try:
-            capman = CaptureManager()
-            # call suspend without resume or start
-            outerr = capman.suspendcapture()
-            outerr = capman.suspendcapture()
-            assert outerr == ("", "")
-            capman.resumecapture(method)
-            print ("hello")
-            out, err = capman.suspendcapture()
-            if method == "no":
-                assert old == (sys.stdout, sys.stderr, sys.stdin)
-            else:
-                assert out == "hello\n"
-            capman.resumecapture(method)
-            out, err = capman.suspendcapture()
-            assert not out and not err
-        finally:
-            capouter.reset()
-
-    @needsosdup
-    def test_juggle_capturings(self, testdir):
-        capouter = py.io.StdCaptureFD()
-        try:
-            config = testdir.parseconfig(testdir.tmpdir)
-            capman = CaptureManager()
-            capman.resumecapture("fd")
-            py.test.raises(ValueError, 'capman.resumecapture("fd")')
-            py.test.raises(ValueError, 'capman.resumecapture("sys")')
-            os.write(1, "hello\n".encode('ascii'))
-            out, err = capman.suspendcapture()
-            assert out == "hello\n"
-            capman.resumecapture("sys")
-            os.write(1, "hello\n".encode('ascii'))
-            py.builtin.print_("world", file=sys.stderr)
-            out, err = capman.suspendcapture()
-            assert not out
-            assert err == "world\n"
-        finally:
-            capouter.reset()
-
-@py.test.mark.multi(method=['fd', 'sys'])
-def test_capturing_unicode(testdir, method):
-    if sys.version_info >= (3,0):
-        obj = "'b\u00f6y'"
-    else:
-        obj = "u'\u00f6y'"
-    testdir.makepyfile("""
-        # coding=utf8
-        # taken from issue 227 from nosetests
-        def test_unicode():
-            import sys
-            print (sys.stdout)
-            print (%s)
-    """ % obj)
-    result = testdir.runpytest("--capture=%s" % method)
-    result.stdout.fnmatch_lines([
-        "*1 passed*"
-    ])
-
-@py.test.mark.multi(method=['fd', 'sys'])
-def test_capturing_bytes_in_utf8_encoding(testdir, method):
-    testdir.makepyfile("""
-        def test_unicode():
-            print ('b\\u00f6y')
-    """)
-    result = testdir.runpytest("--capture=%s" % method)
-    result.stdout.fnmatch_lines([
-        "*1 passed*"
-    ])
-
-def test_collect_capturing(testdir):
-    p = testdir.makepyfile("""
-        print ("collect %s failure" % 13)
-        import xyz42123
-    """)
-    result = testdir.runpytest(p)
-    result.stdout.fnmatch_lines([
-        "*Captured stdout*",
-        "*collect 13 failure*",
-    ])
-
-class TestPerTestCapturing:
-    def test_capture_and_fixtures(self, testdir):
-        p = testdir.makepyfile("""
-            def setup_module(mod):
-                print ("setup module")
-            def setup_function(function):
-                print ("setup " + function.__name__)
-            def test_func1():
-                print ("in func1")
-                assert 0
-            def test_func2():
-                print ("in func2")
-                assert 0
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "setup module*",
-            "setup test_func1*",
-            "in func1*",
-            "setup test_func2*",
-            "in func2*",
-        ])
-
-    @py.test.mark.xfail
-    def test_capture_scope_cache(self, testdir):
-        p = testdir.makepyfile("""
-            import sys
-            def setup_module(func):
-                print ("module-setup")
-            def setup_function(func):
-                print ("function-setup")
-            def test_func():
-                print ("in function")
-                assert 0
-            def teardown_function(func):
-                print ("in teardown")
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*test_func():*",
-            "*Captured stdout during setup*",
-            "module-setup*",
-            "function-setup*",
-            "*Captured stdout*",
-            "in teardown*",
-        ])
-
-
-    def test_no_carry_over(self, testdir):
-        p = testdir.makepyfile("""
-            def test_func1():
-                print ("in func1")
-            def test_func2():
-                print ("in func2")
-                assert 0
-        """)
-        result = testdir.runpytest(p)
-        s = result.stdout.str()
-        assert "in func1" not in s
-        assert "in func2" in s
-
-
-    def test_teardown_capturing(self, testdir):
-        p = testdir.makepyfile("""
-            def setup_function(function):
-                print ("setup func1")
-            def teardown_function(function):
-                print ("teardown func1")
-                assert 0
-            def test_func1():
-                print ("in func1")
-                pass
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            '*teardown_function*',
-            '*Captured stdout*',
-            "setup func1*",
-            "in func1*",
-            "teardown func1*",
-            #"*1 fixture failure*"
-        ])
-
-    def test_teardown_final_capturing(self, testdir):
-        p = testdir.makepyfile("""
-            def teardown_module(mod):
-                print ("teardown module")
-                assert 0
-            def test_func():
-                pass
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*def teardown_module(mod):*",
-            "*Captured stdout*",
-            "*teardown module*",
-            "*1 error*",
-        ])
-
-    def test_capturing_outerr(self, testdir):
-        p1 = testdir.makepyfile("""
-            import sys
-            def test_capturing():
-                print (42)
-                sys.stderr.write(str(23))
-            def test_capturing_error():
-                print (1)
-                sys.stderr.write(str(2))
-                raise ValueError
-        """)
-        result = testdir.runpytest(p1)
-        result.stdout.fnmatch_lines([
-            "*test_capturing_outerr.py .F",
-            "====* FAILURES *====",
-            "____*____",
-            "*test_capturing_outerr.py:8: ValueError",
-            "*--- Captured stdout ---*",
-            "1",
-            "*--- Captured stderr ---*",
-            "2",
-        ])
-
-class TestLoggingInteraction:
-    def test_logging_stream_ownership(self, testdir):
-        p = testdir.makepyfile("""
-            def test_logging():
-                import logging
-                import py
-                stream = py.io.TextIO()
-                logging.basicConfig(stream=stream)
-                stream.close() # to free memory/release resources
-        """)
-        result = testdir.runpytest(p)
-        result.stderr.str().find("atexit") == -1
-
-    def test_logging_and_immediate_setupteardown(self, testdir):
-        p = testdir.makepyfile("""
-            import logging
-            def setup_function(function):
-                logging.warn("hello1")
-
-            def test_logging():
-                logging.warn("hello2")
-                assert 0
-
-            def teardown_function(function):
-                logging.warn("hello3")
-                assert 0
-        """)
-        for optargs in (('--capture=sys',), ('--capture=fd',)):
-            print (optargs)
-            result = testdir.runpytest(p, *optargs)
-            s = result.stdout.str()
-            result.stdout.fnmatch_lines([
-                "*WARN*hello3",  # errors show first!
-                "*WARN*hello1",
-                "*WARN*hello2",
-            ])
-            # verify proper termination
-            assert "closed" not in s
-
-    def test_logging_and_crossscope_fixtures(self, testdir):
-        p = testdir.makepyfile("""
-            import logging
-            def setup_module(function):
-                logging.warn("hello1")
-
-            def test_logging():
-                logging.warn("hello2")
-                assert 0
-
-            def teardown_module(function):
-                logging.warn("hello3")
-                assert 0
-        """)
-        for optargs in (('--capture=sys',), ('--capture=fd',)):
-            print (optargs)
-            result = testdir.runpytest(p, *optargs)
-            s = result.stdout.str()
-            result.stdout.fnmatch_lines([
-                "*WARN*hello3",  # errors come first
-                "*WARN*hello1",
-                "*WARN*hello2",
-            ])
-            # verify proper termination
-            assert "closed" not in s
-
-class TestCaptureFuncarg:
-    def test_std_functional(self, testdir):
-        reprec = testdir.inline_runsource("""
-            def test_hello(capsys):
-                print (42)
-                out, err = capsys.readouterr()
-                assert out.startswith("42")
-        """)
-        reprec.assertoutcome(passed=1)
-
-    @needsosdup
-    def test_stdfd_functional(self, testdir):
-        reprec = testdir.inline_runsource("""
-            def test_hello(capfd):
-                import os
-                os.write(1, "42".encode('ascii'))
-                out, err = capfd.readouterr()
-                assert out.startswith("42")
-                capfd.close()
-        """)
-        reprec.assertoutcome(passed=1)
-
-    def test_partial_setup_failure(self, testdir):
-        p = testdir.makepyfile("""
-            def test_hello(capsys, missingarg):
-                pass
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*test_partial_setup_failure*",
-            "*1 error*",
-        ])
-
-    @needsosdup
-    def test_keyboardinterrupt_disables_capturing(self, testdir):
-        p = testdir.makepyfile("""
-            def test_hello(capfd):
-                import os
-                os.write(1, str(42).encode('ascii'))
-                raise KeyboardInterrupt()
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*KeyboardInterrupt*"
-        ])
-        assert result.ret == 2
-
-def test_setup_failure_does_not_kill_capturing(testdir):
-    sub1 = testdir.mkpydir("sub1")
-    sub1.join("conftest.py").write(py.code.Source("""
-        def pytest_runtest_setup(item):
-            raise ValueError(42)
-    """))
-    sub1.join("test_mod.py").write("def test_func1(): pass")
-    result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
-    result.stdout.fnmatch_lines([
-        "*ValueError(42)*",
-        "*1 error*"
-    ])
-
-def test_fdfuncarg_skips_on_no_osdup(testdir):
-    testdir.makepyfile("""
-        import os
-        if hasattr(os, 'dup'):
-            del os.dup
-        def test_hello(capfd):
-            pass
-    """)
-    result = testdir.runpytest("--capture=no")
-    result.stdout.fnmatch_lines([
-        "*1 skipped*"
-    ])

--- /dev/null
+++ b/testing/test_resultlog.py
@@ -0,0 +1,193 @@
+import py
+import os
+from _pytest.resultlog import generic_path, ResultLog, \
+        pytest_configure, pytest_unconfigure
+from _pytest.session import Node, Item, FSCollector
+
+def test_generic_path(testdir):
+    from _pytest.session import Session
+    config = testdir.parseconfig()
+    session = Session(config)
+    p1 = Node('a', config=config, session=session)
+    #assert p1.fspath is None
+    p2 = Node('B', parent=p1)
+    p3 = Node('()', parent = p2)
+    item = Item('c', parent = p3)
+
+    res = generic_path(item)
+    assert res == 'a.B().c'
+
+    p0 = FSCollector('proj/test', config=config, session=session)
+    p1 = FSCollector('proj/test/a', parent=p0)
+    p2 = Node('B', parent=p1)
+    p3 = Node('()', parent = p2)
+    p4 = Node('c', parent=p3)
+    item = Item('[1]', parent = p4)
+
+    res = generic_path(item)
+    assert res == 'test/a:B().c[1]'
+
+def test_write_log_entry():
+    reslog = ResultLog(None, None)
+    reslog.logfile = py.io.TextIO()
+    reslog.write_log_entry('name', '.', '')
+    entry = reslog.logfile.getvalue()
+    assert entry[-1] == '\n'
+    entry_lines = entry.splitlines()
+    assert len(entry_lines) == 1
+    assert entry_lines[0] == '. name'
+
+    reslog.logfile = py.io.TextIO()
+    reslog.write_log_entry('name', 's', 'Skipped')
+    entry = reslog.logfile.getvalue()
+    assert entry[-1] == '\n'
+    entry_lines = entry.splitlines()
+    assert len(entry_lines) == 2
+    assert entry_lines[0] == 's name'
+    assert entry_lines[1] == ' Skipped'
+
+    reslog.logfile = py.io.TextIO()
+    reslog.write_log_entry('name', 's', 'Skipped\n')
+    entry = reslog.logfile.getvalue()
+    assert entry[-1] == '\n'
+    entry_lines = entry.splitlines()
+    assert len(entry_lines) == 2
+    assert entry_lines[0] == 's name'
+    assert entry_lines[1] == ' Skipped'
+
+    reslog.logfile = py.io.TextIO()
+    longrepr = ' tb1\n tb 2\nE tb3\nSome Error'
+    reslog.write_log_entry('name', 'F', longrepr)
+    entry = reslog.logfile.getvalue()
+    assert entry[-1] == '\n'
+    entry_lines = entry.splitlines()
+    assert len(entry_lines) == 5
+    assert entry_lines[0] == 'F name'
+    assert entry_lines[1:] == [' '+line for line in longrepr.splitlines()]
+
+
+class TestWithFunctionIntegration:
+    # XXX (hpk) i think that the resultlog plugin should
+    # provide a Parser object so that one can remain
+    # ignorant regarding formatting details.
+    def getresultlog(self, testdir, arg):
+        resultlog = testdir.tmpdir.join("resultlog")
+        testdir.plugins.append("resultlog")
+        args = ["--resultlog=%s" % resultlog] + [arg]
+        testdir.runpytest(*args)
+        return [x for x in resultlog.readlines(cr=0) if x]
+
+    def test_collection_report(self, testdir):
+        ok = testdir.makepyfile(test_collection_ok="")
+        skip = testdir.makepyfile(test_collection_skip="import py ; py.test.skip('hello')")
+        fail = testdir.makepyfile(test_collection_fail="XXX")
+        lines = self.getresultlog(testdir, ok)
+        assert not lines
+
+        lines = self.getresultlog(testdir, skip)
+        assert len(lines) == 2
+        assert lines[0].startswith("S ")
+        assert lines[0].endswith("test_collection_skip.py")
+        assert lines[1].startswith(" ")
+        assert lines[1].endswith("test_collection_skip.py:1: Skipped: hello")
+
+        lines = self.getresultlog(testdir, fail)
+        assert lines
+        assert lines[0].startswith("F ")
+        assert lines[0].endswith("test_collection_fail.py"), lines[0]
+        for x in lines[1:]:
+            assert x.startswith(" ")
+        assert "XXX" in "".join(lines[1:])
+
+    def test_log_test_outcomes(self, testdir):
+        mod = testdir.makepyfile(test_mod="""
+            import py
+            def test_pass(): pass
+            def test_skip(): py.test.skip("hello")
+            def test_fail(): raise ValueError("FAIL")
+
+            @py.test.mark.xfail
+            def test_xfail(): raise ValueError("XFAIL")
+            @py.test.mark.xfail
+            def test_xpass(): pass
+
+        """)
+        lines = self.getresultlog(testdir, mod)
+        assert len(lines) >= 3
+        assert lines[0].startswith(". ")
+        assert lines[0].endswith("test_pass")
+        assert lines[1].startswith("s "), lines[1]
+        assert lines[1].endswith("test_skip")
+        assert lines[2].find("hello") != -1
+
+        assert lines[3].startswith("F ")
+        assert lines[3].endswith("test_fail")
+        tb = "".join(lines[4:8])
+        assert tb.find('raise ValueError("FAIL")') != -1
+
+        assert lines[8].startswith('x ')
+        tb = "".join(lines[8:14])
+        assert tb.find('raise ValueError("XFAIL")') != -1
+
+        assert lines[14].startswith('X ')
+        assert len(lines) == 15
+
+    def test_internal_exception(self):
+        # they are produced for example by a teardown failing
+        # at the end of the run
+        try:
+            raise ValueError
+        except ValueError:
+            excinfo = py.code.ExceptionInfo()
+        reslog = ResultLog(None, py.io.TextIO())
+        reslog.pytest_internalerror(excinfo.getrepr())
+        entry = reslog.logfile.getvalue()
+        entry_lines = entry.splitlines()
+
+        assert entry_lines[0].startswith('! ')
+        assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class
+        assert entry_lines[-1][0] == ' '
+        assert 'ValueError' in entry
+
+def test_generic(testdir, LineMatcher):
+    testdir.plugins.append("resultlog")
+    testdir.makepyfile("""
+        import py
+        def test_pass():
+            pass
+        def test_fail():
+            assert 0
+        def test_skip():
+            py.test.skip("")
+        @py.test.mark.xfail
+        def test_xfail():
+            assert 0
+        @py.test.mark.xfail(run=False)
+        def test_xfail_norun():
+            assert 0
+    """)
+    testdir.runpytest("--resultlog=result.log")
+    lines = testdir.tmpdir.join("result.log").readlines(cr=0)
+    LineMatcher(lines).fnmatch_lines([
+        ". *:test_pass",
+        "F *:test_fail",
+        "s *:test_skip",
+        "x *:test_xfail",
+        "x *:test_xfail_norun",
+    ])
+
+def test_no_resultlog_on_slaves(testdir):
+    config = testdir.parseconfig("-p", "resultlog", "--resultlog=resultlog")
+
+    assert not hasattr(config, '_resultlog')
+    pytest_configure(config)
+    assert hasattr(config, '_resultlog')
+    pytest_unconfigure(config)
+    assert not hasattr(config, '_resultlog')
+
+    config.slaveinput = {}
+    pytest_configure(config)
+    assert not hasattr(config, '_resultlog')
+    pytest_unconfigure(config)
+    assert not hasattr(config, '_resultlog')
+
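
The XXX note in TestWithFunctionIntegration above wishes for a Parser object so callers need not hard-code the resultlog line format (a status character plus test id in column 0, followed by space-indented longrepr lines, as asserted in test_write_log_entry). A rough sketch of such a helper, illustrative only and not part of this patch:

    def parse_resultlog(lines):
        # Group raw resultlog lines into (shortrepr, testid,
        # longrepr_lines) tuples: entry lines start in column 0 as
        # "<char> <testid>", continuation lines are indented by one space.
        entries = []
        for line in lines:
            line = line.rstrip("\n")
            if not line:
                continue
            if line.startswith(" "):
                entries[-1][2].append(line[1:])
            else:
                shortrepr, testid = line.split(" ", 1)
                entries.append((shortrepr, testid, []))
        return entries

Feeding it the lines read back in test_generic would yield one entry per test, with shortrepr values '.', 'F', 's' and 'x'.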

--- a/testing/plugin/test_runner.py
+++ /dev/null
@@ -1,387 +0,0 @@
-import py, sys
-from _pytest import runner
-from py._code.code import ReprExceptionInfo
-
-class TestSetupState:
-    def test_setup(self, testdir):
-        ss = runner.SetupState()
-        item = testdir.getitem("def test_func(): pass")
-        l = [1]
-        ss.prepare(item)
-        ss.addfinalizer(l.pop, colitem=item)
-        assert l
-        ss._pop_and_teardown()
-        assert not l
-
-    def test_setup_scope_None(self, testdir):
-        item = testdir.getitem("def test_func(): pass")
-        ss = runner.SetupState()
-        l = [1]
-        ss.prepare(item)
-        ss.addfinalizer(l.pop, colitem=None)
-        assert l
-        ss._pop_and_teardown()
-        assert l
-        ss._pop_and_teardown()
-        assert l
-        ss.teardown_all()
-        assert not l
-
-    def test_teardown_exact_stack_empty(self, testdir):
-        item = testdir.getitem("def test_func(): pass")
-        ss = runner.SetupState()
-        ss.teardown_exact(item)
-        ss.teardown_exact(item)
-        ss.teardown_exact(item)
-
-    def test_setup_fails_and_failure_is_cached(self, testdir):
-        item = testdir.getitem("""
-            def setup_module(mod):
-                raise ValueError(42)
-            def test_func(): pass
-        """)
-        ss = runner.SetupState()
-        py.test.raises(ValueError, "ss.prepare(item)")
-        py.test.raises(ValueError, "ss.prepare(item)")
-
-class BaseFunctionalTests:
-    def test_passfunction(self, testdir):
-        reports = testdir.runitem("""
-            def test_func():
-                pass
-        """)
-        rep = reports[1]
-        assert rep.passed
-        assert not rep.failed
-        assert rep.outcome == "passed"
-        assert not rep.longrepr
-
-    def test_failfunction(self, testdir):
-        reports = testdir.runitem("""
-            def test_func():
-                assert 0
-        """)
-        rep = reports[1]
-        assert not rep.passed
-        assert not rep.skipped
-        assert rep.failed
-        assert rep.when == "call"
-        assert rep.outcome == "failed"
-        #assert isinstance(rep.longrepr, ReprExceptionInfo)
-
-    def test_skipfunction(self, testdir):
-        reports = testdir.runitem("""
-            import py
-            def test_func():
-                py.test.skip("hello")
-        """)
-        rep = reports[1]
-        assert not rep.failed
-        assert not rep.passed
-        assert rep.skipped
-        assert rep.outcome == "skipped"
-        #assert rep.skipped.when == "call"
-        #assert rep.skipped.when == "call"
-        #assert rep.skipped.reason == "hello"
-        #assert rep.skipped.location.lineno == 3
-        #assert rep.skipped.location.path
-        #assert not rep.skipped.failurerepr
-
-    def test_skip_in_setup_function(self, testdir):
-        reports = testdir.runitem("""
-            import py
-            def setup_function(func):
-                py.test.skip("hello")
-            def test_func():
-                pass
-        """)
-        print(reports)
-        rep = reports[0]
-        assert not rep.failed
-        assert not rep.passed
-        assert rep.skipped
-        #assert rep.skipped.reason == "hello"
-        #assert rep.skipped.location.lineno == 3
-        #assert rep.skipped.location.lineno == 3
-        assert len(reports) == 2
-        assert reports[1].passed # teardown
-
-    def test_failure_in_setup_function(self, testdir):
-        reports = testdir.runitem("""
-            import py
-            def setup_function(func):
-                raise ValueError(42)
-            def test_func():
-                pass
-        """)
-        rep = reports[0]
-        assert not rep.skipped
-        assert not rep.passed
-        assert rep.failed
-        assert rep.when == "setup"
-        assert len(reports) == 2
-
-    def test_failure_in_teardown_function(self, testdir):
-        reports = testdir.runitem("""
-            import py
-            def teardown_function(func):
-                raise ValueError(42)
-            def test_func():
-                pass
-        """)
-        print(reports)
-        assert len(reports) == 3
-        rep = reports[2]
-        assert not rep.skipped
-        assert not rep.passed
-        assert rep.failed
-        assert rep.when == "teardown"
-        #assert rep.longrepr.reprcrash.lineno == 3
-        #assert rep.longrepr.reprtraceback.reprentries
-
-    def test_custom_failure_repr(self, testdir):
-        testdir.makepyfile(conftest="""
-            import pytest
-            class Function(pytest.Function):
-                def repr_failure(self, excinfo):
-                    return "hello"
-        """)
-        reports = testdir.runitem("""
-            import py
-            def test_func():
-                assert 0
-        """)
-        rep = reports[1]
-        assert not rep.skipped
-        assert not rep.passed
-        assert rep.failed
-        #assert rep.outcome.when == "call"
-        #assert rep.failed.where.lineno == 3
-        #assert rep.failed.where.path.basename == "test_func.py"
-        #assert rep.failed.failurerepr == "hello"
-
-    def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
-        testdir.makepyfile(conftest="""
-            import pytest
-            class Function(pytest.Function):
-                def repr_failure(self, excinfo):
-                    assert 0
-        """)
-        reports = testdir.runitem("""
-            def setup_function(func):
-                raise ValueError(42)
-            def test_func():
-                pass
-        """)
-        assert len(reports) == 2
-        rep = reports[0]
-        print(rep)
-        assert not rep.skipped
-        assert not rep.passed
-        assert rep.failed
-        #assert rep.outcome.when == "setup"
-        #assert rep.outcome.where.lineno == 3
-        #assert rep.outcome.where.path.basename == "test_func.py"
-        #assert isinstance(rep.failed.failurerepr, PythonFailureRepr)
-
-    def test_systemexit_does_not_bail_out(self, testdir):
-        try:
-            reports = testdir.runitem("""
-                def test_func():
-                    raise SystemExit(42)
-            """)
-        except SystemExit:
-            py.test.fail("runner did not catch SystemExit")
-        rep = reports[1]
-        assert rep.failed
-        assert rep.when == "call"
-
-    def test_exit_propagates(self, testdir):
-        try:
-            testdir.runitem("""
-                import pytest
-                def test_func():
-                    raise pytest.exit.Exception()
-            """)
-        except py.test.exit.Exception:
-            pass
-        else:
-            py.test.fail("did not raise")
-
-class TestExecutionNonForked(BaseFunctionalTests):
-    def getrunner(self):
-        def f(item):
-            return runner.runtestprotocol(item, log=False)
-        return f
-
-    def test_keyboardinterrupt_propagates(self, testdir):
-        try:
-            testdir.runitem("""
-                def test_func():
-                    raise KeyboardInterrupt("fake")
-            """)
-        except KeyboardInterrupt:
-            pass
-        else:
-            py.test.fail("did not raise")
-
-class TestExecutionForked(BaseFunctionalTests):
-    pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')")
-
-    def getrunner(self):
-        # XXX re-arrange this test to live in pytest-xdist
-        xplugin = py.test.importorskip("xdist.plugin")
-        return xplugin.forked_run_report
-
-    def test_suicide(self, testdir):
-        reports = testdir.runitem("""
-            def test_func():
-                import os
-                os.kill(os.getpid(), 15)
-        """)
-        rep = reports[0]
-        assert rep.failed
-        assert rep.when == "???"
-
-class TestSessionReports:
-    def test_collect_result(self, testdir):
-        col = testdir.getmodulecol("""
-            def test_func1():
-                pass
-            class TestClass:
-                pass
-        """)
-        rep = runner.pytest_make_collect_report(col)
-        assert not rep.failed
-        assert not rep.skipped
-        assert rep.passed
-        locinfo = rep.location
-        assert locinfo[0] == col.fspath.basename
-        assert not locinfo[1]
-        assert locinfo[2] == col.fspath.basename
-        res = rep.result
-        assert len(res) == 2
-        assert res[0].name == "test_func1"
-        assert res[1].name == "TestClass"
-
-    def test_skip_at_module_scope(self, testdir):
-        col = testdir.getmodulecol("""
-            import pytest
-            pytest.skip("hello")
-            def test_func():
-                pass
-        """)
-        rep = runner.pytest_make_collect_report(col)
-        assert not rep.failed
-        assert not rep.passed
-        assert rep.skipped
-
-def test_callinfo():
-    ci = runner.CallInfo(lambda: 0, '123')
-    assert ci.when == "123"
-    assert ci.result == 0
-    assert "result" in repr(ci)
-    ci = runner.CallInfo(lambda: 0/0, '123')
-    assert ci.when == "123"
-    assert not hasattr(ci, 'result')
-    assert ci.excinfo
-    assert "exc" in repr(ci)
-
-# design question: do we want general hooks in python files?
-# then something like the following functional tests makes sense
-@py.test.mark.xfail
-def test_runtest_in_module_ordering(testdir):
-    p1 = testdir.makepyfile("""
-        def pytest_runtest_setup(item): # runs after class-level!
-            item.function.mylist.append("module")
-        class TestClass:
-            def pytest_runtest_setup(self, item):
-                assert not hasattr(item.function, 'mylist')
-                item.function.mylist = ['class']
-            def pytest_funcarg__mylist(self, request):
-                return request.function.mylist
-            def pytest_runtest_call(self, item, __multicall__):
-                try:
-                    __multicall__.execute()
-                except ValueError:
-                    pass
-            def test_hello1(self, mylist):
-                assert mylist == ['class', 'module'], mylist
-                raise ValueError()
-            def test_hello2(self, mylist):
-                assert mylist == ['class', 'module'], mylist
-        def pytest_runtest_teardown(item):
-            del item.function.mylist
-    """)
-    result = testdir.runpytest(p1)
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
-
-
-def test_pytest_exit():
-    try:
-        py.test.exit("hello")
-    except py.test.exit.Exception:
-        excinfo = py.code.ExceptionInfo()
-        assert excinfo.errisinstance(KeyboardInterrupt)
-
-def test_pytest_fail():
-    try:
-        py.test.fail("hello")
-    except py.test.fail.Exception:
-        excinfo = py.code.ExceptionInfo()
-        s = excinfo.exconly(tryshort=True)
-        assert s.startswith("Failed")
-
-def test_exception_printing_skip():
-    try:
-        py.test.skip("hello")
-    except py.test.skip.Exception:
-        excinfo = py.code.ExceptionInfo()
-        s = excinfo.exconly(tryshort=True)
-        assert s.startswith("Skipped")
-
-def test_importorskip():
-    importorskip = py.test.importorskip
-    try:
-        sys = importorskip("sys")
-        assert sys == py.std.sys
-        #path = py.test.importorskip("os.path")
-        #assert path == py.std.os.path
-        py.test.raises(py.test.skip.Exception,
-            "py.test.importorskip('alskdj')")
-        py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
-        py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
-        path = importorskip("py", minversion=".".join(py.__version__))
-        mod = py.std.types.ModuleType("hello123")
-        mod.__version__ = "1.3"
-        py.test.raises(py.test.skip.Exception, """
-            py.test.importorskip("hello123", minversion="5.0")
-        """)
-    except py.test.skip.Exception:
-        print(py.code.ExceptionInfo())
-        py.test.fail("spurious skip")
-
-def test_importorskip_imports_last_module_part():
-    import os
-    ospath = py.test.importorskip("os.path")
-    assert os.path == ospath
-
-
-def test_pytest_cmdline_main(testdir):
-    p = testdir.makepyfile("""
-        import sys
-        sys.path.insert(0, %r)
-        import py
-        def test_hello():
-            assert 1
-        if __name__ == '__main__':
-           py.test.cmdline.main([__file__])
-    """ % (str(py._pydir.dirpath())))
-    import subprocess
-    popen = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE)
-    s = popen.stdout.read()
-    ret = popen.wait()
-    assert ret == 0
-
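
(Illustrative sketch only, not _pytest.runner.SetupState: the removed
TestSetupState tests above assert a prepare/addfinalizer/teardown contract
with LIFO finalizers. A standalone toy class with invented names that
satisfies those assertions:)

class FinalizerStack:
    def __init__(self):
        self.stack = []        # items currently set up, innermost last
        self.finalizers = {}   # colitem -> registered finalizer callables

    def prepare(self, item):
        self.stack.append(item)

    def addfinalizer(self, fin, colitem):
        self.finalizers.setdefault(colitem, []).append(fin)

    def pop_and_teardown(self):
        item = self.stack.pop()
        for fin in reversed(self.finalizers.pop(item, [])):
            fin()

    def teardown_all(self):
        while self.stack:
            self.pop_and_teardown()
        # finalizers registered with colitem=None only run here
        for fin in reversed(self.finalizers.pop(None, [])):
            fin()

l = [1]
ss = FinalizerStack()
ss.prepare("item")
ss.addfinalizer(l.pop, colitem="item")
ss.pop_and_teardown()
assert not l   # the finalizer ran when its item was torn down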

--- a/testing/plugin/test_terminal.py
+++ /dev/null
@@ -1,639 +0,0 @@
-"""
-terminal reporting of the full testing process.
-"""
-import pytest,py
-import sys
-
-from _pytest.terminal import TerminalReporter, \
-    CollectonlyReporter,  repr_pythonversion, getreportopt
-from _pytest import runner
-
-def basic_run_report(item):
-    runner.call_and_report(item, "setup", log=False)
-    return runner.call_and_report(item, "call", log=False)
-
-class Option:
-    def __init__(self, verbose=False, fulltrace=False):
-        self.verbose = verbose
-        self.fulltrace = fulltrace
-
-    @property
-    def args(self):
-        l = []
-        if self.verbose:
-            l.append('-v')
-        if self.fulltrace:
-            l.append('--fulltrace')
-        return l
-
-def pytest_generate_tests(metafunc):
-    if "option" in metafunc.funcargnames:
-        metafunc.addcall(id="default",
-                         funcargs={'option': Option(verbose=False)})
-        metafunc.addcall(id="verbose",
-                         funcargs={'option': Option(verbose=True)})
-        metafunc.addcall(id="quiet",
-                         funcargs={'option': Option(verbose=-1)})
-        metafunc.addcall(id="fulltrace",
-                         funcargs={'option': Option(fulltrace=True)})
-
-
-class TestTerminal:
-    def test_pass_skip_fail(self, testdir, option):
-        p = testdir.makepyfile("""
-            import py
-            def test_ok():
-                pass
-            def test_skip():
-                py.test.skip("xx")
-            def test_func():
-                assert 0
-        """)
-        result = testdir.runpytest(*option.args)
-        if option.verbose:
-            result.stdout.fnmatch_lines([
-                "*test_pass_skip_fail.py:2: *test_ok*PASS*",
-                "*test_pass_skip_fail.py:4: *test_skip*SKIP*",
-                "*test_pass_skip_fail.py:6: *test_func*FAIL*",
-            ])
-        else:
-            result.stdout.fnmatch_lines([
-            "*test_pass_skip_fail.py .sF"
-        ])
-        result.stdout.fnmatch_lines([
-            "    def test_func():",
-            ">       assert 0",
-            "E       assert 0",
-        ])
-
-    def test_internalerror(self, testdir, linecomp):
-        modcol = testdir.getmodulecol("def test_one(): pass")
-        rep = TerminalReporter(modcol.config, file=linecomp.stringio)
-        excinfo = py.test.raises(ValueError, "raise ValueError('hello')")
-        rep.pytest_internalerror(excinfo.getrepr())
-        linecomp.assert_contains_lines([
-            "INTERNALERROR> *ValueError*hello*"
-        ])
-
-    def test_writeline(self, testdir, linecomp):
-        modcol = testdir.getmodulecol("def test_one(): pass")
-        stringio = py.io.TextIO()
-        rep = TerminalReporter(modcol.config, file=linecomp.stringio)
-        rep.write_fspath_result(py.path.local("xy.py"), '.')
-        rep.write_line("hello world")
-        lines = linecomp.stringio.getvalue().split('\n')
-        assert not lines[0]
-        assert lines[1].endswith("xy.py .")
-        assert lines[2] == "hello world"
-
-    def test_show_runtest_logstart(self, testdir, linecomp):
-        item = testdir.getitem("def test_func(): pass")
-        tr = TerminalReporter(item.config, file=linecomp.stringio)
-        item.config.pluginmanager.register(tr)
-        location = item.reportinfo()
-        tr.config.hook.pytest_runtest_logstart(nodeid=item.nodeid,
-            location=location, fspath=str(item.fspath))
-        linecomp.assert_contains_lines([
-            "*test_show_runtest_logstart.py*"
-        ])
-
-    def test_runtest_location_shown_before_test_starts(self, testdir):
-        p1 = testdir.makepyfile("""
-            def test_1():
-                import time
-                time.sleep(20)
-        """)
-        child = testdir.spawn_pytest("")
-        child.expect(".*test_runtest_location.*py")
-        child.sendeof()
-        child.kill(15)
-
-    def test_itemreport_subclasses_show_subclassed_file(self, testdir):
-        p1 = testdir.makepyfile(test_p1="""
-            class BaseTests:
-                def test_p1(self):
-                    pass
-            class TestClass(BaseTests):
-                pass
-        """)
-        p2 = testdir.makepyfile(test_p2="""
-            from test_p1 import BaseTests
-            class TestMore(BaseTests):
-                pass
-        """)
-        result = testdir.runpytest(p2)
-        result.stdout.fnmatch_lines([
-            "*test_p2.py .",
-            "*1 passed*",
-        ])
-        result = testdir.runpytest("-v", p2)
-        result.stdout.fnmatch_lines([
-            "*test_p2.py <- *test_p1.py:2: TestMore.test_p1*",
-        ])
-
-    def test_keyboard_interrupt(self, testdir, option):
-        p = testdir.makepyfile("""
-            def test_foobar():
-                assert 0
-            def test_spamegg():
-                import py; py.test.skip('skip me please!')
-            def test_interrupt_me():
-                raise KeyboardInterrupt   # simulating the user
-        """)
-
-        result = testdir.runpytest(*option.args)
-        result.stdout.fnmatch_lines([
-            "    def test_foobar():",
-            ">       assert 0",
-            "E       assert 0",
-            "*_keyboard_interrupt.py:6: KeyboardInterrupt*",
-        ])
-        if option.fulltrace:
-            result.stdout.fnmatch_lines([
-                "*raise KeyboardInterrupt   # simulating the user*",
-            ])
-        result.stdout.fnmatch_lines(['*KeyboardInterrupt*'])
-
-
-
-class TestCollectonly:
-    def test_collectonly_basic(self, testdir, linecomp):
-        modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
-            def test_func():
-                pass
-        """)
-        rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
-        modcol.config.pluginmanager.register(rep)
-        indent = rep.indent
-        rep.config.hook.pytest_collectstart(collector=modcol)
-        linecomp.assert_contains_lines([
-           "<Module 'test_collectonly_basic.py'>"
-        ])
-        item = modcol.collect()[0]
-        rep.config.hook.pytest_itemcollected(item=item)
-        linecomp.assert_contains_lines([
-           "  <Function 'test_func'>",
-        ])
-        report = rep.config.hook.pytest_make_collect_report(collector=modcol)
-        rep.config.hook.pytest_collectreport(report=report)
-        assert rep.indent == indent
-
-    def test_collectonly_skipped_module(self, testdir, linecomp):
-        modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
-            import py
-            py.test.skip("nomod")
-        """)
-        rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
-        modcol.config.pluginmanager.register(rep)
-        cols = list(testdir.genitems([modcol]))
-        assert len(cols) == 0
-        linecomp.assert_contains_lines("""
-            <Module 'test_collectonly_skipped_module.py'>
-              !!! Skipped: nomod !!!
-        """)
-
-    def test_collectonly_failed_module(self, testdir, linecomp):
-        modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
-            raise ValueError(0)
-        """)
-        rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
-        modcol.config.pluginmanager.register(rep)
-        cols = list(testdir.genitems([modcol]))
-        assert len(cols) == 0
-        linecomp.assert_contains_lines("""
-            <Module 'test_collectonly_failed_module.py'>
-              !!! ValueError: 0 !!!
-        """)
-
-    def test_collectonly_fatal(self, testdir):
-        p1 = testdir.makeconftest("""
-            def pytest_collectstart(collector):
-                assert 0, "urgs"
-        """)
-        result = testdir.runpytest("--collectonly")
-        result.stdout.fnmatch_lines([
-            "*INTERNAL*args*"
-        ])
-        assert result.ret == 3
-
-    def test_collectonly_simple(self, testdir):
-        p = testdir.makepyfile("""
-            def test_func1():
-                pass
-            class TestClass:
-                def test_method(self):
-                    pass
-        """)
-        result = testdir.runpytest("--collectonly", p)
-        stderr = result.stderr.str().strip()
-        #assert stderr.startswith("inserting into sys.path")
-        assert result.ret == 0
-        extra = result.stdout.fnmatch_lines([
-            "*<Module '*.py'>",
-            "* <Function 'test_func1'*>",
-            "* <Class 'TestClass'>",
-            "*  <Instance '()'>",
-            "*   <Function 'test_method'*>",
-        ])
-
-    def test_collectonly_error(self, testdir):
-        p = testdir.makepyfile("import Errlkjqweqwe")
-        result = testdir.runpytest("--collectonly", p)
-        stderr = result.stderr.str().strip()
-        assert result.ret == 1
-        extra = result.stdout.fnmatch_lines(py.code.Source("""
-            *<Module '*.py'>
-              *ImportError*
-            *!!!*failures*!!!
-            *test_collectonly_error.py:1*
-        """).strip())
-
-
-def test_repr_python_version(monkeypatch):
-    monkeypatch.setattr(sys, 'version_info', (2, 5, 1, 'final', 0))
-    assert repr_pythonversion() == "2.5.1-final-0"
-    py.std.sys.version_info = x = (2,3)
-    assert repr_pythonversion() == str(x)
-
-class TestFixtureReporting:
-    def test_setup_fixture_error(self, testdir):
-        p = testdir.makepyfile("""
-            def setup_function(function):
-                print ("setup func")
-                assert 0
-            def test_nada():
-                pass
-        """)
-        result = testdir.runpytest()
-        result.stdout.fnmatch_lines([
-            "*ERROR at setup of test_nada*",
-            "*setup_function(function):*",
-            "*setup func*",
-            "*assert 0*",
-            "*1 error*",
-        ])
-        assert result.ret != 0
-
-    def test_teardown_fixture_error(self, testdir):
-        p = testdir.makepyfile("""
-            def test_nada():
-                pass
-            def teardown_function(function):
-                print ("teardown func")
-                assert 0
-        """)
-        result = testdir.runpytest()
-        result.stdout.fnmatch_lines([
-            "*ERROR at teardown*",
-            "*teardown_function(function):*",
-            "*assert 0*",
-            "*Captured stdout*",
-            "*teardown func*",
-            "*1 passed*1 error*",
-        ])
-
-    def test_teardown_fixture_error_and_test_failure(self, testdir):
-        p = testdir.makepyfile("""
-            def test_fail():
-                assert 0, "failingfunc"
-
-            def teardown_function(function):
-                print ("teardown func")
-                assert False
-        """)
-        result = testdir.runpytest()
-        result.stdout.fnmatch_lines([
-            "*ERROR at teardown of test_fail*",
-            "*teardown_function(function):*",
-            "*assert False*",
-            "*Captured stdout*",
-            "*teardown func*",
-
-            "*test_fail*",
-            "*def test_fail():",
-            "*failingfunc*",
-            "*1 failed*1 error*",
-         ])
-
-class TestTerminalFunctional:
-    def test_deselected(self, testdir):
-        testpath = testdir.makepyfile("""
-                def test_one():
-                    pass
-                def test_two():
-                    pass
-                def test_three():
-                    pass
-           """
-        )
-        result = testdir.runpytest("-k", "test_two:", testpath)
-        result.stdout.fnmatch_lines([
-            "*test_deselected.py ..",
-            "=* 1 test*deselected by 'test_two:'*=",
-        ])
-        assert result.ret == 0
-
-    def test_no_skip_summary_if_failure(self, testdir):
-        testdir.makepyfile("""
-            import py
-            def test_ok():
-                pass
-            def test_fail():
-                assert 0
-            def test_skip():
-                py.test.skip("dontshow")
-        """)
-        result = testdir.runpytest()
-        assert result.stdout.str().find("skip test summary") == -1
-        assert result.ret == 1
-
-    def test_passes(self, testdir):
-        p1 = testdir.makepyfile("""
-            def test_passes():
-                pass
-            class TestClass:
-                def test_method(self):
-                    pass
-        """)
-        old = p1.dirpath().chdir()
-        try:
-            result = testdir.runpytest()
-        finally:
-            old.chdir()
-        result.stdout.fnmatch_lines([
-            "test_passes.py ..",
-            "* 2 pass*",
-        ])
-        assert result.ret == 0
-
-    def test_header_trailer_info(self, testdir):
-        p1 = testdir.makepyfile("""
-            def test_passes():
-                pass
-        """)
-        result = testdir.runpytest()
-        verinfo = ".".join(map(str, py.std.sys.version_info[:3]))
-        result.stdout.fnmatch_lines([
-            "*===== test session starts ====*",
-            "platform %s -- Python %s*" %(
-                    py.std.sys.platform, verinfo), # , py.std.sys.executable),
-            "*test_header_trailer_info.py .",
-            "=* 1 passed in *.[0-9][0-9] seconds *=",
-        ])
-
-    def test_showlocals(self, testdir):
-        p1 = testdir.makepyfile("""
-            def test_showlocals():
-                x = 3
-                y = "x" * 5000
-                assert 0
-        """)
-        result = testdir.runpytest(p1, '-l')
-        result.stdout.fnmatch_lines([
-            #"_ _ * Locals *",
-            "x* = 3",
-            "y* = 'xxxxxx*"
-        ])
-
-    def test_verbose_reporting(self, testdir, pytestconfig):
-        p1 = testdir.makepyfile("""
-            import py
-            def test_fail():
-                raise ValueError()
-            def test_pass():
-                pass
-            class TestClass:
-                def test_skip(self):
-                    py.test.skip("hello")
-            def test_gen():
-                def check(x):
-                    assert x == 1
-                yield check, 0
-        """)
-        result = testdir.runpytest(p1, '-v')
-        result.stdout.fnmatch_lines([
-            "*test_verbose_reporting.py:2: test_fail*FAIL*",
-            "*test_verbose_reporting.py:4: test_pass*PASS*",
-            "*test_verbose_reporting.py:7: TestClass.test_skip*SKIP*",
-            "*test_verbose_reporting.py:10: test_gen*FAIL*",
-        ])
-        assert result.ret == 1
-        pytestconfig.pluginmanager.skipifmissing("xdist")
-        result = testdir.runpytest(p1, '-v', '-n 1')
-        result.stdout.fnmatch_lines([
-            "*FAIL*test_verbose_reporting.py:2: test_fail*",
-        ])
-        assert result.ret == 1
-
-    def test_quiet_reporting(self, testdir):
-        p1 = testdir.makepyfile("def test_pass(): pass")
-        result = testdir.runpytest(p1, '-q')
-        s = result.stdout.str()
-        assert 'test session starts' not in s
-        assert p1.basename not in s
-        assert "===" not in s
-
-def test_fail_extra_reporting(testdir):
-    p = testdir.makepyfile("def test_this(): assert 0")
-    result = testdir.runpytest(p)
-    assert 'short test summary' not in result.stdout.str()
-    result = testdir.runpytest(p, '-rf')
-    result.stdout.fnmatch_lines([
-        "*test summary*",
-        "FAIL*test_fail_extra_reporting*",
-    ])
-
-def test_fail_reporting_on_pass(testdir):
-    p = testdir.makepyfile("def test_this(): assert 1")
-    result = testdir.runpytest(p, '-rf')
-    assert 'short test summary' not in result.stdout.str()
-
-def test_getreportopt():
-    class config:
-        class option:
-            reportchars = ""
-    config.option.report = "xfailed"
-    assert getreportopt(config) == "x"
-
-    config.option.report = "xfailed,skipped"
-    assert getreportopt(config) == "xs"
-
-    config.option.report = "skipped,xfailed"
-    assert getreportopt(config) == "sx"
-
-    config.option.report = "skipped"
-    config.option.reportchars = "sf"
-    assert getreportopt(config) == "sf"
-
-    config.option.reportchars = "sfx"
-    assert getreportopt(config) == "sfx"
-
-def test_terminalreporter_reportopt_addopts(testdir):
-    testdir.makeini("[pytest]\naddopts=-rs")
-    p = testdir.makepyfile("""
-        def pytest_funcarg__tr(request):
-            tr = request.config.pluginmanager.getplugin("terminalreporter")
-            return tr
-        def test_opt(tr):
-            assert tr.hasopt('skipped')
-            assert not tr.hasopt('qwe')
-    """)
-    result = testdir.runpytest()
-    result.stdout.fnmatch_lines([
-        "*1 passed*"
-    ])
-
-def test_tbstyle_short(testdir):
-    p = testdir.makepyfile("""
-        def pytest_funcarg__arg(request):
-            return 42
-        def test_opt(arg):
-            x = 0
-            assert x
-    """)
-    result = testdir.runpytest("--tb=short")
-    s = result.stdout.str()
-    assert 'arg = 42' not in s
-    assert 'x = 0' not in s
-    result.stdout.fnmatch_lines([
-        "*%s:5*" % p.basename,
-        ">*assert x",
-        "E*assert*",
-    ])
-    result = testdir.runpytest()
-    s = result.stdout.str()
-    assert 'x = 0' in s
-    assert 'assert x' in s
-
-def test_traceconfig(testdir, monkeypatch):
-    result = testdir.runpytest("--traceconfig")
-    result.stdout.fnmatch_lines([
-        "*active plugins*"
-    ])
-    assert result.ret == 0
-
-def test_debug(testdir, monkeypatch):
-    result = testdir.runpytest("--debug")
-    result.stderr.fnmatch_lines([
-        "*pytest_sessionstart*session*",
-    ])
-    assert result.ret == 0
-
-def test_PYTEST_DEBUG(testdir, monkeypatch):
-    monkeypatch.setenv("PYTEST_DEBUG", "1")
-    result = testdir.runpytest()
-    assert result.ret == 0
-    result.stderr.fnmatch_lines([
-        "*registered*PluginManager*"
-    ])
-    
-
-class TestGenericReporting:
-    """ this test class can be subclassed with a different option
-        provider to run e.g. distributed tests.
-    """
-    def test_collect_fail(self, testdir, option):
-        p = testdir.makepyfile("import xyz\n")
-        result = testdir.runpytest(*option.args)
-        result.stdout.fnmatch_lines([
-            "*test_collect_fail.py E*",
-            ">   import xyz",
-            "E   ImportError: No module named xyz",
-            "*1 error*",
-        ])
-
-    def test_maxfailures(self, testdir, option):
-        p = testdir.makepyfile("""
-            def test_1():
-                assert 0
-            def test_2():
-                assert 0
-            def test_3():
-                assert 0
-        """)
-        result = testdir.runpytest("--maxfail=2", *option.args)
-        result.stdout.fnmatch_lines([
-            "*def test_1():*",
-            "*def test_2():*",
-            "*!! Interrupted: stopping after 2 failures*!!*",
-            "*2 failed*",
-        ])
-
-
-    def test_tb_option(self, testdir, option):
-        p = testdir.makepyfile("""
-            import py
-            def g():
-                raise IndexError
-            def test_func():
-                print (6*7)
-                g()  # --calling--
-        """)
-        for tbopt in ["long", "short", "no"]:
-            print('testing --tb=%s...' % tbopt)
-            result = testdir.runpytest('--tb=%s' % tbopt)
-            s = result.stdout.str()
-            if tbopt == "long":
-                assert 'print (6*7)' in s
-            else:
-                assert 'print (6*7)' not in s
-            if tbopt != "no":
-                assert '--calling--' in s
-                assert 'IndexError' in s
-            else:
-                assert 'FAILURES' not in s
-                assert '--calling--' not in s
-                assert 'IndexError' not in s
-
-    def test_tb_crashline(self, testdir, option):
-        p = testdir.makepyfile("""
-            import py
-            def g():
-                raise IndexError
-            def test_func1():
-                print (6*7)
-                g()  # --calling--
-            def test_func2():
-                assert 0, "hello"
-        """)
-        result = testdir.runpytest("--tb=line")
-        bn = p.basename
-        result.stdout.fnmatch_lines([
-            "*%s:3: IndexError*" % bn,
-            "*%s:8: AssertionError: hello*" % bn,
-        ])
-        s = result.stdout.str()
-        assert "def test_func2" not in s
-
-    def test_pytest_report_header(self, testdir, option):
-        testdir.makeconftest("""
-            def pytest_report_header(config):
-                return "hello: info"
-        """)
-        testdir.mkdir("a").join("conftest.py").write("""
-def pytest_report_header(config):
-    return ["line1", "line2"]""")
-        result = testdir.runpytest("a")
-        result.stdout.fnmatch_lines([
-            "line1",
-            "line2",
-            "*hello: info*",
-        ])
-
-@py.test.mark.xfail("not hasattr(os, 'dup')")
-def test_fdopen_kept_alive_issue124(testdir):
-    testdir.makepyfile("""
-        import os, sys
-        k = []
-        def test_open_file_and_keep_alive(capfd):
-            stdout = os.fdopen(1, 'w', 1)
-            k.append(stdout)
-
-        def test_close_kept_alive_file():
-            stdout = k.pop()
-            stdout.close()
-    """)
-    result = testdir.runpytest("-s")
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
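
(Side note on test_getreportopt above: a simplified reconstruction that
reproduces exactly the asserted cases is shown below. It is consistent with
those assertions but is not the real getreportopt from _pytest.terminal.)

def getreportopt_sketch(report, reportchars=""):
    # explicit -r characters win; otherwise each --report name
    # contributes its first letter, in the order given
    if reportchars:
        return reportchars
    return "".join(name.strip()[0] for name in report.split(",") if name.strip())

assert getreportopt_sketch("xfailed,skipped") == "xs"
assert getreportopt_sketch("skipped,xfailed") == "sx"
assert getreportopt_sketch("skipped", reportchars="sf") == "sf"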

--- /dev/null
+++ b/testing/test_skipping.py
@@ -0,0 +1,444 @@
+import py
+
+from _pytest.skipping import MarkEvaluator, folded_skips
+from _pytest.skipping import pytest_runtest_setup
+from _pytest.runner import runtestprotocol
+
+class TestEvaluator:
+    def test_no_marker(self, testdir):
+        item = testdir.getitem("def test_func(): pass")
+        evalskipif = MarkEvaluator(item, 'skipif')
+        assert not evalskipif
+        assert not evalskipif.istrue()
+
+    def test_marked_no_args(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xyz
+            def test_func():
+                pass
+        """)
+        ev = MarkEvaluator(item, 'xyz')
+        assert ev
+        assert ev.istrue()
+        expl = ev.getexplanation()
+        assert expl == ""
+        assert not ev.get("run", False)
+
+    def test_marked_one_arg(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xyz("hasattr(os, 'sep')")
+            def test_func():
+                pass
+        """)
+        ev = MarkEvaluator(item, 'xyz')
+        assert ev
+        assert ev.istrue()
+        expl = ev.getexplanation()
+        assert expl == "condition: hasattr(os, 'sep')"
+
+    def test_marked_one_arg_with_reason(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
+            def test_func():
+                pass
+        """)
+        ev = MarkEvaluator(item, 'xyz')
+        assert ev
+        assert ev.istrue()
+        expl = ev.getexplanation()
+        assert expl == "hello world"
+        assert ev.get("attr") == 2
+
+    def test_marked_one_arg_twice(self, testdir):
+        lines = [
+            '''@py.test.mark.skipif("not hasattr(os, 'murks')")''',
+            '''@py.test.mark.skipif("hasattr(os, 'murks')")'''
+        ]
+        for i in range(0, 2):
+            item = testdir.getitem("""
+                import py
+                %s
+                %s
+                def test_func():
+                    pass
+            """ % (lines[i], lines[(i+1) %2]))
+            ev = MarkEvaluator(item, 'skipif')
+            assert ev
+            assert ev.istrue()
+            expl = ev.getexplanation()
+            assert expl == "condition: not hasattr(os, 'murks')"
+
+    def test_marked_one_arg_twice2(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.skipif("hasattr(os, 'murks')")
+            @py.test.mark.skipif("not hasattr(os, 'murks')")
+            def test_func():
+                pass
+        """)
+        ev = MarkEvaluator(item, 'skipif')
+        assert ev
+        assert ev.istrue()
+        expl = ev.getexplanation()
+        assert expl == "condition: not hasattr(os, 'murks')"
+
+    def test_skipif_class(self, testdir):
+        item, = testdir.getitems("""
+            import py
+            class TestClass:
+                pytestmark = py.test.mark.skipif("config._hackxyz")
+                def test_func(self):
+                    pass
+        """)
+        item.config._hackxyz = 3
+        ev = MarkEvaluator(item, 'skipif')
+        assert ev.istrue()
+        expl = ev.getexplanation()
+        assert expl == "condition: config._hackxyz"
+
+
+class TestXFail:
+    def test_xfail_simple(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xfail
+            def test_func():
+                assert 0
+        """)
+        reports = runtestprotocol(item, log=False)
+        assert len(reports) == 3
+        callreport = reports[1]
+        assert callreport.skipped
+        expl = callreport.keywords['xfail']
+        assert expl == ""
+
+    def test_xfail_xpassed(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xfail
+            def test_func():
+                assert 1
+        """)
+        reports = runtestprotocol(item, log=False)
+        assert len(reports) == 3
+        callreport = reports[1]
+        assert callreport.failed
+        expl = callreport.keywords['xfail']
+        assert expl == ""
+
+    def test_xfail_run_anyway(self, testdir):
+        testdir.makepyfile("""
+            import py
+            @py.test.mark.xfail
+            def test_func():
+                assert 0
+        """)
+        result = testdir.runpytest("--runxfail")
+        assert result.ret == 1
+        result.stdout.fnmatch_lines([
+            "*def test_func():*",
+            "*assert 0*",
+            "*1 failed*",
+        ])
+
+    def test_xfail_evalfalse_but_fails(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.xfail('False')
+            def test_func():
+                assert 0
+        """)
+        reports = runtestprotocol(item, log=False)
+        callreport = reports[1]
+        assert callreport.failed
+        assert 'xfail' not in callreport.keywords
+
+    def test_xfail_not_report_default(self, testdir):
+        p = testdir.makepyfile(test_one="""
+            import py
+            @py.test.mark.xfail
+            def test_this():
+                assert 0
+        """)
+        result = testdir.runpytest(p, '-v')
+        #result.stdout.fnmatch_lines([
+        #    "*HINT*use*-r*"
+        #])
+
+    def test_xfail_not_run_xfail_reporting(self, testdir):
+        p = testdir.makepyfile(test_one="""
+            import py
+            @py.test.mark.xfail(run=False, reason="noway")
+            def test_this():
+                assert 0
+            @py.test.mark.xfail("True", run=False)
+            def test_this_true():
+                assert 0
+            @py.test.mark.xfail("False", run=False, reason="huh")
+            def test_this_false():
+                assert 1
+        """)
+        result = testdir.runpytest(p, '--report=xfailed', )
+        result.stdout.fnmatch_lines([
+            "*test_one*test_this*",
+            "*NOTRUN*noway",
+            "*test_one*test_this_true*",
+            "*NOTRUN*condition:*True*",
+            "*1 passed*",
+        ])
+
+    def test_xfail_not_run_no_setup_run(self, testdir):
+        p = testdir.makepyfile(test_one="""
+            import py
+            @py.test.mark.xfail(run=False, reason="hello")
+            def test_this():
+                assert 0
+            def setup_module(mod):
+                raise ValueError(42)
+        """)
+        result = testdir.runpytest(p, '--report=xfailed', )
+        result.stdout.fnmatch_lines([
+            "*test_one*test_this*",
+            "*NOTRUN*hello",
+            "*1 xfailed*",
+        ])
+
+    def test_xfail_xpass(self, testdir):
+        p = testdir.makepyfile(test_one="""
+            import py
+            @py.test.mark.xfail
+            def test_that():
+                assert 1
+        """)
+        result = testdir.runpytest(p, '-rX')
+        result.stdout.fnmatch_lines([
+            "*XPASS*test_that*",
+            "*1 xpassed*"
+        ])
+        assert result.ret == 0
+
+    def test_xfail_imperative(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def test_this():
+                py.test.xfail("hello")
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+        result = testdir.runpytest(p, "-rx")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*",
+            "*reason:*hello*",
+        ])
+        result = testdir.runpytest(p, "--runxfail")
+        result.stdout.fnmatch_lines([
+            "*def test_this():*",
+            "*py.test.xfail*",
+        ])
+
+    def test_xfail_imperative_in_setup_function(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def setup_function(function):
+                py.test.xfail("hello")
+
+            def test_this():
+                assert 0
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+        result = testdir.runpytest(p, "-rx")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*",
+            "*reason:*hello*",
+        ])
+        result = testdir.runpytest(p, "--runxfail")
+        result.stdout.fnmatch_lines([
+            "*def setup_function(function):*",
+            "*py.test.xfail*",
+        ])
+
+    def xtest_dynamic_xfail_set_during_setup(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def setup_function(function):
+                py.test.mark.xfail(function)
+            def test_this():
+                assert 0
+            def test_that():
+                assert 1
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*",
+            "*XPASS*test_that*",
+        ])
+
+    def test_dynamic_xfail_no_run(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def pytest_funcarg__arg(request):
+                request.applymarker(py.test.mark.xfail(run=False))
+            def test_this(arg):
+                assert 0
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*",
+            "*NOTRUN*",
+        ])
+
+    def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def pytest_funcarg__arg(request):
+                request.applymarker(py.test.mark.xfail)
+            def test_this2(arg):
+                assert 0
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+
+
+class TestSkipif:
+    def test_skipif_conditional(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.skipif("hasattr(os, 'sep')")
+            def test_func():
+                pass
+        """)
+        x = py.test.raises(py.test.skip.Exception, "pytest_runtest_setup(item)")
+        assert x.value.msg == "condition: hasattr(os, 'sep')"
+
+
+    def test_skipif_reporting(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            @py.test.mark.skipif("hasattr(sys, 'platform')")
+            def test_that():
+                assert 0
+        """)
+        result = testdir.runpytest(p, '-s', '-rs')
+        result.stdout.fnmatch_lines([
+            "*SKIP*1*platform*",
+            "*1 skipped*"
+        ])
+        assert result.ret == 0
+
+def test_skip_not_report_default(testdir):
+    p = testdir.makepyfile(test_one="""
+        import py
+        def test_this():
+            py.test.skip("hello")
+    """)
+    result = testdir.runpytest(p, '-v')
+    result.stdout.fnmatch_lines([
+        #"*HINT*use*-r*",
+        "*1 skipped*",
+    ])
+
+
+def test_skipif_class(testdir):
+    p = testdir.makepyfile("""
+        import py
+
+        class TestClass:
+            pytestmark = py.test.mark.skipif("True")
+            def test_that(self):
+                assert 0
+            def test_though(self):
+                assert 0
+    """)
+    result = testdir.runpytest(p)
+    result.stdout.fnmatch_lines([
+        "*2 skipped*"
+    ])
+
+
+def test_skip_reasons_folding():
+    class longrepr:
+        class reprcrash:
+            path = 'xyz'
+            lineno = 3
+            message = "justso"
+
+    class X:
+        pass
+    ev1 = X()
+    ev1.when = "execute"
+    ev1.skipped = True
+    ev1.longrepr = longrepr
+
+    ev2 = X()
+    ev2.longrepr = longrepr
+    ev2.skipped = True
+
+    l = folded_skips([ev1, ev2])
+    assert len(l) == 1
+    num, fspath, lineno, reason = l[0]
+    assert num == 2
+    assert fspath == longrepr.reprcrash.path
+    assert lineno == longrepr.reprcrash.lineno
+    assert reason == longrepr.reprcrash.message
+
+def test_skipped_reasons_functional(testdir):
+    testdir.makepyfile(
+        test_one="""
+            from conftest import doskip
+            def setup_function(func):
+                doskip()
+            def test_func():
+                pass
+            class TestClass:
+                def test_method(self):
+                    doskip()
+       """,
+       test_two = """
+            from conftest import doskip
+            doskip()
+       """,
+       conftest = """
+            import py
+            def doskip():
+                py.test.skip('test')
+        """
+    )
+    result = testdir.runpytest('--report=skipped')
+    result.stdout.fnmatch_lines([
+        "*test_two.py S",
+        "*test_one.py ss",
+        "*SKIP*3*conftest.py:3: test",
+    ])
+    assert result.ret == 0
+
+def test_reportchars(testdir):
+    testdir.makepyfile("""
+        import py
+        def test_1():
+            assert 0
+        @py.test.mark.xfail
+        def test_2():
+            assert 0
+        @py.test.mark.xfail
+        def test_3():
+            pass
+        def test_4():
+            py.test.skip("four")
+    """)
+    result = testdir.runpytest("-rfxXs")
+    result.stdout.fnmatch_lines([
+        "FAIL*test_1*",
+        "XFAIL*test_2*",
+        "XPASS*test_3*",
+        "SKIP*four*",
+    ])
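
(For readers of the new test_skipping.py: the string conditions passed to
skipif/xfail, e.g. "hasattr(os, 'sep')" or "config._hackxyz", are evaluated
as Python expressions. The sketch below shows only that core idea; the real
MarkEvaluator also handles reason=, run= and stacked decorators, and the
function name here is invented.)

import os, sys

def evaluate_condition(expr, config=None):
    # the tests above rely on os, sys and the config object being
    # visible inside the condition string
    namespace = {"os": os, "sys": sys, "config": config}
    return bool(eval(expr, namespace))

assert evaluate_condition("hasattr(os, 'sep')")
assert not evaluate_condition("not hasattr(os, 'sep')")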

--- a/testing/plugin/test_assertion.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import sys
-
-import py
-import _pytest.assertion as plugin
-
-needsnewassert = py.test.mark.skipif("sys.version_info < (2,6)")
-
-def interpret(expr):
-    return py.code._reinterpret(expr, py.code.Frame(sys._getframe(1)))
-
-class TestBinReprIntegration:
-    pytestmark = needsnewassert
-
-    def pytest_funcarg__hook(self, request):
-        class MockHook(object):
-            def __init__(self):
-                self.called = False
-                self.args = tuple()
-                self.kwargs = dict()
-
-            def __call__(self, op, left, right):
-                self.called = True
-                self.op = op
-                self.left = left
-                self.right = right
-        mockhook = MockHook()
-        monkeypatch = request.getfuncargvalue("monkeypatch")
-        monkeypatch.setattr(py.code, '_reprcompare', mockhook)
-        return mockhook
-
-    def test_pytest_assertrepr_compare_called(self, hook):
-        interpret('assert 0 == 1')
-        assert hook.called
-
-
-    def test_pytest_assertrepr_compare_args(self, hook):
-        interpret('assert [0, 1] == [0, 2]')
-        assert hook.op == '=='
-        assert hook.left == [0, 1]
-        assert hook.right == [0, 2]
-
-    def test_configure_unconfigure(self, testdir, hook):
-        assert hook == py.code._reprcompare
-        config = testdir.parseconfig()
-        plugin.pytest_configure(config)
-        assert hook != py.code._reprcompare
-        plugin.pytest_unconfigure(config)
-        assert hook == py.code._reprcompare
-
-def callequal(left, right):
-    return plugin.pytest_assertrepr_compare('==', left, right)
-
-class TestAssert_reprcompare:
-    def test_different_types(self):
-        assert callequal([0, 1], 'foo') is None
-
-    def test_summary(self):
-        summary = callequal([0, 1], [0, 2])[0]
-        assert len(summary) < 65
-
-    def test_text_diff(self):
-        diff = callequal('spam', 'eggs')[1:]
-        assert '- spam' in diff
-        assert '+ eggs' in diff
-
-    def test_multiline_text_diff(self):
-        left = 'foo\nspam\nbar'
-        right = 'foo\neggs\nbar'
-        diff = callequal(left, right)
-        assert '- spam' in diff
-        assert '+ eggs' in diff
-
-    def test_list(self):
-        expl = callequal([0, 1], [0, 2])
-        assert len(expl) > 1
-
-    def test_list_different_lenghts(self):
-        expl = callequal([0, 1], [0, 1, 2])
-        assert len(expl) > 1
-        expl = callequal([0, 1, 2], [0, 1])
-        assert len(expl) > 1
-
-    def test_dict(self):
-        expl = callequal({'a': 0}, {'a': 1})
-        assert len(expl) > 1
-
-    def test_set(self):
-        expl = callequal(set([0, 1]), set([0, 2]))
-        assert len(expl) > 1
-
-    def test_list_tuples(self):
-        expl = callequal([], [(1,2)])
-        assert len(expl) > 1
-        expl = callequal([(1,2)], [])
-        assert len(expl) > 1
-
-    def test_list_bad_repr(self):
-        class A:
-            def __repr__(self):
-                raise ValueError(42)
-        expl = callequal([], [A()])
-        assert 'ValueError' in "".join(expl)
-        expl = callequal({}, {'1': A()})
-        assert 'faulty' in "".join(expl)
-
-    def test_one_repr_empty(self):
-        """
-        a faulty empty-string __repr__ used to trigger
-        an unbound local error in _diff_text
-        """
-        class A(str):
-            def __repr__(self):
-                return ''
-        expl = callequal(A(), '')
-        assert not expl
-
-@needsnewassert
-def test_pytest_assertrepr_compare_integration(testdir):
-    testdir.makepyfile("""
-        def test_hello():
-            x = set(range(100))
-            y = x.copy()
-            y.remove(50)
-            assert x == y
-    """)
-    result = testdir.runpytest()
-    result.stdout.fnmatch_lines([
-        "*def test_hello():*",
-        "*assert x == y*",
-        "*E*Extra items*left*",
-        "*E*50*",
-    ])
-
-@needsnewassert
-def test_sequence_comparison_uses_repr(testdir):
-    testdir.makepyfile("""
-        def test_hello():
-            x = set("hello x")
-            y = set("hello y")
-            assert x == y
-    """)
-    result = testdir.runpytest()
-    result.stdout.fnmatch_lines([
-        "*def test_hello():*",
-        "*assert x == y*",
-        "*E*Extra items*left*",
-        "*E*'x'*",
-        "*E*Extra items*right*",
-        "*E*'y'*",
-    ])
-
-
-def test_functional(testdir):
-    testdir.makepyfile("""
-        def test_hello():
-            x = 3
-            assert x == 4
-    """)
-    result = testdir.runpytest()
-    assert "3 == 4" in result.stdout.str()
-    result = testdir.runpytest("--no-assert")
-    assert "3 == 4" not in result.stdout.str()
-
-def test_triple_quoted_string_issue113(testdir):
-    testdir.makepyfile("""
-        def test_hello():
-            assert "" == '''
-    '''""")
-    result = testdir.runpytest("--fulltrace")
-    result.stdout.fnmatch_lines([
-        "*1 failed*",
-    ])
-    assert 'SyntaxError' not in result.stdout.str()
-
-def test_traceback_failure(testdir):
-    p1 = testdir.makepyfile("""
-        def g():
-            return 2
-        def f(x):
-            assert x == g()
-        def test_onefails():
-            f(3)
-    """)
-    result = testdir.runpytest(p1)
-    result.stdout.fnmatch_lines([
-        "*test_traceback_failure.py F",
-        "====* FAILURES *====",
-        "____*____",
-        "",
-        "    def test_onefails():",
-        ">       f(3)",
-        "",
-        "*test_*.py:6: ",
-        "_ _ _ *",
-        #"",
-        "    def f(x):",
-        ">       assert x == g()",
-        "E       assert 3 == 2",
-        "E        +  where 2 = g()",
-        "",
-        "*test_traceback_failure.py:4: AssertionError"
-    ])
-
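
(Context for the removed assertion tests: the '- spam' / '+ eggs' explanation
lines they look for can be reproduced with difflib. This helper is only a
rough stand-in for the pytest_assertrepr_compare hook, shown to make the
expected output concrete.)

import difflib

def explain_equal_sketch(left, right):
    summary = "%r == %r" % (left, right)
    diff = [line.rstrip() for line in
            difflib.ndiff(str(left).splitlines(), str(right).splitlines())]
    return [summary] + diff

expl = explain_equal_sketch('spam', 'eggs')
assert '- spam' in expl and '+ eggs' in expl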

--- /dev/null
+++ b/testing/test_junitxml.py
@@ -0,0 +1,265 @@
+
+from xml.dom import minidom
+import py, sys
+
+def runandparse(testdir, *args):
+    resultpath = testdir.tmpdir.join("junit.xml")
+    result = testdir.runpytest("--junitxml=%s" % resultpath, *args)
+    xmldoc = minidom.parse(str(resultpath))
+    return result, xmldoc
+
+def assert_attr(node, **kwargs):
+    __tracebackhide__ = True
+    for name, expected in kwargs.items():
+        anode = node.getAttributeNode(name)
+        assert anode, "node %r has no attribute %r" %(node, name)
+        val = anode.value
+        if val != str(expected):
+            py.test.fail("%r != %r" %(str(val), str(expected)))
+
+class TestPython:
+    def test_summing_simple(self, testdir):
+        testdir.makepyfile("""
+            import py
+            def test_pass():
+                pass
+            def test_fail():
+                assert 0
+            def test_skip():
+                py.test.skip("")
+            @py.test.mark.xfail
+            def test_xfail():
+                assert 0
+            @py.test.mark.xfail
+            def test_xpass():
+                assert 1
+        """)
+        result, dom = runandparse(testdir)
+        assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, errors=0, failures=1, skips=3, tests=2)
+
+    def test_setup_error(self, testdir):
+        testdir.makepyfile("""
+            def pytest_funcarg__arg(request):
+                raise ValueError()
+            def test_function(arg):
+                pass
+        """)
+        result, dom = runandparse(testdir)
+        assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, errors=1, tests=0)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            classname="test_setup_error",
+            name="test_function")
+        fnode = tnode.getElementsByTagName("error")[0]
+        assert_attr(fnode, message="test setup failure")
+        assert "ValueError" in fnode.toxml()
+
+    def test_classname_instance(self, testdir):
+        testdir.makepyfile("""
+            class TestClass:
+                def test_method(self):
+                    assert 0
+        """)
+        result, dom = runandparse(testdir)
+        assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, failures=1)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            classname="test_classname_instance.TestClass",
+            name="test_method")
+
+    def test_classname_nested_dir(self, testdir):
+        p = testdir.tmpdir.ensure("sub", "test_hello.py")
+        p.write("def test_func(): 0/0")
+        result, dom = runandparse(testdir)
+        assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, failures=1)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            classname="sub.test_hello",
+            name="test_func")
+
+    def test_internal_error(self, testdir):
+        testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0")
+        testdir.makepyfile("def test_function(): pass")
+        result, dom = runandparse(testdir)
+        assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, errors=1, tests=0)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode, classname="pytest", name="internal")
+        fnode = tnode.getElementsByTagName("error")[0]
+        assert_attr(fnode, message="internal error")
+        assert "Division" in fnode.toxml()
+
+    def test_failure_function(self, testdir):
+        testdir.makepyfile("def test_fail(): raise ValueError(42)")
+        result, dom = runandparse(testdir)
+        assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, failures=1, tests=1)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            classname="test_failure_function",
+            name="test_fail")
+        fnode = tnode.getElementsByTagName("failure")[0]
+        assert_attr(fnode, message="test failure")
+        assert "ValueError" in fnode.toxml()
+
+    def test_failure_escape(self, testdir):
+        testdir.makepyfile("""
+            def pytest_generate_tests(metafunc):
+                metafunc.addcall(id="<", funcargs=dict(arg1=42))
+                metafunc.addcall(id="&", funcargs=dict(arg1=44))
+            def test_func(arg1):
+                assert 0
+        """)
+        result, dom = runandparse(testdir)
+        assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, failures=2, tests=2)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            classname="test_failure_escape",
+            name="test_func[<]")
+        tnode = node.getElementsByTagName("testcase")[1]
+        assert_attr(tnode,
+            classname="test_failure_escape",
+            name="test_func[&]")
+
+    def test_junit_prefixing(self, testdir):
+        testdir.makepyfile("""
+            def test_func():
+                assert 0
+            class TestHello:
+                def test_hello(self):
+                    pass
+        """)
+        result, dom = runandparse(testdir, "--junitprefix=xyz")
+        assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, failures=1, tests=2)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            classname="xyz.test_junit_prefixing",
+            name="test_func")
+        tnode = node.getElementsByTagName("testcase")[1]
+        assert_attr(tnode,
+            classname="xyz.test_junit_prefixing."
+                      "TestHello",
+            name="test_hello")
+
+    def test_xfailure_function(self, testdir):
+        testdir.makepyfile("""
+            import py
+            def test_xfail():
+                py.test.xfail("42")
+        """)
+        result, dom = runandparse(testdir)
+        assert not result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, skips=1, tests=0)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            classname="test_xfailure_function",
+            name="test_xfail")
+        fnode = tnode.getElementsByTagName("skipped")[0]
+        assert_attr(fnode, message="expected test failure")
+        #assert "ValueError" in fnode.toxml()
+
+    def test_xfailure_xpass(self, testdir):
+        testdir.makepyfile("""
+            import py
+            @py.test.mark.xfail
+            def test_xpass():
+                pass
+        """)
+        result, dom = runandparse(testdir)
+        #assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, skips=1, tests=0)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            classname="test_xfailure_xpass",
+            name="test_xpass")
+        fnode = tnode.getElementsByTagName("skipped")[0]
+        assert_attr(fnode, message="xfail-marked test passes unexpectedly")
+        #assert "ValueError" in fnode.toxml()
+
+    def test_collect_error(self, testdir):
+        testdir.makepyfile("syntax error")
+        result, dom = runandparse(testdir)
+        assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, errors=1, tests=0)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            #classname="test_collect_error",
+            name="test_collect_error")
+        fnode = tnode.getElementsByTagName("failure")[0]
+        assert_attr(fnode, message="collection failure")
+        assert "SyntaxError" in fnode.toxml()
+
+    def test_collect_skipped(self, testdir):
+        testdir.makepyfile("import py ; py.test.skip('xyz')")
+        result, dom = runandparse(testdir)
+        assert not result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, skips=1, tests=0)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            #classname="test_collect_error",
+            name="test_collect_skipped")
+        fnode = tnode.getElementsByTagName("skipped")[0]
+        assert_attr(fnode, message="collection skipped")
+
+    def test_unicode(self, testdir):
+        value = 'hx\xc4\x85\xc4\x87\n'
+        testdir.makepyfile("""
+            # coding: utf-8
+            def test_hello():
+                print (%r)
+                assert 0
+        """ % value)
+        result, dom = runandparse(testdir)
+        assert result.ret == 1
+        tnode = dom.getElementsByTagName("testcase")[0]
+        fnode = tnode.getElementsByTagName("failure")[0]
+        if not sys.platform.startswith("java"):
+            assert "hx" in fnode.toxml()
+
+class TestNonPython:
+    def test_summing_simple(self, testdir):
+        testdir.makeconftest("""
+            import pytest
+            def pytest_collect_file(path, parent):
+                if path.ext == ".xyz":
+                    return MyItem(path, parent)
+            class MyItem(pytest.Item):
+                def __init__(self, path, parent):
+                    super(MyItem, self).__init__(path.basename, parent)
+                    self.fspath = path
+                def runtest(self):
+                    raise ValueError(42)
+                def repr_failure(self, excinfo):
+                    return "custom item runtest failed"
+        """)
+        testdir.tmpdir.join("myfile.xyz").write("hello")
+        result, dom = runandparse(testdir)
+        assert result.ret
+        node = dom.getElementsByTagName("testsuite")[0]
+        assert_attr(node, errors=0, failures=1, skips=0, tests=1)
+        tnode = node.getElementsByTagName("testcase")[0]
+        assert_attr(tnode,
+            #classname="test_collect_error",
+            name="myfile.xyz")
+        fnode = tnode.getElementsByTagName("failure")[0]
+        assert_attr(fnode, message="test failure")
+        assert "custom item runtest failed" in fnode.toxml()
+

--- a/testing/plugin/test_pastebin.py
+++ /dev/null
@@ -1,47 +0,0 @@
-
-class TestPasting:
-    def pytest_funcarg__pastebinlist(self, request):
-        mp = request.getfuncargvalue("monkeypatch")
-        pastebinlist = []
-        class MockProxy:
-            def newPaste(self, language, code):
-                pastebinlist.append((language, code))
-        plugin = request.config.pluginmanager.getplugin('pastebin')
-        mp.setattr(plugin, 'getproxy', MockProxy)
-        return pastebinlist
-
-    def test_failed(self, testdir, pastebinlist):
-        testpath = testdir.makepyfile("""
-            import py
-            def test_pass():
-                pass
-            def test_fail():
-                assert 0
-            def test_skip():
-                py.test.skip("")
-        """)
-        reprec = testdir.inline_run(testpath, "--paste=failed")
-        assert len(pastebinlist) == 1
-        assert pastebinlist[0][0] == "python"
-        s = pastebinlist[0][1]
-        assert s.find("def test_fail") != -1
-        assert reprec.countoutcomes() == [1,1,1]
-
-    def test_all(self, testdir, pastebinlist):
-        testpath = testdir.makepyfile("""
-            import py
-            def test_pass():
-                pass
-            def test_fail():
-                assert 0
-            def test_skip():
-                py.test.skip("")
-        """)
-        reprec = testdir.inline_run(testpath, "--pastebin=all")
-        assert reprec.countoutcomes() == [1,1,1]
-        assert len(pastebinlist) == 1
-        assert pastebinlist[0][0] == "python"
-        s = pastebinlist[0][1]
-        for x in 'test_fail test_skip skipped'.split():
-            assert s.find(x), (s, x)
-

--- a/testing/plugin/test_junitxml.py
+++ /dev/null
@@ -1,265 +0,0 @@
-
-from xml.dom import minidom
-import py, sys
-
-def runandparse(testdir, *args):
-    resultpath = testdir.tmpdir.join("junit.xml")
-    result = testdir.runpytest("--junitxml=%s" % resultpath, *args)
-    xmldoc = minidom.parse(str(resultpath))
-    return result, xmldoc
-
-def assert_attr(node, **kwargs):
-    __tracebackhide__ = True
-    for name, expected in kwargs.items():
-        anode = node.getAttributeNode(name)
-        assert anode, "node %r has no attribute %r" %(node, name)
-        val = anode.value
-        if val != str(expected):
-            py.test.fail("%r != %r" %(str(val), str(expected)))
-
-class TestPython:
-    def test_summing_simple(self, testdir):
-        testdir.makepyfile("""
-            import py
-            def test_pass():
-                pass
-            def test_fail():
-                assert 0
-            def test_skip():
-                py.test.skip("")
-            @py.test.mark.xfail
-            def test_xfail():
-                assert 0
-            @py.test.mark.xfail
-            def test_xpass():
-                assert 1
-        """)
-        result, dom = runandparse(testdir)
-        assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, errors=0, failures=1, skips=3, tests=2)
-
-    def test_setup_error(self, testdir):
-        testdir.makepyfile("""
-            def pytest_funcarg__arg(request):
-                raise ValueError()
-            def test_function(arg):
-                pass
-        """)
-        result, dom = runandparse(testdir)
-        assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, errors=1, tests=0)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            classname="test_setup_error",
-            name="test_function")
-        fnode = tnode.getElementsByTagName("error")[0]
-        assert_attr(fnode, message="test setup failure")
-        assert "ValueError" in fnode.toxml()
-
-    def test_classname_instance(self, testdir):
-        testdir.makepyfile("""
-            class TestClass:
-                def test_method(self):
-                    assert 0
-        """)
-        result, dom = runandparse(testdir)
-        assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, failures=1)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            classname="test_classname_instance.TestClass",
-            name="test_method")
-
-    def test_classname_nested_dir(self, testdir):
-        p = testdir.tmpdir.ensure("sub", "test_hello.py")
-        p.write("def test_func(): 0/0")
-        result, dom = runandparse(testdir)
-        assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, failures=1)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            classname="sub.test_hello",
-            name="test_func")
-
-    def test_internal_error(self, testdir):
-        testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0")
-        testdir.makepyfile("def test_function(): pass")
-        result, dom = runandparse(testdir)
-        assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, errors=1, tests=0)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode, classname="pytest", name="internal")
-        fnode = tnode.getElementsByTagName("error")[0]
-        assert_attr(fnode, message="internal error")
-        assert "Division" in fnode.toxml()
-
-    def test_failure_function(self, testdir):
-        testdir.makepyfile("def test_fail(): raise ValueError(42)")
-        result, dom = runandparse(testdir)
-        assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, failures=1, tests=1)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            classname="test_failure_function",
-            name="test_fail")
-        fnode = tnode.getElementsByTagName("failure")[0]
-        assert_attr(fnode, message="test failure")
-        assert "ValueError" in fnode.toxml()
-
-    def test_failure_escape(self, testdir):
-        testdir.makepyfile("""
-            def pytest_generate_tests(metafunc):
-                metafunc.addcall(id="<", funcargs=dict(arg1=42))
-                metafunc.addcall(id="&", funcargs=dict(arg1=44))
-            def test_func(arg1):
-                assert 0
-        """)
-        result, dom = runandparse(testdir)
-        assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, failures=2, tests=2)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            classname="test_failure_escape",
-            name="test_func[<]")
-        tnode = node.getElementsByTagName("testcase")[1]
-        assert_attr(tnode,
-            classname="test_failure_escape",
-            name="test_func[&]")
-
-    def test_junit_prefixing(self, testdir):
-        testdir.makepyfile("""
-            def test_func():
-                assert 0
-            class TestHello:
-                def test_hello(self):
-                    pass
-        """)
-        result, dom = runandparse(testdir, "--junitprefix=xyz")
-        assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, failures=1, tests=2)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            classname="xyz.test_junit_prefixing",
-            name="test_func")
-        tnode = node.getElementsByTagName("testcase")[1]
-        assert_attr(tnode,
-            classname="xyz.test_junit_prefixing."
-                      "TestHello",
-            name="test_hello")
-
-    def test_xfailure_function(self, testdir):
-        testdir.makepyfile("""
-            import py
-            def test_xfail():
-                py.test.xfail("42")
-        """)
-        result, dom = runandparse(testdir)
-        assert not result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, skips=1, tests=0)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            classname="test_xfailure_function",
-            name="test_xfail")
-        fnode = tnode.getElementsByTagName("skipped")[0]
-        assert_attr(fnode, message="expected test failure")
-        #assert "ValueError" in fnode.toxml()
-
-    def test_xfailure_xpass(self, testdir):
-        testdir.makepyfile("""
-            import py
-            @py.test.mark.xfail
-            def test_xpass():
-                pass
-        """)
-        result, dom = runandparse(testdir)
-        #assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, skips=1, tests=0)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            classname="test_xfailure_xpass",
-            name="test_xpass")
-        fnode = tnode.getElementsByTagName("skipped")[0]
-        assert_attr(fnode, message="xfail-marked test passes unexpectedly")
-        #assert "ValueError" in fnode.toxml()
-
-    def test_collect_error(self, testdir):
-        testdir.makepyfile("syntax error")
-        result, dom = runandparse(testdir)
-        assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, errors=1, tests=0)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            #classname="test_collect_error",
-            name="test_collect_error")
-        fnode = tnode.getElementsByTagName("failure")[0]
-        assert_attr(fnode, message="collection failure")
-        assert "SyntaxError" in fnode.toxml()
-
-    def test_collect_skipped(self, testdir):
-        testdir.makepyfile("import py ; py.test.skip('xyz')")
-        result, dom = runandparse(testdir)
-        assert not result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, skips=1, tests=0)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            #classname="test_collect_error",
-            name="test_collect_skipped")
-        fnode = tnode.getElementsByTagName("skipped")[0]
-        assert_attr(fnode, message="collection skipped")
-
-    def test_unicode(self, testdir):
-        value = 'hx\xc4\x85\xc4\x87\n'
-        testdir.makepyfile("""
-            # coding: utf-8
-            def test_hello():
-                print (%r)
-                assert 0
-        """ % value)
-        result, dom = runandparse(testdir)
-        assert result.ret == 1
-        tnode = dom.getElementsByTagName("testcase")[0]
-        fnode = tnode.getElementsByTagName("failure")[0]
-        if not sys.platform.startswith("java"):
-            assert "hx" in fnode.toxml()
-
-class TestNonPython:
-    def test_summing_simple(self, testdir):
-        testdir.makeconftest("""
-            import pytest
-            def pytest_collect_file(path, parent):
-                if path.ext == ".xyz":
-                    return MyItem(path, parent)
-            class MyItem(pytest.Item):
-                def __init__(self, path, parent):
-                    super(MyItem, self).__init__(path.basename, parent)
-                    self.fspath = path
-                def runtest(self):
-                    raise ValueError(42)
-                def repr_failure(self, excinfo):
-                    return "custom item runtest failed"
-        """)
-        testdir.tmpdir.join("myfile.xyz").write("hello")
-        result, dom = runandparse(testdir)
-        assert result.ret
-        node = dom.getElementsByTagName("testsuite")[0]
-        assert_attr(node, errors=0, failures=1, skips=0, tests=1)
-        tnode = node.getElementsByTagName("testcase")[0]
-        assert_attr(tnode,
-            #classname="test_collect_error",
-            name="myfile.xyz")
-        fnode = tnode.getElementsByTagName("failure")[0]
-        assert_attr(fnode, message="test failure")
-        assert "custom item runtest failed" in fnode.toxml()
-

--- a/testing/plugin/test_tmpdir.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import py
-
-from _pytest.tmpdir import pytest_funcarg__tmpdir
-from _pytest.python import FuncargRequest
-
-def test_funcarg(testdir):
-    item = testdir.getitem("""
-            def pytest_generate_tests(metafunc):
-                metafunc.addcall(id='a')
-                metafunc.addcall(id='b')
-            def test_func(tmpdir): pass
-            """, 'test_func[a]')
-    p = pytest_funcarg__tmpdir(FuncargRequest(item))
-    assert p.check()
-    bn = p.basename.strip("0123456789")
-    assert bn.endswith("test_func_a_")
-    item.name = "qwe/\\abc"
-    p = pytest_funcarg__tmpdir(FuncargRequest(item))
-    assert p.check()
-    bn = p.basename.strip("0123456789")
-    assert bn == "qwe__abc"
-
-def test_ensuretemp(recwarn):
-    #py.test.deprecated_call(py.test.ensuretemp, 'hello')
-    d1 = py.test.ensuretemp('hello')
-    d2 = py.test.ensuretemp('hello')
-    assert d1 == d2
-    assert d1.check(dir=1)
-

--- a/testing/plugin/test_resultlog.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import py
-import os
-from _pytest.resultlog import generic_path, ResultLog, \
-        pytest_configure, pytest_unconfigure
-from _pytest.session import Node, Item, FSCollector
-
-def test_generic_path(testdir):
-    from _pytest.session import Session
-    config = testdir.parseconfig()
-    session = Session(config)
-    p1 = Node('a', config=config, session=session)
-    #assert p1.fspath is None
-    p2 = Node('B', parent=p1)
-    p3 = Node('()', parent = p2)
-    item = Item('c', parent = p3)
-
-    res = generic_path(item)
-    assert res == 'a.B().c'
-
-    p0 = FSCollector('proj/test', config=config, session=session)
-    p1 = FSCollector('proj/test/a', parent=p0)
-    p2 = Node('B', parent=p1)
-    p3 = Node('()', parent = p2)
-    p4 = Node('c', parent=p3)
-    item = Item('[1]', parent = p4)
-
-    res = generic_path(item)
-    assert res == 'test/a:B().c[1]'
-
-def test_write_log_entry():
-    reslog = ResultLog(None, None)
-    reslog.logfile = py.io.TextIO()
-    reslog.write_log_entry('name', '.', '')
-    entry = reslog.logfile.getvalue()
-    assert entry[-1] == '\n'
-    entry_lines = entry.splitlines()
-    assert len(entry_lines) == 1
-    assert entry_lines[0] == '. name'
-
-    reslog.logfile = py.io.TextIO()
-    reslog.write_log_entry('name', 's', 'Skipped')
-    entry = reslog.logfile.getvalue()
-    assert entry[-1] == '\n'
-    entry_lines = entry.splitlines()
-    assert len(entry_lines) == 2
-    assert entry_lines[0] == 's name'
-    assert entry_lines[1] == ' Skipped'
-
-    reslog.logfile = py.io.TextIO()
-    reslog.write_log_entry('name', 's', 'Skipped\n')
-    entry = reslog.logfile.getvalue()
-    assert entry[-1] == '\n'
-    entry_lines = entry.splitlines()
-    assert len(entry_lines) == 2
-    assert entry_lines[0] == 's name'
-    assert entry_lines[1] == ' Skipped'
-
-    reslog.logfile = py.io.TextIO()
-    longrepr = ' tb1\n tb 2\nE tb3\nSome Error'
-    reslog.write_log_entry('name', 'F', longrepr)
-    entry = reslog.logfile.getvalue()
-    assert entry[-1] == '\n'
-    entry_lines = entry.splitlines()
-    assert len(entry_lines) == 5
-    assert entry_lines[0] == 'F name'
-    assert entry_lines[1:] == [' '+line for line in longrepr.splitlines()]
-
-
-class TestWithFunctionIntegration:
-    # XXX (hpk) i think that the resultlog plugin should
-    # provide a Parser object so that one can remain
-    # ignorant regarding formatting details.
-    def getresultlog(self, testdir, arg):
-        resultlog = testdir.tmpdir.join("resultlog")
-        testdir.plugins.append("resultlog")
-        args = ["--resultlog=%s" % resultlog] + [arg]
-        testdir.runpytest(*args)
-        return [x for x in resultlog.readlines(cr=0) if x]
-
-    def test_collection_report(self, testdir):
-        ok = testdir.makepyfile(test_collection_ok="")
-        skip = testdir.makepyfile(test_collection_skip="import py ; py.test.skip('hello')")
-        fail = testdir.makepyfile(test_collection_fail="XXX")
-        lines = self.getresultlog(testdir, ok)
-        assert not lines
-
-        lines = self.getresultlog(testdir, skip)
-        assert len(lines) == 2
-        assert lines[0].startswith("S ")
-        assert lines[0].endswith("test_collection_skip.py")
-        assert lines[1].startswith(" ")
-        assert lines[1].endswith("test_collection_skip.py:1: Skipped: hello")
-
-        lines = self.getresultlog(testdir, fail)
-        assert lines
-        assert lines[0].startswith("F ")
-        assert lines[0].endswith("test_collection_fail.py"), lines[0]
-        for x in lines[1:]:
-            assert x.startswith(" ")
-        assert "XXX" in "".join(lines[1:])
-
-    def test_log_test_outcomes(self, testdir):
-        mod = testdir.makepyfile(test_mod="""
-            import py
-            def test_pass(): pass
-            def test_skip(): py.test.skip("hello")
-            def test_fail(): raise ValueError("FAIL")
-
-            @py.test.mark.xfail
-            def test_xfail(): raise ValueError("XFAIL")
-            @py.test.mark.xfail
-            def test_xpass(): pass
-
-        """)
-        lines = self.getresultlog(testdir, mod)
-        assert len(lines) >= 3
-        assert lines[0].startswith(". ")
-        assert lines[0].endswith("test_pass")
-        assert lines[1].startswith("s "), lines[1]
-        assert lines[1].endswith("test_skip")
-        assert lines[2].find("hello") != -1
-
-        assert lines[3].startswith("F ")
-        assert lines[3].endswith("test_fail")
-        tb = "".join(lines[4:8])
-        assert tb.find('raise ValueError("FAIL")') != -1
-
-        assert lines[8].startswith('x ')
-        tb = "".join(lines[8:14])
-        assert tb.find('raise ValueError("XFAIL")') != -1
-
-        assert lines[14].startswith('X ')
-        assert len(lines) == 15
-
-    def test_internal_exception(self):
-        # they are produced for example by a teardown failing
-        # at the end of the run
-        try:
-            raise ValueError
-        except ValueError:
-            excinfo = py.code.ExceptionInfo()
-        reslog = ResultLog(None, py.io.TextIO())
-        reslog.pytest_internalerror(excinfo.getrepr())
-        entry = reslog.logfile.getvalue()
-        entry_lines = entry.splitlines()
-
-        assert entry_lines[0].startswith('! ')
-        assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class
-        assert entry_lines[-1][0] == ' '
-        assert 'ValueError' in entry
-
-def test_generic(testdir, LineMatcher):
-    testdir.plugins.append("resultlog")
-    testdir.makepyfile("""
-        import py
-        def test_pass():
-            pass
-        def test_fail():
-            assert 0
-        def test_skip():
-            py.test.skip("")
-        @py.test.mark.xfail
-        def test_xfail():
-            assert 0
-        @py.test.mark.xfail(run=False)
-        def test_xfail_norun():
-            assert 0
-    """)
-    testdir.runpytest("--resultlog=result.log")
-    lines = testdir.tmpdir.join("result.log").readlines(cr=0)
-    LineMatcher(lines).fnmatch_lines([
-        ". *:test_pass",
-        "F *:test_fail",
-        "s *:test_skip",
-        "x *:test_xfail",
-        "x *:test_xfail_norun",
-    ])
-
-def test_no_resultlog_on_slaves(testdir):
-    config = testdir.parseconfig("-p", "resultlog", "--resultlog=resultlog")
-
-    assert not hasattr(config, '_resultlog')
-    pytest_configure(config)
-    assert hasattr(config, '_resultlog')
-    pytest_unconfigure(config)
-    assert not hasattr(config, '_resultlog')
-
-    config.slaveinput = {}
-    pytest_configure(config)
-    assert not hasattr(config, '_resultlog')
-    pytest_unconfigure(config)
-    assert not hasattr(config, '_resultlog')
-

--- a/testing/plugin/test_runner_xunit.py
+++ /dev/null
@@ -1,212 +0,0 @@
-#
-# test correct setup/teardowns at
-# module, class, and instance level
-
-def test_module_and_function_setup(testdir):
-    reprec = testdir.inline_runsource("""
-        modlevel = []
-        def setup_module(module):
-            assert not modlevel
-            module.modlevel.append(42)
-
-        def teardown_module(module):
-            modlevel.pop()
-
-        def setup_function(function):
-            function.answer = 17
-
-        def teardown_function(function):
-            del function.answer
-
-        def test_modlevel():
-            assert modlevel[0] == 42
-            assert test_modlevel.answer == 17
-
-        class TestFromClass:
-            def test_module(self):
-                assert modlevel[0] == 42
-                assert not hasattr(test_modlevel, 'answer')
-    """)
-    rep = reprec.matchreport("test_modlevel")
-    assert rep.passed
-    rep = reprec.matchreport("test_module")
-    assert rep.passed
-
-def test_class_setup(testdir):
-    reprec = testdir.inline_runsource("""
-        class TestSimpleClassSetup:
-            clslevel = []
-            def setup_class(cls):
-                cls.clslevel.append(23)
-
-            def teardown_class(cls):
-                cls.clslevel.pop()
-
-            def test_classlevel(self):
-                assert self.clslevel[0] == 23
-
-        class TestInheritedClassSetupStillWorks(TestSimpleClassSetup):
-            def test_classlevel_anothertime(self):
-                assert self.clslevel == [23]
-
-        def test_cleanup():
-            assert not TestSimpleClassSetup.clslevel
-            assert not TestInheritedClassSetupStillWorks.clslevel
-    """)
-    reprec.assertoutcome(passed=1+2+1)
-
-
-def test_method_setup(testdir):
-    reprec = testdir.inline_runsource("""
-        class TestSetupMethod:
-            def setup_method(self, meth):
-                self.methsetup = meth
-            def teardown_method(self, meth):
-                del self.methsetup
-
-            def test_some(self):
-                assert self.methsetup == self.test_some
-
-            def test_other(self):
-                assert self.methsetup == self.test_other
-    """)
-    reprec.assertoutcome(passed=2)
-
-def test_method_generator_setup(testdir):
-    reprec = testdir.inline_runsource("""
-        class TestSetupTeardownOnInstance:
-            def setup_class(cls):
-                cls.classsetup = True
-
-            def setup_method(self, method):
-                self.methsetup = method
-
-            def test_generate(self):
-                assert self.classsetup
-                assert self.methsetup == self.test_generate
-                yield self.generated, 5
-                yield self.generated, 2
-
-            def generated(self, value):
-                assert self.classsetup
-                assert self.methsetup == self.test_generate
-                assert value == 5
-    """)
-    reprec.assertoutcome(passed=1, failed=1)
-
-def test_func_generator_setup(testdir):
-    reprec = testdir.inline_runsource("""
-        import sys
-
-        def setup_module(mod):
-            print ("setup_module")
-            mod.x = []
-
-        def setup_function(fun):
-            print ("setup_function")
-            x.append(1)
-
-        def teardown_function(fun):
-            print ("teardown_function")
-            x.pop()
-
-        def test_one():
-            assert x == [1]
-            def check():
-                print ("check")
-                sys.stderr.write("e\\n")
-                assert x == [1]
-            yield check
-            assert x == [1]
-    """)
-    rep = reprec.matchreport("test_one", names="pytest_runtest_logreport")
-    assert rep.passed
-
-def test_method_setup_uses_fresh_instances(testdir):
-    reprec = testdir.inline_runsource("""
-        class TestSelfState1:
-            memory = []
-            def test_hello(self):
-                self.memory.append(self)
-
-            def test_afterhello(self):
-                assert self != self.memory[0]
-    """)
-    reprec.assertoutcome(passed=2, failed=0)
-
-def test_failing_setup_calls_teardown(testdir):
-    p = testdir.makepyfile("""
-        def setup_module(mod):
-            raise ValueError(42)
-        def test_function():
-            assert 0
-        def teardown_module(mod):
-            raise ValueError(43)
-    """)
-    result = testdir.runpytest(p)
-    result.stdout.fnmatch_lines([
-        "*42*",
-        "*43*",
-        "*2 error*"
-    ])
-
-def test_setup_that_skips_calledagain_and_teardown(testdir):
-    p = testdir.makepyfile("""
-        import py
-        def setup_module(mod):
-            py.test.skip("x")
-        def test_function1():
-            pass
-        def test_function2():
-            pass
-        def teardown_module(mod):
-            raise ValueError(43)
-    """)
-    result = testdir.runpytest(p)
-    result.stdout.fnmatch_lines([
-        "*ValueError*43*",
-        "*2 skipped*1 error*",
-    ])
-
-def test_setup_fails_again_on_all_tests(testdir):
-    p = testdir.makepyfile("""
-        import py
-        def setup_module(mod):
-            raise ValueError(42)
-        def test_function1():
-            pass
-        def test_function2():
-            pass
-        def teardown_module(mod):
-            raise ValueError(43)
-    """)
-    result = testdir.runpytest(p)
-    result.stdout.fnmatch_lines([
-        "*3 error*"
-    ])
-    assert "passed" not in result.stdout.str()
-
-def test_setup_funcarg_setup_not_called_if_outer_scope_fails(testdir):
-    p = testdir.makepyfile("""
-        import py
-        def setup_module(mod):
-            raise ValueError(42)
-        def pytest_funcarg__hello(request):
-            raise ValueError("xyz43")
-        def test_function1(hello):
-            pass
-        def test_function2(hello):
-            pass
-    """)
-    result = testdir.runpytest(p)
-    result.stdout.fnmatch_lines([
-        "*function1*",
-        "*ValueError*42*",
-        "*function2*",
-        "*ValueError*42*",
-        "*2 error*"
-    ])
-    assert "xyz43" not in result.stdout.str()
-
-
-

--- /dev/null
+++ b/testing/test_recwarn.py
@@ -0,0 +1,81 @@
+import py
+from _pytest.recwarn import WarningsRecorder
+
+def test_WarningRecorder(recwarn):
+    showwarning = py.std.warnings.showwarning
+    rec = WarningsRecorder()
+    assert py.std.warnings.showwarning != showwarning
+    assert not rec.list
+    py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
+    assert len(rec.list) == 1
+    py.std.warnings.warn(DeprecationWarning("hello"))
+    assert len(rec.list) == 2
+    warn = rec.pop()
+    assert str(warn.message) == "hello"
+    l = rec.list
+    rec.clear()
+    assert len(rec.list) == 0
+    assert l is rec.list
+    py.test.raises(AssertionError, "rec.pop()")
+    rec.finalize()
+    assert showwarning == py.std.warnings.showwarning
+
+def test_recwarn_functional(testdir):
+    reprec = testdir.inline_runsource("""
+        pytest_plugins = 'pytest_recwarn',
+        import warnings
+        oldwarn = warnings.showwarning
+        def test_method(recwarn):
+            assert warnings.showwarning != oldwarn
+            warnings.warn("hello")
+            warn = recwarn.pop()
+            assert isinstance(warn.message, UserWarning)
+        def test_finalized():
+            assert warnings.showwarning == oldwarn
+    """)
+    res = reprec.countoutcomes()
+    assert tuple(res) == (2, 0, 0), res
+
+#
+# ============ test py.test.deprecated_call() ==============
+#
+
+def dep(i):
+    if i == 0:
+        py.std.warnings.warn("is deprecated", DeprecationWarning)
+    return 42
+
+reg = {}
+def dep_explicit(i):
+    if i == 0:
+        py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
+                                      filename="hello", lineno=3)
+
+def test_deprecated_call_raises():
+    excinfo = py.test.raises(AssertionError,
+                   "py.test.deprecated_call(dep, 3)")
+    assert str(excinfo).find("did not produce") != -1
+
+def test_deprecated_call():
+    py.test.deprecated_call(dep, 0)
+
+def test_deprecated_call_ret():
+    ret = py.test.deprecated_call(dep, 0)
+    assert ret == 42
+
+def test_deprecated_call_preserves():
+    r = py.std.warnings.onceregistry.copy()
+    f = py.std.warnings.filters[:]
+    test_deprecated_call_raises()
+    test_deprecated_call()
+    assert r == py.std.warnings.onceregistry
+    assert f == py.std.warnings.filters
+
+def test_deprecated_explicit_call_raises():
+    py.test.raises(AssertionError,
+                   "py.test.deprecated_call(dep_explicit, 3)")
+
+def test_deprecated_explicit_call():
+    py.test.deprecated_call(dep_explicit, 0)
+    py.test.deprecated_call(dep_explicit, 0)
+

--- /dev/null
+++ b/testing/test_runner.py
@@ -0,0 +1,387 @@
+import py, sys
+from _pytest import runner
+from py._code.code import ReprExceptionInfo
+
+class TestSetupState:
+    def test_setup(self, testdir):
+        ss = runner.SetupState()
+        item = testdir.getitem("def test_func(): pass")
+        l = [1]
+        ss.prepare(item)
+        ss.addfinalizer(l.pop, colitem=item)
+        assert l
+        ss._pop_and_teardown()
+        assert not l
+
+    def test_setup_scope_None(self, testdir):
+        item = testdir.getitem("def test_func(): pass")
+        ss = runner.SetupState()
+        l = [1]
+        ss.prepare(item)
+        ss.addfinalizer(l.pop, colitem=None)
+        assert l
+        ss._pop_and_teardown()
+        assert l
+        ss._pop_and_teardown()
+        assert l
+        ss.teardown_all()
+        assert not l
+
+    def test_teardown_exact_stack_empty(self, testdir):
+        item = testdir.getitem("def test_func(): pass")
+        ss = runner.SetupState()
+        ss.teardown_exact(item)
+        ss.teardown_exact(item)
+        ss.teardown_exact(item)
+
+    def test_setup_fails_and_failure_is_cached(self, testdir):
+        item = testdir.getitem("""
+            def setup_module(mod):
+                raise ValueError(42)
+            def test_func(): pass
+        """)
+        ss = runner.SetupState()
+        py.test.raises(ValueError, "ss.prepare(item)")
+        py.test.raises(ValueError, "ss.prepare(item)")
+
+class BaseFunctionalTests:
+    def test_passfunction(self, testdir):
+        reports = testdir.runitem("""
+            def test_func():
+                pass
+        """)
+        rep = reports[1]
+        assert rep.passed
+        assert not rep.failed
+        assert rep.outcome == "passed"
+        assert not rep.longrepr
+
+    def test_failfunction(self, testdir):
+        reports = testdir.runitem("""
+            def test_func():
+                assert 0
+        """)
+        rep = reports[1]
+        assert not rep.passed
+        assert not rep.skipped
+        assert rep.failed
+        assert rep.when == "call"
+        assert rep.outcome == "failed"
+        #assert isinstance(rep.longrepr, ReprExceptionInfo)
+
+    def test_skipfunction(self, testdir):
+        reports = testdir.runitem("""
+            import py
+            def test_func():
+                py.test.skip("hello")
+        """)
+        rep = reports[1]
+        assert not rep.failed
+        assert not rep.passed
+        assert rep.skipped
+        assert rep.outcome == "skipped"
+        #assert rep.skipped.when == "call"
+        #assert rep.skipped.when == "call"
+        #assert rep.skipped == "%sreason == "hello"
+        #assert rep.skipped.location.lineno == 3
+        #assert rep.skipped.location.path
+        #assert not rep.skipped.failurerepr
+
+    def test_skip_in_setup_function(self, testdir):
+        reports = testdir.runitem("""
+            import py
+            def setup_function(func):
+                py.test.skip("hello")
+            def test_func():
+                pass
+        """)
+        print(reports)
+        rep = reports[0]
+        assert not rep.failed
+        assert not rep.passed
+        assert rep.skipped
+        #assert rep.skipped.reason == "hello"
+        #assert rep.skipped.location.lineno == 3
+        #assert rep.skipped.location.lineno == 3
+        assert len(reports) == 2
+        assert reports[1].passed # teardown
+
+    def test_failure_in_setup_function(self, testdir):
+        reports = testdir.runitem("""
+            import py
+            def setup_function(func):
+                raise ValueError(42)
+            def test_func():
+                pass
+        """)
+        rep = reports[0]
+        assert not rep.skipped
+        assert not rep.passed
+        assert rep.failed
+        assert rep.when == "setup"
+        assert len(reports) == 2
+
+    def test_failure_in_teardown_function(self, testdir):
+        reports = testdir.runitem("""
+            import py
+            def teardown_function(func):
+                raise ValueError(42)
+            def test_func():
+                pass
+        """)
+        print(reports)
+        assert len(reports) == 3
+        rep = reports[2]
+        assert not rep.skipped
+        assert not rep.passed
+        assert rep.failed
+        assert rep.when == "teardown"
+        #assert rep.longrepr.reprcrash.lineno == 3
+        #assert rep.longrepr.reprtraceback.reprentries
+
+    def test_custom_failure_repr(self, testdir):
+        testdir.makepyfile(conftest="""
+            import pytest
+            class Function(pytest.Function):
+                def repr_failure(self, excinfo):
+                    return "hello"
+        """)
+        reports = testdir.runitem("""
+            import py
+            def test_func():
+                assert 0
+        """)
+        rep = reports[1]
+        assert not rep.skipped
+        assert not rep.passed
+        assert rep.failed
+        #assert rep.outcome.when == "call"
+        #assert rep.failed.where.lineno == 3
+        #assert rep.failed.where.path.basename == "test_func.py"
+        #assert rep.failed.failurerepr == "hello"
+
+    def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
+        testdir.makepyfile(conftest="""
+            import pytest
+            class Function(pytest.Function):
+                def repr_failure(self, excinfo):
+                    assert 0
+        """)
+        reports = testdir.runitem("""
+            def setup_function(func):
+                raise ValueError(42)
+            def test_func():
+                pass
+        """)
+        assert len(reports) == 2
+        rep = reports[0]
+        print(rep)
+        assert not rep.skipped
+        assert not rep.passed
+        assert rep.failed
+        #assert rep.outcome.when == "setup"
+        #assert rep.outcome.where.lineno == 3
+        #assert rep.outcome.where.path.basename == "test_func.py"
+        #assert isinstance(rep.failed.failurerepr, PythonFailureRepr)
+
+    def test_systemexit_does_not_bail_out(self, testdir):
+        try:
+            reports = testdir.runitem("""
+                def test_func():
+                    raise SystemExit(42)
+            """)
+        except SystemExit:
+            py.test.fail("runner did not catch SystemExit")
+        rep = reports[1]
+        assert rep.failed
+        assert rep.when == "call"
+
+    def test_exit_propagates(self, testdir):
+        try:
+            testdir.runitem("""
+                import pytest
+                def test_func():
+                    raise pytest.exit.Exception()
+            """)
+        except py.test.exit.Exception:
+            pass
+        else:
+            py.test.fail("did not raise")
+
+class TestExecutionNonForked(BaseFunctionalTests):
+    def getrunner(self):
+        def f(item):
+            return runner.runtestprotocol(item, log=False)
+        return f
+
+    def test_keyboardinterrupt_propagates(self, testdir):
+        try:
+            testdir.runitem("""
+                def test_func():
+                    raise KeyboardInterrupt("fake")
+            """)
+        except KeyboardInterrupt:
+            pass
+        else:
+            py.test.fail("did not raise")
+
+class TestExecutionForked(BaseFunctionalTests):
+    pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')")
+
+    def getrunner(self):
+        # XXX re-arrange this test to live in pytest-xdist
+        xplugin = py.test.importorskip("xdist.plugin")
+        return xplugin.forked_run_report
+
+    def test_suicide(self, testdir):
+        reports = testdir.runitem("""
+            def test_func():
+                import os
+                os.kill(os.getpid(), 15)
+        """)
+        rep = reports[0]
+        assert rep.failed
+        assert rep.when == "???"
+
+class TestSessionReports:
+    def test_collect_result(self, testdir):
+        col = testdir.getmodulecol("""
+            def test_func1():
+                pass
+            class TestClass:
+                pass
+        """)
+        rep = runner.pytest_make_collect_report(col)
+        assert not rep.failed
+        assert not rep.skipped
+        assert rep.passed
+        locinfo = rep.location
+        assert locinfo[0] == col.fspath.basename
+        assert not locinfo[1]
+        assert locinfo[2] == col.fspath.basename
+        res = rep.result
+        assert len(res) == 2
+        assert res[0].name == "test_func1"
+        assert res[1].name == "TestClass"
+
+    def test_skip_at_module_scope(self, testdir):
+        col = testdir.getmodulecol("""
+            import pytest
+            pytest.skip("hello")
+            def test_func():
+                pass
+        """)
+        rep = runner.pytest_make_collect_report(col)
+        assert not rep.failed
+        assert not rep.passed
+        assert rep.skipped
+
+def test_callinfo():
+    ci = runner.CallInfo(lambda: 0, '123')
+    assert ci.when == "123"
+    assert ci.result == 0
+    assert "result" in repr(ci)
+    ci = runner.CallInfo(lambda: 0/0, '123')
+    assert ci.when == "123"
+    assert not hasattr(ci, 'result')
+    assert ci.excinfo
+    assert "exc" in repr(ci)
+
+# design question: do we want general hooks in python files?
+# then something like the following functional tests makes sense
+@py.test.mark.xfail
+def test_runtest_in_module_ordering(testdir):
+    p1 = testdir.makepyfile("""
+        def pytest_runtest_setup(item): # runs after class-level!
+            item.function.mylist.append("module")
+        class TestClass:
+            def pytest_runtest_setup(self, item):
+                assert not hasattr(item.function, 'mylist')
+                item.function.mylist = ['class']
+            def pytest_funcarg__mylist(self, request):
+                return request.function.mylist
+            def pytest_runtest_call(self, item, __multicall__):
+                try:
+                    __multicall__.execute()
+                except ValueError:
+                    pass
+            def test_hello1(self, mylist):
+                assert mylist == ['class', 'module'], mylist
+                raise ValueError()
+            def test_hello2(self, mylist):
+                assert mylist == ['class', 'module'], mylist
+        def pytest_runtest_teardown(item):
+            del item.function.mylist
+    """)
+    result = testdir.runpytest(p1)
+    result.stdout.fnmatch_lines([
+        "*2 passed*"
+    ])
+
+
+def test_pytest_exit():
+    try:
+        py.test.exit("hello")
+    except py.test.exit.Exception:
+        excinfo = py.code.ExceptionInfo()
+        assert excinfo.errisinstance(KeyboardInterrupt)
+
+def test_pytest_fail():
+    try:
+        py.test.fail("hello")
+    except py.test.fail.Exception:
+        excinfo = py.code.ExceptionInfo()
+        s = excinfo.exconly(tryshort=True)
+        assert s.startswith("Failed")
+
+def test_exception_printing_skip():
+    try:
+        py.test.skip("hello")
+    except py.test.skip.Exception:
+        excinfo = py.code.ExceptionInfo()
+        s = excinfo.exconly(tryshort=True)
+        assert s.startswith("Skipped")
+
+def test_importorskip():
+    importorskip = py.test.importorskip
+    try:
+        sys = importorskip("sys")
+        assert sys == py.std.sys
+        #path = py.test.importorskip("os.path")
+        #assert path == py.std.os.path
+        py.test.raises(py.test.skip.Exception,
+            "py.test.importorskip('alskdj')")
+        py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
+        py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
+        path = importorskip("py", minversion=".".join(py.__version__))
+        mod = py.std.types.ModuleType("hello123")
+        mod.__version__ = "1.3"
+        py.test.raises(py.test.skip.Exception, """
+            py.test.importorskip("hello123", minversion="5.0")
+        """)
+    except py.test.skip.Exception:
+        print(py.code.ExceptionInfo())
+        py.test.fail("spurious skip")
+
+def test_importorskip_imports_last_module_part():
+    import os
+    ospath = py.test.importorskip("os.path")
+    assert os.path == ospath
+
+
+def test_pytest_cmdline_main(testdir):
+    p = testdir.makepyfile("""
+        import sys
+        sys.path.insert(0, %r)
+        import py
+        def test_hello():
+            assert 1
+        if __name__ == '__main__':
+           py.test.cmdline.main([__file__])
+    """ % (str(py._pydir.dirpath())))
+    import subprocess
+    popen = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE)
+    s = popen.stdout.read()
+    ret = popen.wait()
+    assert ret == 0
+

--- a/testing/plugin/test_helpconfig.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import py, pytest,os
-from _pytest.helpconfig import collectattr
-
-def test_version(testdir):
-    result = testdir.runpytest("--version")
-    assert result.ret == 0
-    #p = py.path.local(py.__file__).dirpath()
-    result.stderr.fnmatch_lines([
-        '*py.test*%s*imported from*' % (pytest.__version__, )
-    ])
-
-def test_help(testdir):
-    result = testdir.runpytest("--help")
-    assert result.ret == 0
-    result.stdout.fnmatch_lines([
-        "*-v*verbose*",
-        "*setup.cfg*",
-        "*minversion*",
-    ])
-
-def test_collectattr():
-    class A:
-        def pytest_hello(self):
-            pass
-    class B(A):
-        def pytest_world(self):
-            pass
-    methods = py.builtin.sorted(collectattr(B))
-    assert list(methods) == ['pytest_hello', 'pytest_world']
-    methods = py.builtin.sorted(collectattr(B()))
-    assert list(methods) == ['pytest_hello', 'pytest_world']
-
-def test_hookvalidation_unknown(testdir):
-    testdir.makeconftest("""
-        def pytest_hello(xyz):
-            pass
-    """)
-    result = testdir.runpytest()
-    assert result.ret != 0
-    result.stderr.fnmatch_lines([
-        '*unknown hook*pytest_hello*'
-    ])
-
-def test_hookvalidation_optional(testdir):
-    testdir.makeconftest("""
-        import py
-        @py.test.mark.optionalhook
-        def pytest_hello(xyz):
-            pass
-    """)
-    result = testdir.runpytest()
-    assert result.ret == 0
-

--- a/testing/plugin/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-#

--- /dev/null
+++ b/testing/test_assertion.py
@@ -0,0 +1,203 @@
+import sys
+
+import py
+import _pytest.assertion as plugin
+
+needsnewassert = py.test.mark.skipif("sys.version_info < (2,6)")
+
+def interpret(expr):
+    return py.code._reinterpret(expr, py.code.Frame(sys._getframe(1)))
+
+class TestBinReprIntegration:
+    pytestmark = needsnewassert
+
+    def pytest_funcarg__hook(self, request):
+        class MockHook(object):
+            def __init__(self):
+                self.called = False
+                self.args = tuple()
+                self.kwargs = dict()
+
+            def __call__(self, op, left, right):
+                self.called = True
+                self.op = op
+                self.left = left
+                self.right = right
+        mockhook = MockHook()
+        monkeypatch = request.getfuncargvalue("monkeypatch")
+        monkeypatch.setattr(py.code, '_reprcompare', mockhook)
+        return mockhook
+
+    def test_pytest_assertrepr_compare_called(self, hook):
+        interpret('assert 0 == 1')
+        assert hook.called
+
+
+    def test_pytest_assertrepr_compare_args(self, hook):
+        interpret('assert [0, 1] == [0, 2]')
+        assert hook.op == '=='
+        assert hook.left == [0, 1]
+        assert hook.right == [0, 2]
+
+    def test_configure_unconfigure(self, testdir, hook):
+        assert hook == py.code._reprcompare
+        config = testdir.parseconfig()
+        plugin.pytest_configure(config)
+        assert hook != py.code._reprcompare
+        plugin.pytest_unconfigure(config)
+        assert hook == py.code._reprcompare
+
+def callequal(left, right):
+    return plugin.pytest_assertrepr_compare('==', left, right)
+
+class TestAssert_reprcompare:
+    def test_different_types(self):
+        assert callequal([0, 1], 'foo') is None
+
+    def test_summary(self):
+        summary = callequal([0, 1], [0, 2])[0]
+        assert len(summary) < 65
+
+    def test_text_diff(self):
+        diff = callequal('spam', 'eggs')[1:]
+        assert '- spam' in diff
+        assert '+ eggs' in diff
+
+    def test_multiline_text_diff(self):
+        left = 'foo\nspam\nbar'
+        right = 'foo\neggs\nbar'
+        diff = callequal(left, right)
+        assert '- spam' in diff
+        assert '+ eggs' in diff
+
+    def test_list(self):
+        expl = callequal([0, 1], [0, 2])
+        assert len(expl) > 1
+
+    def test_list_different_lengths(self):
+        expl = callequal([0, 1], [0, 1, 2])
+        assert len(expl) > 1
+        expl = callequal([0, 1, 2], [0, 1])
+        assert len(expl) > 1
+
+    def test_dict(self):
+        expl = callequal({'a': 0}, {'a': 1})
+        assert len(expl) > 1
+
+    def test_set(self):
+        expl = callequal(set([0, 1]), set([0, 2]))
+        assert len(expl) > 1
+
+    def test_list_tuples(self):
+        expl = callequal([], [(1,2)])
+        assert len(expl) > 1
+        expl = callequal([(1,2)], [])
+        assert len(expl) > 1
+
+    def test_list_bad_repr(self):
+        class A:
+            def __repr__(self):
+                raise ValueError(42)
+        expl = callequal([], [A()])
+        assert 'ValueError' in "".join(expl)
+        expl = callequal({}, {'1': A()})
+        assert 'faulty' in "".join(expl)
+
+    def test_one_repr_empty(self):
+        """
+        the faulty empty string repr did trigger
+        an unbound local error in _diff_text
+        """
+        class A(str):
+            def __repr__(self):
+                return ''
+        expl = callequal(A(), '')
+        assert not expl
+
+@needsnewassert
+def test_pytest_assertrepr_compare_integration(testdir):
+    testdir.makepyfile("""
+        def test_hello():
+            x = set(range(100))
+            y = x.copy()
+            y.remove(50)
+            assert x == y
+    """)
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines([
+        "*def test_hello():*",
+        "*assert x == y*",
+        "*E*Extra items*left*",
+        "*E*50*",
+    ])
+
+@needsnewassert
+def test_sequence_comparison_uses_repr(testdir):
+    testdir.makepyfile("""
+        def test_hello():
+            x = set("hello x")
+            y = set("hello y")
+            assert x == y
+    """)
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines([
+        "*def test_hello():*",
+        "*assert x == y*",
+        "*E*Extra items*left*",
+        "*E*'x'*",
+        "*E*Extra items*right*",
+        "*E*'y'*",
+    ])
+
+
+def test_functional(testdir):
+    testdir.makepyfile("""
+        def test_hello():
+            x = 3
+            assert x == 4
+    """)
+    result = testdir.runpytest()
+    assert "3 == 4" in result.stdout.str()
+    result = testdir.runpytest("--no-assert")
+    assert "3 == 4" not in result.stdout.str()
+
+def test_triple_quoted_string_issue113(testdir):
+    testdir.makepyfile("""
+        def test_hello():
+            assert "" == '''
+    '''""")
+    result = testdir.runpytest("--fulltrace")
+    result.stdout.fnmatch_lines([
+        "*1 failed*",
+    ])
+    assert 'SyntaxError' not in result.stdout.str()
+
+def test_traceback_failure(testdir):
+    p1 = testdir.makepyfile("""
+        def g():
+            return 2
+        def f(x):
+            assert x == g()
+        def test_onefails():
+            f(3)
+    """)
+    result = testdir.runpytest(p1)
+    result.stdout.fnmatch_lines([
+        "*test_traceback_failure.py F",
+        "====* FAILURES *====",
+        "____*____",
+        "",
+        "    def test_onefails():",
+        ">       f(3)",
+        "",
+        "*test_*.py:6: ",
+        "_ _ _ *",
+        #"",
+        "    def f(x):",
+        ">       assert x == g()",
+        "E       assert 3 == 2",
+        "E        +  where 2 = g()",
+        "",
+        "*test_traceback_failure.py:4: AssertionError"
+    ])
+

--- a/testing/plugin/conftest.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import py
-
-import _pytest
-plugindir = py.path.local(_pytest.__file__).dirpath()
-from _pytest.core import default_plugins
-
-def pytest_collect_file(path, parent):
-    if path.basename.startswith("pytest_") and path.ext == ".py":
-        mod = parent.Module(path, parent=parent)
-        return mod
-
-# for plugin test we try to automatically make sure that
-# the according plugin is loaded
-def pytest_funcarg__testdir(request):
-    testdir = request.getfuncargvalue("testdir")
-    #for obj in (request.cls, request.module):
-    #    if hasattr(obj, 'testplugin'):
-    #        testdir.plugins.append(obj.testplugin)
-    #        break
-    #else:
-    modname = request.module.__name__.split(".")[-1]
-    if modname.startswith("test_pytest_"):
-        modname = modname[5:]
-        if plugindir.join("%s.py" % modname).check():
-            if modname[7:] not in default_plugins:
-                testdir.plugins.append(vars(request.module))
-                testdir.plugins.append(modname)
-    #elif modname.startswith("test_pytest"):
-    #    pname = modname[5:]
-    #    assert pname not in testdir.plugins
-    #    testdir.plugins.append(pname)
-    #    #testdir.plugins.append(vars(request.module))
-    else:
-        pass # raise ValueError("need better support code")
-    return testdir
-

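For reference, the removed funcarg above decides whether to pre-load a plugin by slicing the test module's name; a worked example of that slicing (the module name here is hypothetical):

    modname = "test_pytest_skipping".split(".")[-1]
    modname = modname[5:]               # -> "pytest_skipping"
    plugin_file = "%s.py" % modname     # looked up in the _pytest plugin directory
    short_name = modname[7:]            # -> "skipping", compared against default_plugins
    assert (plugin_file, short_name) == ("pytest_skipping.py", "skipping")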
--- /dev/null
+++ b/testing/test_session.py
@@ -0,0 +1,227 @@
+import pytest, py
+
+class SessionTests:
+    def test_basic_testitem_events(self, testdir):
+        tfile = testdir.makepyfile("""
+            def test_one():
+                pass
+            def test_one_one():
+                assert 0
+            def test_other():
+                raise ValueError(23)
+            def test_two(someargs):
+                pass
+        """)
+        reprec = testdir.inline_run(tfile)
+        passed, skipped, failed = reprec.listoutcomes()
+        assert len(skipped) == 0
+        assert len(passed) == 1
+        assert len(failed) == 3
+        end = lambda x: x.nodeid.split("::")[-1]
+        assert end(failed[0]) == "test_one_one"
+        assert end(failed[1]) == "test_other"
+        assert end(failed[2]) == "test_two"
+        itemstarted = reprec.getcalls("pytest_itemcollected")
+        assert len(itemstarted) == 4
+        colstarted = reprec.getcalls("pytest_collectstart")
+        assert len(colstarted) == 1 + 1
+        col = colstarted[1].collector
+        assert isinstance(col, pytest.Module)
+
+    def test_nested_import_error(self, testdir):
+        tfile = testdir.makepyfile("""
+            import import_fails
+            def test_this():
+                assert import_fails.a == 1
+        """, import_fails="""
+            import does_not_work
+            a = 1
+        """)
+        reprec = testdir.inline_run(tfile)
+        l = reprec.getfailedcollections()
+        assert len(l) == 1
+        out = l[0].longrepr.reprcrash.message
+        assert out.find('does_not_work') != -1
+
+    def test_raises_output(self, testdir):
+        reprec = testdir.inline_runsource("""
+            import py
+            def test_raises_doesnt():
+                py.test.raises(ValueError, int, "3")
+        """)
+        passed, skipped, failed = reprec.listoutcomes()
+        assert len(failed) == 1
+        out = failed[0].longrepr.reprcrash.message
+        if out.find("DID NOT RAISE") == -1:
+            print(out)
+            py.test.fail("incorrect raises() output")
+
+    def test_generator_yields_None(self, testdir):
+        reprec = testdir.inline_runsource("""
+            def test_1():
+                yield None
+        """)
+        failures = reprec.getfailedcollections()
+        out = failures[0].longrepr.reprcrash.message
+        i = out.find('TypeError')
+        assert i != -1
+
+    def test_syntax_error_module(self, testdir):
+        reprec = testdir.inline_runsource("this is really not python")
+        l = reprec.getfailedcollections()
+        assert len(l) == 1
+        out = str(l[0].longrepr)
+        assert out.find(str('not python')) != -1
+
+    def test_exit_first_problem(self, testdir):
+        reprec = testdir.inline_runsource("""
+            def test_one(): assert 0
+            def test_two(): assert 0
+        """, '--exitfirst')
+        passed, skipped, failed = reprec.countoutcomes()
+        assert failed == 1
+        assert passed == skipped == 0
+
+    def test_maxfail(self, testdir):
+        reprec = testdir.inline_runsource("""
+            def test_one(): assert 0
+            def test_two(): assert 0
+            def test_three(): assert 0
+        """, '--maxfail=2')
+        passed, skipped, failed = reprec.countoutcomes()
+        assert failed == 2
+        assert passed == skipped == 0
+
+    def test_broken_repr(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            class BrokenRepr1:
+                foo=0
+                def __repr__(self):
+                    raise Exception("Ha Ha fooled you, I'm a broken repr().")
+
+            class TestBrokenClass:
+                def test_explicit_bad_repr(self):
+                    t = BrokenRepr1()
+                    pytest.raises(Exception, 'repr(t)')
+
+                def test_implicit_bad_repr1(self):
+                    t = BrokenRepr1()
+                    assert t.foo == 1
+
+        """)
+        reprec = testdir.inline_run(p)
+        passed, skipped, failed = reprec.listoutcomes()
+        assert len(failed) == 1
+        out = failed[0].longrepr.reprcrash.message
+        assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 #'
+
+    def test_skip_file_by_conftest(self, testdir):
+        testdir.makepyfile(conftest="""
+            import py
+            def pytest_collect_file():
+                py.test.skip("intentional")
+        """, test_file="""
+            def test_one(): pass
+        """)
+        try:
+            reprec = testdir.inline_run(testdir.tmpdir)
+        except py.test.skip.Exception:
+            py.test.fail("wrong skipped caught")
+        reports = reprec.getreports("pytest_collectreport")
+        assert len(reports) == 1
+        assert reports[0].skipped
+
+class TestNewSession(SessionTests):
+
+    def test_order_of_execution(self, testdir):
+        reprec = testdir.inline_runsource("""
+            l = []
+            def test_1():
+                l.append(1)
+            def test_2():
+                l.append(2)
+            def test_3():
+                assert l == [1,2]
+            class Testmygroup:
+                reslist = l
+                def test_1(self):
+                    self.reslist.append(1)
+                def test_2(self):
+                    self.reslist.append(2)
+                def test_3(self):
+                    self.reslist.append(3)
+                def test_4(self):
+                    assert self.reslist == [1,2,1,2,3]
+        """)
+        passed, skipped, failed = reprec.countoutcomes()
+        assert failed == skipped == 0
+        assert passed == 7
+        # also test listnames() here ...
+
+    def test_collect_only_with_various_situations(self, testdir):
+        p = testdir.makepyfile(
+            test_one="""
+                def test_one():
+                    raise ValueError()
+
+                class TestX:
+                    def test_method_one(self):
+                        pass
+
+                class TestY(TestX):
+                    pass
+            """,
+            test_two="""
+                import py
+                py.test.skip('xxx')
+            """,
+            test_three="xxxdsadsadsadsa",
+            __init__=""
+        )
+        reprec = testdir.inline_run('--collectonly', p.dirpath())
+
+        itemstarted = reprec.getcalls("pytest_itemcollected")
+        assert len(itemstarted) == 3
+        assert not reprec.getreports("pytest_runtest_logreport")
+        started = reprec.getcalls("pytest_collectstart")
+        finished = reprec.getreports("pytest_collectreport")
+        assert len(started) == len(finished)
+        assert len(started) == 8 # XXX extra TopCollector
+        colfail = [x for x in finished if x.failed]
+        colskipped = [x for x in finished if x.skipped]
+        assert len(colfail) == 1
+        assert len(colskipped) == 1
+
+    def test_minus_x_import_error(self, testdir):
+        testdir.makepyfile(__init__="")
+        testdir.makepyfile(test_one="xxxx", test_two="yyyy")
+        reprec = testdir.inline_run("-x", testdir.tmpdir)
+        finished = reprec.getreports("pytest_collectreport")
+        colfail = [x for x in finished if x.failed]
+        assert len(colfail) == 1
+
+
+def test_plugin_specify(testdir):
+    testdir.chdir()
+    config = py.test.raises(ImportError, """
+            testdir.parseconfig("-p", "nqweotexistent")
+    """)
+    #py.test.raises(ImportError,
+    #    "config.pluginmanager.do_configure(config)"
+    #)
+
+def test_plugin_already_exists(testdir):
+    config = testdir.parseconfig("-p", "session")
+    assert config.option.plugins == ['session']
+    config.pluginmanager.do_configure(config)
+
+def test_exclude(testdir):
+    hellodir = testdir.mkdir("hello")
+    hellodir.join("test_hello.py").write("x y syntaxerror")
+    hello2dir = testdir.mkdir("hello2")
+    hello2dir.join("test_hello2.py").write("x y syntaxerror")
+    testdir.makepyfile(test_ok="def test_pass(): pass")
+    result = testdir.runpytest("--ignore=hello", "--ignore=hello2")
+    assert result.ret == 0
+    result.stdout.fnmatch_lines(["*1 passed*"])

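The tests in this new file lean on the inline-run recorder: countoutcomes() returns a (passed, skipped, failed) triple and listoutcomes() the corresponding report lists. A minimal sketch in the same style (hypothetical test, using only the API exercised above):

    def test_outcome_summary_sketch(testdir):
        reprec = testdir.inline_runsource("""
            def test_ok():
                pass
            def test_bad():
                assert 0
        """)
        passed, skipped, failed = reprec.countoutcomes()
        assert (passed, skipped, failed) == (1, 0, 1)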
--- a/testing/plugin/test_nose.py
+++ /dev/null
@@ -1,254 +0,0 @@
-import py
-
-def setup_module(mod):
-    mod.nose = py.test.importorskip("nose")
-
-def test_nose_setup(testdir):
-    p = testdir.makepyfile("""
-        l = []
-
-        def test_hello():
-            assert l == [1]
-
-        def test_world():
-            assert l == [1,2]
-
-        test_hello.setup = lambda: l.append(1)
-        test_hello.teardown = lambda: l.append(2)
-    """)
-    result = testdir.runpytest(p, '-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
-
-
-def test_nose_setup_func(testdir):
-    p = testdir.makepyfile("""
-        l = []
-
-        def my_setup():
-            a = 1
-            l.append(a)
-
-        def my_teardown():
-            b = 2
-            l.append(b)
-
-        def test_hello():
-            print l
-            assert l == [1]
-
-        def test_world():
-            print l
-            assert l == [1,2]
-
-        test_hello.setup = my_setup
-        test_hello.teardown = my_teardown
-    """)
-    result = testdir.runpytest(p, '-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
-
-
-def test_nose_setup_func_failure(testdir):
-    p = testdir.makepyfile("""
-        l = []
-
-        my_setup = lambda x: 1
-        my_teardown = lambda x: 2
-
-        def test_hello():
-            print l
-            assert l == [1]
-
-        def test_world():
-            print l
-            assert l == [1,2]
-
-        test_hello.setup = my_setup
-        test_hello.teardown = my_teardown
-    """)
-    result = testdir.runpytest(p, '-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*TypeError: <lambda>() takes exactly 1 argument (0 given)*"
-    ])
-
-
-def test_nose_setup_func_failure_2(testdir):
-    p = testdir.makepyfile("""
-        l = []
-
-        my_setup = 1
-        my_teardown = 2
-
-        def test_hello():
-            print l
-            assert l == [1]
-
-        def test_world():
-            print l
-            assert l == [1,2]
-
-        test_hello.setup = my_setup
-        test_hello.teardown = my_teardown
-    """)
-    result = testdir.runpytest(p, '-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*TypeError: 'int' object is not callable*"
-    ])
-
-
-def test_nose_setup_partial(testdir):
-    py.test.importorskip("functools")
-    p = testdir.makepyfile("""
-        from functools import partial
-
-        l = []
-
-        def my_setup(x):
-            a = x
-            l.append(a)
-
-        def my_teardown(x):
-            b = x
-            l.append(b)
-
-        my_setup_partial = partial(my_setup, 1)
-        my_teardown_partial = partial(my_teardown, 2)
-
-        def test_hello():
-            print l
-            assert l == [1]
-
-        def test_world():
-            print l
-            assert l == [1,2]
-
-        test_hello.setup = my_setup_partial
-        test_hello.teardown = my_teardown_partial
-    """)
-    result = testdir.runpytest(p, '-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
-
-
-def test_nose_test_generator_fixtures(testdir):
-    p = testdir.makepyfile("""
-        # taken from nose-0.11.1 unit_tests/test_generator_fixtures.py
-        from nose.tools import eq_
-        called = []
-
-        def outer_setup():
-            called.append('outer_setup')
-
-        def outer_teardown():
-            called.append('outer_teardown')
-
-        def inner_setup():
-            called.append('inner_setup')
-
-        def inner_teardown():
-            called.append('inner_teardown')
-
-        def test_gen():
-            called[:] = []
-            for i in range(0, 5):
-                yield check, i
-
-        def check(i):
-            expect = ['outer_setup']
-            for x in range(0, i):
-                expect.append('inner_setup')
-                expect.append('inner_teardown')
-            expect.append('inner_setup')
-            eq_(called, expect)
-
-
-        test_gen.setup = outer_setup
-        test_gen.teardown = outer_teardown
-        check.setup = inner_setup
-        check.teardown = inner_teardown
-
-        class TestClass(object):
-            def setup(self):
-                print "setup called in", self
-                self.called = ['setup']
-
-            def teardown(self):
-                print "teardown called in", self
-                eq_(self.called, ['setup'])
-                self.called.append('teardown')
-
-            def test(self):
-                print "test called in", self
-                for i in range(0, 5):
-                    yield self.check, i
-
-            def check(self, i):
-                print "check called in", self
-                expect = ['setup']
-                #for x in range(0, i):
-                #    expect.append('setup')
-                #    expect.append('teardown')
-                #expect.append('setup')
-                eq_(self.called, expect)
-    """)
-    result = testdir.runpytest(p, '-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*10 passed*"
-    ])
-
-
-def test_module_level_setup(testdir):
-    testdir.makepyfile("""
-        from nose.tools import with_setup
-        items = {}
-
-        def setup():
-            items[1]=1
-
-        def teardown():
-            del items[1]
-
-        def setup2():
-            items[2] = 2
-
-        def teardown2():
-            del items[2]
-
-        def test_setup_module_setup():
-            assert items[1] == 1
-
-        @with_setup(setup2, teardown2)
-        def test_local_setup():
-            assert items[2] == 2
-            assert 1 not in items
-    """)
-    result = testdir.runpytest('-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*2 passed*",
-    ])
-
-
-def test_nose_style_setup_teardown(testdir):
-    testdir.makepyfile("""
-        l = []
-
-        def setup_module():
-            l.append(1)
-
-        def teardown_module():
-            del l[0]
-
-        def test_hello():
-            assert l == [1]
-
-        def test_world():
-            assert l == [1]
-        """)
-    result = testdir.runpytest('-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*2 passed*",
-    ])

--- a/testing/plugin/test_skipping.py
+++ /dev/null
@@ -1,444 +0,0 @@
-import py
-
-from _pytest.skipping import MarkEvaluator, folded_skips
-from _pytest.skipping import pytest_runtest_setup
-from _pytest.runner import runtestprotocol
-
-class TestEvaluator:
-    def test_no_marker(self, testdir):
-        item = testdir.getitem("def test_func(): pass")
-        evalskipif = MarkEvaluator(item, 'skipif')
-        assert not evalskipif
-        assert not evalskipif.istrue()
-
-    def test_marked_no_args(self, testdir):
-        item = testdir.getitem("""
-            import py
-            @py.test.mark.xyz
-            def test_func():
-                pass
-        """)
-        ev = MarkEvaluator(item, 'xyz')
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == ""
-        assert not ev.get("run", False)
-
-    def test_marked_one_arg(self, testdir):
-        item = testdir.getitem("""
-            import py
-            @py.test.mark.xyz("hasattr(os, 'sep')")
-            def test_func():
-                pass
-        """)
-        ev = MarkEvaluator(item, 'xyz')
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "condition: hasattr(os, 'sep')"
-
-    def test_marked_one_arg_with_reason(self, testdir):
-        item = testdir.getitem("""
-            import py
-            @py.test.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
-            def test_func():
-                pass
-        """)
-        ev = MarkEvaluator(item, 'xyz')
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "hello world"
-        assert ev.get("attr") == 2
-
-    def test_marked_one_arg_twice(self, testdir):
-        lines = [
-            '''@py.test.mark.skipif("not hasattr(os, 'murks')")''',
-            '''@py.test.mark.skipif("hasattr(os, 'murks')")'''
-        ]
-        for i in range(0, 2):
-            item = testdir.getitem("""
-                import py
-                %s
-                %s
-                def test_func():
-                    pass
-            """ % (lines[i], lines[(i+1) %2]))
-            ev = MarkEvaluator(item, 'skipif')
-            assert ev
-            assert ev.istrue()
-            expl = ev.getexplanation()
-            assert expl == "condition: not hasattr(os, 'murks')"
-
-    def test_marked_one_arg_twice2(self, testdir):
-        item = testdir.getitem("""
-            import py
-            @py.test.mark.skipif("hasattr(os, 'murks')")
-            @py.test.mark.skipif("not hasattr(os, 'murks')")
-            def test_func():
-                pass
-        """)
-        ev = MarkEvaluator(item, 'skipif')
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "condition: not hasattr(os, 'murks')"
-
-    def test_skipif_class(self, testdir):
-        item, = testdir.getitems("""
-            import py
-            class TestClass:
-                pytestmark = py.test.mark.skipif("config._hackxyz")
-                def test_func(self):
-                    pass
-        """)
-        item.config._hackxyz = 3
-        ev = MarkEvaluator(item, 'skipif')
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "condition: config._hackxyz"
-
-
-class TestXFail:
-    def test_xfail_simple(self, testdir):
-        item = testdir.getitem("""
-            import py
-            @py.test.mark.xfail
-            def test_func():
-                assert 0
-        """)
-        reports = runtestprotocol(item, log=False)
-        assert len(reports) == 3
-        callreport = reports[1]
-        assert callreport.skipped
-        expl = callreport.keywords['xfail']
-        assert expl == ""
-
-    def test_xfail_xpassed(self, testdir):
-        item = testdir.getitem("""
-            import py
-            @py.test.mark.xfail
-            def test_func():
-                assert 1
-        """)
-        reports = runtestprotocol(item, log=False)
-        assert len(reports) == 3
-        callreport = reports[1]
-        assert callreport.failed
-        expl = callreport.keywords['xfail']
-        assert expl == ""
-
-    def test_xfail_run_anyway(self, testdir):
-        testdir.makepyfile("""
-            import py
-            @py.test.mark.xfail
-            def test_func():
-                assert 0
-        """)
-        result = testdir.runpytest("--runxfail")
-        assert result.ret == 1
-        result.stdout.fnmatch_lines([
-            "*def test_func():*",
-            "*assert 0*",
-            "*1 failed*",
-        ])
-
-    def test_xfail_evalfalse_but_fails(self, testdir):
-        item = testdir.getitem("""
-            import py
-            @py.test.mark.xfail('False')
-            def test_func():
-                assert 0
-        """)
-        reports = runtestprotocol(item, log=False)
-        callreport = reports[1]
-        assert callreport.failed
-        assert 'xfail' not in callreport.keywords
-
-    def test_xfail_not_report_default(self, testdir):
-        p = testdir.makepyfile(test_one="""
-            import py
-            @py.test.mark.xfail
-            def test_this():
-                assert 0
-        """)
-        result = testdir.runpytest(p, '-v')
-        #result.stdout.fnmatch_lines([
-        #    "*HINT*use*-r*"
-        #])
-
-    def test_xfail_not_run_xfail_reporting(self, testdir):
-        p = testdir.makepyfile(test_one="""
-            import py
-            @py.test.mark.xfail(run=False, reason="noway")
-            def test_this():
-                assert 0
-            @py.test.mark.xfail("True", run=False)
-            def test_this_true():
-                assert 0
-            @py.test.mark.xfail("False", run=False, reason="huh")
-            def test_this_false():
-                assert 1
-        """)
-        result = testdir.runpytest(p, '--report=xfailed', )
-        result.stdout.fnmatch_lines([
-            "*test_one*test_this*",
-            "*NOTRUN*noway",
-            "*test_one*test_this_true*",
-            "*NOTRUN*condition:*True*",
-            "*1 passed*",
-        ])
-
-    def test_xfail_not_run_no_setup_run(self, testdir):
-        p = testdir.makepyfile(test_one="""
-            import py
-            @py.test.mark.xfail(run=False, reason="hello")
-            def test_this():
-                assert 0
-            def setup_module(mod):
-                raise ValueError(42)
-        """)
-        result = testdir.runpytest(p, '--report=xfailed', )
-        result.stdout.fnmatch_lines([
-            "*test_one*test_this*",
-            "*NOTRUN*hello",
-            "*1 xfailed*",
-        ])
-
-    def test_xfail_xpass(self, testdir):
-        p = testdir.makepyfile(test_one="""
-            import py
-            @py.test.mark.xfail
-            def test_that():
-                assert 1
-        """)
-        result = testdir.runpytest(p, '-rX')
-        result.stdout.fnmatch_lines([
-            "*XPASS*test_that*",
-            "*1 xpassed*"
-        ])
-        assert result.ret == 0
-
-    def test_xfail_imperative(self, testdir):
-        p = testdir.makepyfile("""
-            import py
-            def test_this():
-                py.test.xfail("hello")
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 xfailed*",
-        ])
-        result = testdir.runpytest(p, "-rx")
-        result.stdout.fnmatch_lines([
-            "*XFAIL*test_this*",
-            "*reason:*hello*",
-        ])
-        result = testdir.runpytest(p, "--runxfail")
-        result.stdout.fnmatch_lines([
-            "*def test_this():*",
-            "*py.test.xfail*",
-        ])
-
-    def test_xfail_imperative_in_setup_function(self, testdir):
-        p = testdir.makepyfile("""
-            import py
-            def setup_function(function):
-                py.test.xfail("hello")
-
-            def test_this():
-                assert 0
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 xfailed*",
-        ])
-        result = testdir.runpytest(p, "-rx")
-        result.stdout.fnmatch_lines([
-            "*XFAIL*test_this*",
-            "*reason:*hello*",
-        ])
-        result = testdir.runpytest(p, "--runxfail")
-        result.stdout.fnmatch_lines([
-            "*def setup_function(function):*",
-            "*py.test.xfail*",
-        ])
-
-    def xtest_dynamic_xfail_set_during_setup(self, testdir):
-        p = testdir.makepyfile("""
-            import py
-            def setup_function(function):
-                py.test.mark.xfail(function)
-            def test_this():
-                assert 0
-            def test_that():
-                assert 1
-        """)
-        result = testdir.runpytest(p, '-rxX')
-        result.stdout.fnmatch_lines([
-            "*XFAIL*test_this*",
-            "*XPASS*test_that*",
-        ])
-
-    def test_dynamic_xfail_no_run(self, testdir):
-        p = testdir.makepyfile("""
-            import py
-            def pytest_funcarg__arg(request):
-                request.applymarker(py.test.mark.xfail(run=False))
-            def test_this(arg):
-                assert 0
-        """)
-        result = testdir.runpytest(p, '-rxX')
-        result.stdout.fnmatch_lines([
-            "*XFAIL*test_this*",
-            "*NOTRUN*",
-        ])
-
-    def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
-        p = testdir.makepyfile("""
-            import py
-            def pytest_funcarg__arg(request):
-                request.applymarker(py.test.mark.xfail)
-            def test_this2(arg):
-                assert 0
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 xfailed*",
-        ])
-
-
-class TestSkipif:
-    def test_skipif_conditional(self, testdir):
-        item = testdir.getitem("""
-            import py
-            @py.test.mark.skipif("hasattr(os, 'sep')")
-            def test_func():
-                pass
-        """)
-        x = py.test.raises(py.test.skip.Exception, "pytest_runtest_setup(item)")
-        assert x.value.msg == "condition: hasattr(os, 'sep')"
-
-
-    def test_skipif_reporting(self, testdir):
-        p = testdir.makepyfile("""
-            import py
-            @py.test.mark.skipif("hasattr(sys, 'platform')")
-            def test_that():
-                assert 0
-        """)
-        result = testdir.runpytest(p, '-s', '-rs')
-        result.stdout.fnmatch_lines([
-            "*SKIP*1*platform*",
-            "*1 skipped*"
-        ])
-        assert result.ret == 0
-
-def test_skip_not_report_default(testdir):
-    p = testdir.makepyfile(test_one="""
-        import py
-        def test_this():
-            py.test.skip("hello")
-    """)
-    result = testdir.runpytest(p, '-v')
-    result.stdout.fnmatch_lines([
-        #"*HINT*use*-r*",
-        "*1 skipped*",
-    ])
-
-
-def test_skipif_class(testdir):
-    p = testdir.makepyfile("""
-        import py
-
-        class TestClass:
-            pytestmark = py.test.mark.skipif("True")
-            def test_that(self):
-                assert 0
-            def test_though(self):
-                assert 0
-    """)
-    result = testdir.runpytest(p)
-    result.stdout.fnmatch_lines([
-        "*2 skipped*"
-    ])
-
-
-def test_skip_reasons_folding():
-    class longrepr:
-        class reprcrash:
-            path = 'xyz'
-            lineno = 3
-            message = "justso"
-
-    class X:
-        pass
-    ev1 = X()
-    ev1.when = "execute"
-    ev1.skipped = True
-    ev1.longrepr = longrepr
-
-    ev2 = X()
-    ev2.longrepr = longrepr
-    ev2.skipped = True
-
-    l = folded_skips([ev1, ev2])
-    assert len(l) == 1
-    num, fspath, lineno, reason = l[0]
-    assert num == 2
-    assert fspath == longrepr.reprcrash.path
-    assert lineno == longrepr.reprcrash.lineno
-    assert reason == longrepr.reprcrash.message
-
-def test_skipped_reasons_functional(testdir):
-    testdir.makepyfile(
-        test_one="""
-            from conftest import doskip
-            def setup_function(func):
-                doskip()
-            def test_func():
-                pass
-            class TestClass:
-                def test_method(self):
-                    doskip()
-       """,
-       test_two = """
-            from conftest import doskip
-            doskip()
-       """,
-       conftest = """
-            import py
-            def doskip():
-                py.test.skip('test')
-        """
-    )
-    result = testdir.runpytest('--report=skipped')
-    result.stdout.fnmatch_lines([
-        "*test_two.py S",
-        "*test_one.py ss",
-        "*SKIP*3*conftest.py:3: test",
-    ])
-    assert result.ret == 0
-
-def test_reportchars(testdir):
-    testdir.makepyfile("""
-        import py
-        def test_1():
-            assert 0
-        @py.test.mark.xfail
-        def test_2():
-            assert 0
-        @py.test.mark.xfail
-        def test_3():
-            pass
-        def test_4():
-            py.test.skip("four")
-    """)
-    result = testdir.runpytest("-rfxXs")
-    result.stdout.fnmatch_lines([
-        "FAIL*test_1*",
-        "XFAIL*test_2*",
-        "XPASS*test_3*",
-        "SKIP*four*",
-    ])

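Both xfail styles exercised in the removed file remain available through the py.test namespace of this changeset: the mark decorator and the imperative call. A short sketch (test names are made up):

    import py

    @py.test.mark.xfail(reason="known broken")
    def test_decorated():
        assert 0                       # reported as xfailed, not failed

    def test_imperative():
        py.test.xfail("bailing out")   # raises and marks the test as xfailed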
--- /dev/null
+++ b/testing/test_mark.py
@@ -0,0 +1,295 @@
+import py
+from _pytest.mark import MarkGenerator as Mark
+
+class TestMark:
+    def test_pytest_mark_notcallable(self):
+        mark = Mark()
+        py.test.raises((AttributeError, TypeError), "mark()")
+
+    def test_pytest_mark_bare(self):
+        mark = Mark()
+        def f():
+            pass
+        mark.hello(f)
+        assert f.hello
+
+    def test_pytest_mark_keywords(self):
+        mark = Mark()
+        def f():
+            pass
+        mark.world(x=3, y=4)(f)
+        assert f.world
+        assert f.world.kwargs['x'] == 3
+        assert f.world.kwargs['y'] == 4
+
+    def test_apply_multiple_and_merge(self):
+        mark = Mark()
+        def f():
+            pass
+        marker = mark.world
+        mark.world(x=3)(f)
+        assert f.world.kwargs['x'] == 3
+        mark.world(y=4)(f)
+        assert f.world.kwargs['x'] == 3
+        assert f.world.kwargs['y'] == 4
+        mark.world(y=1)(f)
+        assert f.world.kwargs['y'] == 1
+        assert len(f.world.args) == 0
+
+    def test_pytest_mark_positional(self):
+        mark = Mark()
+        def f():
+            pass
+        mark.world("hello")(f)
+        assert f.world.args[0] == "hello"
+        mark.world("world")(f)
+
+class TestFunctional:
+    def test_mark_per_function(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            @py.test.mark.hello
+            def test_hello():
+                assert hasattr(test_hello, 'hello')
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines(["*passed*"])
+
+    def test_mark_per_module(self, testdir):
+        item = testdir.getitem("""
+            import py
+            pytestmark = py.test.mark.hello
+            def test_func():
+                pass
+        """)
+        keywords = item.keywords
+        assert 'hello' in keywords
+
+    def test_marklist_per_class(self, testdir):
+        item = testdir.getitem("""
+            import py
+            class TestClass:
+                pytestmark = [py.test.mark.hello, py.test.mark.world]
+                def test_func(self):
+                    assert TestClass.test_func.hello
+                    assert TestClass.test_func.world
+        """)
+        keywords = item.keywords
+        assert 'hello' in keywords
+
+    def test_marklist_per_module(self, testdir):
+        item = testdir.getitem("""
+            import py
+            pytestmark = [py.test.mark.hello, py.test.mark.world]
+            class TestClass:
+                def test_func(self):
+                    assert TestClass.test_func.hello
+                    assert TestClass.test_func.world
+        """)
+        keywords = item.keywords
+        assert 'hello' in keywords
+        assert 'world' in keywords
+
+    @py.test.mark.skipif("sys.version_info < (2,6)")
+    def test_mark_per_class_decorator(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.hello
+            class TestClass:
+                def test_func(self):
+                    assert TestClass.test_func.hello
+        """)
+        keywords = item.keywords
+        assert 'hello' in keywords
+
+    @py.test.mark.skipif("sys.version_info < (2,6)")
+    def test_mark_per_class_decorator_plus_existing_dec(self, testdir):
+        item = testdir.getitem("""
+            import py
+            @py.test.mark.hello
+            class TestClass:
+                pytestmark = py.test.mark.world
+                def test_func(self):
+                    assert TestClass.test_func.hello
+                    assert TestClass.test_func.world
+        """)
+        keywords = item.keywords
+        assert 'hello' in keywords
+        assert 'world' in keywords
+
+    def test_merging_markers(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            pytestmark = py.test.mark.hello("pos1", x=1, y=2)
+            class TestClass:
+                # classlevel overrides module level
+                pytestmark = py.test.mark.hello(x=3)
+                @py.test.mark.hello("pos0", z=4)
+                def test_func(self):
+                    pass
+        """)
+        items, rec = testdir.inline_genitems(p)
+        item, = items
+        keywords = item.keywords
+        marker = keywords['hello']
+        assert marker.args == ["pos0", "pos1"]
+        assert marker.kwargs == {'x': 3, 'y': 2, 'z': 4}
+
+    def test_mark_other(self, testdir):
+        py.test.raises(TypeError, '''
+            testdir.getitem("""
+                import py
+                class pytestmark:
+                    pass
+                def test_func():
+                    pass
+            """)
+        ''')
+
+    def test_mark_dynamically_in_funcarg(self, testdir):
+        testdir.makeconftest("""
+            import py
+            def pytest_funcarg__arg(request):
+                request.applymarker(py.test.mark.hello)
+            def pytest_terminal_summary(terminalreporter):
+                l = terminalreporter.stats['passed']
+                terminalreporter._tw.line("keyword: %s" % l[0].keywords)
+        """)
+        testdir.makepyfile("""
+            def test_func(arg):
+                pass
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "keyword: *hello*"
+        ])
+
+
+class Test_genitems:
+    def test_check_collect_hashes(self, testdir):
+        p = testdir.makepyfile("""
+            def test_1():
+                pass
+
+            def test_2():
+                pass
+        """)
+        p.copy(p.dirpath(p.purebasename + "2" + ".py"))
+        items, reprec = testdir.inline_genitems(p.dirpath())
+        assert len(items) == 4
+        for numi, i in enumerate(items):
+            for numj, j in enumerate(items):
+                if numj != numi:
+                    assert hash(i) != hash(j)
+                    assert i != j
+
+    def test_root_conftest_syntax_error(self, testdir):
+        # do we want to unify behaviour with
+        # test_subdir_conftest_error?
+        p = testdir.makepyfile(conftest="raise SyntaxError\n")
+        py.test.raises(SyntaxError, testdir.inline_genitems, p.dirpath())
+
+    def test_example_items1(self, testdir):
+        p = testdir.makepyfile('''
+            def testone():
+                pass
+
+            class TestX:
+                def testmethod_one(self):
+                    pass
+
+            class TestY(TestX):
+                pass
+        ''')
+        items, reprec = testdir.inline_genitems(p)
+        assert len(items) == 3
+        assert items[0].name == 'testone'
+        assert items[1].name == 'testmethod_one'
+        assert items[2].name == 'testmethod_one'
+
+        # let's also test getmodpath here
+        assert items[0].getmodpath() == "testone"
+        assert items[1].getmodpath() == "TestX.testmethod_one"
+        assert items[2].getmodpath() == "TestY.testmethod_one"
+
+        s = items[0].getmodpath(stopatmodule=False)
+        assert s.endswith("test_example_items1.testone")
+        print(s)
+
+
+class TestKeywordSelection:
+    def test_select_simple(self, testdir):
+        file_test = testdir.makepyfile("""
+            def test_one():
+                assert 0
+            class TestClass(object):
+                def test_method_one(self):
+                    assert 42 == 43
+        """)
+        def check(keyword, name):
+            reprec = testdir.inline_run("-s", "-k", keyword, file_test)
+            passed, skipped, failed = reprec.listoutcomes()
+            assert len(failed) == 1
+            assert failed[0].nodeid.split("::")[-1] == name
+            assert len(reprec.getcalls('pytest_deselected')) == 1
+
+        for keyword in ['test_one', 'est_on']:
+            #yield check, keyword, 'test_one'
+            check(keyword, 'test_one')
+        check('TestClass.test', 'test_method_one')
+
+    def test_select_extra_keywords(self, testdir):
+        p = testdir.makepyfile(test_select="""
+            def test_1():
+                pass
+            class TestClass:
+                def test_2(self):
+                    pass
+        """)
+        testdir.makepyfile(conftest="""
+            import py
+            def pytest_pycollect_makeitem(__multicall__, name):
+                if name == "TestClass":
+                    item = __multicall__.execute()
+                    item.keywords['xxx'] = True
+                    return item
+        """)
+        for keyword in ('xxx', 'xxx test_2', 'TestClass', 'xxx -test_1',
+                        'TestClass test_2', 'xxx TestClass test_2',):
+            reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword)
+            py.builtin.print_("keyword", repr(keyword))
+            passed, skipped, failed = reprec.listoutcomes()
+            assert len(passed) == 1
+            assert passed[0].nodeid.endswith("test_2")
+            dlist = reprec.getcalls("pytest_deselected")
+            assert len(dlist) == 1
+            assert dlist[0].items[0].name == 'test_1'
+
+    def test_select_starton(self, testdir):
+        threepass = testdir.makepyfile(test_threepass="""
+            def test_one(): assert 1
+            def test_two(): assert 1
+            def test_three(): assert 1
+        """)
+        reprec = testdir.inline_run("-k", "test_two:", threepass)
+        passed, skipped, failed = reprec.listoutcomes()
+        assert len(passed) == 2
+        assert not failed
+        dlist = reprec.getcalls("pytest_deselected")
+        assert len(dlist) == 1
+        item = dlist[0].items[0]
+        assert item.name == "test_one"
+
+
+    def test_keyword_extra(self, testdir):
+        p = testdir.makepyfile("""
+           def test_one():
+               assert 0
+           test_one.mykeyword = True
+        """)
+        reprec = testdir.inline_run("-k", "-mykeyword", p)
+        passed, skipped, failed = reprec.countoutcomes()
+        assert passed + skipped + failed == 0
+        reprec = testdir.inline_run("-k", "mykeyword", p)
+        passed, skipped, failed = reprec.countoutcomes()
+        assert failed == 1

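The keyword-merging behaviour covered by test_apply_multiple_and_merge can be seen directly on the generator object; a minimal sketch using the same import as this file:

    from _pytest.mark import MarkGenerator as Mark

    mark = Mark()

    def f():
        pass

    mark.world(x=3)(f)
    mark.world(y=4)(f)        # later applications merge keyword arguments
    assert f.world.kwargs == {'x': 3, 'y': 4}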
--- /dev/null
+++ b/testing/test_monkeypatch.py
@@ -0,0 +1,139 @@
+import os, sys
+import py
+from _pytest.monkeypatch import monkeypatch as MonkeyPatch
+
+def test_setattr():
+    class A:
+        x = 1
+    monkeypatch = MonkeyPatch()
+    py.test.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)")
+    monkeypatch.setattr(A, 'y', 2, raising=False)
+    assert A.y == 2
+    monkeypatch.undo()
+    assert not hasattr(A, 'y')
+
+    monkeypatch = MonkeyPatch()
+    monkeypatch.setattr(A, 'x', 2)
+    assert A.x == 2
+    monkeypatch.setattr(A, 'x', 3)
+    assert A.x == 3
+    monkeypatch.undo()
+    assert A.x == 1
+
+    A.x = 5
+    monkeypatch.undo() # double-undo makes no modification
+    assert A.x == 5
+
+def test_delattr():
+    class A:
+        x = 1
+    monkeypatch = MonkeyPatch()
+    monkeypatch.delattr(A, 'x')
+    assert not hasattr(A, 'x')
+    monkeypatch.undo()
+    assert A.x == 1
+
+    monkeypatch = MonkeyPatch()
+    monkeypatch.delattr(A, 'x')
+    py.test.raises(AttributeError, "monkeypatch.delattr(A, 'y')")
+    monkeypatch.delattr(A, 'y', raising=False)
+    monkeypatch.setattr(A, 'x', 5, raising=False)
+    assert A.x == 5
+    monkeypatch.undo()
+    assert A.x == 1
+
+def test_setitem():
+    d = {'x': 1}
+    monkeypatch = MonkeyPatch()
+    monkeypatch.setitem(d, 'x', 2)
+    monkeypatch.setitem(d, 'y', 1700)
+    monkeypatch.setitem(d, 'y', 1700)
+    assert d['x'] == 2
+    assert d['y'] == 1700
+    monkeypatch.setitem(d, 'x', 3)
+    assert d['x'] == 3
+    monkeypatch.undo()
+    assert d['x'] == 1
+    assert 'y' not in d
+    d['x'] = 5
+    monkeypatch.undo()
+    assert d['x'] == 5
+
+def test_delitem():
+    d = {'x': 1}
+    monkeypatch = MonkeyPatch()
+    monkeypatch.delitem(d, 'x')
+    assert 'x' not in d
+    monkeypatch.delitem(d, 'y', raising=False)
+    py.test.raises(KeyError, "monkeypatch.delitem(d, 'y')")
+    assert not d
+    monkeypatch.setitem(d, 'y', 1700)
+    assert d['y'] == 1700
+    d['hello'] = 'world'
+    monkeypatch.setitem(d, 'x', 1500)
+    assert d['x'] == 1500
+    monkeypatch.undo()
+    assert d == {'hello': 'world', 'x': 1}
+
+def test_setenv():
+    monkeypatch = MonkeyPatch()
+    monkeypatch.setenv('XYZ123', 2)
+    import os
+    assert os.environ['XYZ123'] == "2"
+    monkeypatch.undo()
+    assert 'XYZ123' not in os.environ
+
+def test_delenv():
+    name = 'xyz1234'
+    assert name not in os.environ
+    monkeypatch = MonkeyPatch()
+    py.test.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name)
+    monkeypatch.delenv(name, raising=False)
+    monkeypatch.undo()
+    os.environ[name] = "1"
+    try:
+        monkeypatch = MonkeyPatch()
+        monkeypatch.delenv(name)
+        assert name not in os.environ
+        monkeypatch.setenv(name, "3")
+        assert os.environ[name] == "3"
+        monkeypatch.undo()
+        assert os.environ[name] == "1"
+    finally:
+        if name in os.environ:
+            del os.environ[name]
+
+def test_setenv_prepend():
+    import os
+    monkeypatch = MonkeyPatch()
+    monkeypatch.setenv('XYZ123', 2, prepend="-")
+    assert os.environ['XYZ123'] == "2"
+    monkeypatch.setenv('XYZ123', 3, prepend="-")
+    assert os.environ['XYZ123'] == "3-2"
+    monkeypatch.undo()
+    assert 'XYZ123' not in os.environ
+
+def test_monkeypatch_plugin(testdir):
+    reprec = testdir.inline_runsource("""
+        def test_method(monkeypatch):
+            assert monkeypatch.__class__.__name__ == "monkeypatch"
+    """)
+    res = reprec.countoutcomes()
+    assert tuple(res) == (1, 0, 0), res
+
+def test_syspath_prepend():
+    old = list(sys.path)
+    try:
+        monkeypatch = MonkeyPatch()
+        monkeypatch.syspath_prepend('world')
+        monkeypatch.syspath_prepend('hello')
+        assert sys.path[0] == "hello"
+        assert sys.path[1] == "world"
+        monkeypatch.undo()
+        assert sys.path == old
+        monkeypatch.undo()
+        assert sys.path == old
+    finally:
+        sys.path[:] = old
+
+

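A small usage sketch of the MonkeyPatch object tested above, showing that undo() restores the original attribute (the Config class here is hypothetical):

    from _pytest.monkeypatch import monkeypatch as MonkeyPatch

    class Config:
        retries = 3

    mp = MonkeyPatch()
    mp.setattr(Config, 'retries', 10)
    assert Config.retries == 10
    mp.undo()                 # restores the original value
    assert Config.retries == 3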
--- /dev/null
+++ b/testing/test_nose.py
@@ -0,0 +1,254 @@
+import py
+
+def setup_module(mod):
+    mod.nose = py.test.importorskip("nose")
+
+def test_nose_setup(testdir):
+    p = testdir.makepyfile("""
+        l = []
+
+        def test_hello():
+            assert l == [1]
+
+        def test_world():
+            assert l == [1,2]
+
+        test_hello.setup = lambda: l.append(1)
+        test_hello.teardown = lambda: l.append(2)
+    """)
+    result = testdir.runpytest(p, '-p', 'nose')
+    result.stdout.fnmatch_lines([
+        "*2 passed*"
+    ])
+
+
+def test_nose_setup_func(testdir):
+    p = testdir.makepyfile("""
+        l = []
+
+        def my_setup():
+            a = 1
+            l.append(a)
+
+        def my_teardown():
+            b = 2
+            l.append(b)
+
+        def test_hello():
+            print l
+            assert l == [1]
+
+        def test_world():
+            print l
+            assert l == [1,2]
+
+        test_hello.setup = my_setup
+        test_hello.teardown = my_teardown
+    """)
+    result = testdir.runpytest(p, '-p', 'nose')
+    result.stdout.fnmatch_lines([
+        "*2 passed*"
+    ])
+
+
+def test_nose_setup_func_failure(testdir):
+    p = testdir.makepyfile("""
+        l = []
+
+        my_setup = lambda x: 1
+        my_teardown = lambda x: 2
+
+        def test_hello():
+            print l
+            assert l == [1]
+
+        def test_world():
+            print l
+            assert l == [1,2]
+
+        test_hello.setup = my_setup
+        test_hello.teardown = my_teardown
+    """)
+    result = testdir.runpytest(p, '-p', 'nose')
+    result.stdout.fnmatch_lines([
+        "*TypeError: <lambda>() takes exactly 1 argument (0 given)*"
+    ])
+
+
+def test_nose_setup_func_failure_2(testdir):
+    p = testdir.makepyfile("""
+        l = []
+
+        my_setup = 1
+        my_teardown = 2
+
+        def test_hello():
+            print l
+            assert l == [1]
+
+        def test_world():
+            print l
+            assert l == [1,2]
+
+        test_hello.setup = my_setup
+        test_hello.teardown = my_teardown
+    """)
+    result = testdir.runpytest(p, '-p', 'nose')
+    result.stdout.fnmatch_lines([
+        "*TypeError: 'int' object is not callable*"
+    ])
+
+
+def test_nose_setup_partial(testdir):
+    py.test.importorskip("functools")
+    p = testdir.makepyfile("""
+        from functools import partial
+
+        l = []
+
+        def my_setup(x):
+            a = x
+            l.append(a)
+
+        def my_teardown(x):
+            b = x
+            l.append(b)
+
+        my_setup_partial = partial(my_setup, 1)
+        my_teardown_partial = partial(my_teardown, 2)
+
+        def test_hello():
+            print l
+            assert l == [1]
+
+        def test_world():
+            print l
+            assert l == [1,2]
+
+        test_hello.setup = my_setup_partial
+        test_hello.teardown = my_teardown_partial
+    """)
+    result = testdir.runpytest(p, '-p', 'nose')
+    result.stdout.fnmatch_lines([
+        "*2 passed*"
+    ])
+
+
+def test_nose_test_generator_fixtures(testdir):
+    p = testdir.makepyfile("""
+        # taken from nose-0.11.1 unit_tests/test_generator_fixtures.py
+        from nose.tools import eq_
+        called = []
+
+        def outer_setup():
+            called.append('outer_setup')
+
+        def outer_teardown():
+            called.append('outer_teardown')
+
+        def inner_setup():
+            called.append('inner_setup')
+
+        def inner_teardown():
+            called.append('inner_teardown')
+
+        def test_gen():
+            called[:] = []
+            for i in range(0, 5):
+                yield check, i
+
+        def check(i):
+            expect = ['outer_setup']
+            for x in range(0, i):
+                expect.append('inner_setup')
+                expect.append('inner_teardown')
+            expect.append('inner_setup')
+            eq_(called, expect)
+
+
+        test_gen.setup = outer_setup
+        test_gen.teardown = outer_teardown
+        check.setup = inner_setup
+        check.teardown = inner_teardown
+
+        class TestClass(object):
+            def setup(self):
+                print "setup called in", self
+                self.called = ['setup']
+
+            def teardown(self):
+                print "teardown called in", self
+                eq_(self.called, ['setup'])
+                self.called.append('teardown')
+
+            def test(self):
+                print "test called in", self
+                for i in range(0, 5):
+                    yield self.check, i
+
+            def check(self, i):
+                print "check called in", self
+                expect = ['setup']
+                #for x in range(0, i):
+                #    expect.append('setup')
+                #    expect.append('teardown')
+                #expect.append('setup')
+                eq_(self.called, expect)
+    """)
+    result = testdir.runpytest(p, '-p', 'nose')
+    result.stdout.fnmatch_lines([
+        "*10 passed*"
+    ])
+
+
+def test_module_level_setup(testdir):
+    testdir.makepyfile("""
+        from nose.tools import with_setup
+        items = {}
+
+        def setup():
+            items[1]=1
+
+        def teardown():
+            del items[1]
+
+        def setup2():
+            items[2] = 2
+
+        def teardown2():
+            del items[2]
+
+        def test_setup_module_setup():
+            assert items[1] == 1
+
+        @with_setup(setup2, teardown2)
+        def test_local_setup():
+            assert items[2] == 2
+            assert 1 not in items
+    """)
+    result = testdir.runpytest('-p', 'nose')
+    result.stdout.fnmatch_lines([
+        "*2 passed*",
+    ])
+
+
+def test_nose_style_setup_teardown(testdir):
+    testdir.makepyfile("""
+        l = []
+
+        def setup_module():
+            l.append(1)
+
+        def teardown_module():
+            del l[0]
+
+        def test_hello():
+            assert l == [1]
+
+        def test_world():
+            assert l == [1]
+        """)
+    result = testdir.runpytest('-p', 'nose')
+    result.stdout.fnmatch_lines([
+        "*2 passed*",
+    ])

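Conceptually, the nose support honours the per-function setup/teardown attributes set in these tests roughly as follows (a simplified sketch, not the plugin's actual code; the helper name is made up):

    def run_nose_style(func):
        setup = getattr(func, 'setup', None)
        teardown = getattr(func, 'teardown', None)
        if callable(setup):
            setup()
        try:
            func()
        finally:
            if callable(teardown):
                teardown()

    # e.g. run_nose_style(test_hello) runs test_hello.setup, the test itself,
    # and then test_hello.teardown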
--- /dev/null
+++ b/testing/test_genscript.py
@@ -0,0 +1,41 @@
+import py, os, sys
+import subprocess
+
+
+def pytest_funcarg__standalone(request):
+    return request.cached_setup(scope="module", setup=lambda: Standalone(request))
+
+class Standalone:
+    def __init__(self, request):
+        self.testdir = request.getfuncargvalue("testdir")
+        script = "mypytest"
+        result = self.testdir.runpytest("--genscript=%s" % script)
+        assert result.ret == 0
+        self.script = self.testdir.tmpdir.join(script)
+        assert self.script.check()
+
+    def run(self, anypython, testdir, *args):
+        testdir.chdir()
+        return testdir._run(anypython, self.script, *args)
+
+def test_gen(testdir, anypython, standalone):
+    result = standalone.run(anypython, testdir, '--version')
+    assert result.ret == 0
+    result.stderr.fnmatch_lines([
+        "*imported from*mypytest*"
+    ])
+    p = testdir.makepyfile("def test_func(): assert 0")
+    result = standalone.run(anypython, testdir, p)
+    assert result.ret != 0
+
+def test_rundist(testdir, pytestconfig, standalone):
+    pytestconfig.pluginmanager.skipifmissing("xdist")
+    testdir.makepyfile("""
+        def test_one():
+            pass
+    """)
+    result = standalone.run(sys.executable, testdir, '-n', '3')
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
+        "*1 passed*",
+    ])

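Outside the funcarg machinery above, a standalone runner can be generated and exercised directly; a rough sketch, assuming a py.test executable is on PATH and the script is written into the current directory:

    import subprocess, sys

    subprocess.check_call(["py.test", "--genscript=mypytest"])
    subprocess.check_call([sys.executable, "mypytest", "--version"])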
--- /dev/null
+++ b/testing/test_tmpdir.py
@@ -0,0 +1,29 @@
+import py
+
+from _pytest.tmpdir import pytest_funcarg__tmpdir
+from _pytest.python import FuncargRequest
+
+def test_funcarg(testdir):
+    item = testdir.getitem("""
+            def pytest_generate_tests(metafunc):
+                metafunc.addcall(id='a')
+                metafunc.addcall(id='b')
+            def test_func(tmpdir): pass
+            """, 'test_func[a]')
+    p = pytest_funcarg__tmpdir(FuncargRequest(item))
+    assert p.check()
+    bn = p.basename.strip("0123456789")
+    assert bn.endswith("test_func_a_")
+    item.name = "qwe/\\abc"
+    p = pytest_funcarg__tmpdir(FuncargRequest(item))
+    assert p.check()
+    bn = p.basename.strip("0123456789")
+    assert bn == "qwe__abc"
+
+def test_ensuretemp(recwarn):
+    #py.test.deprecated_call(py.test.ensuretemp, 'hello')
+    d1 = py.test.ensuretemp('hello')
+    d2 = py.test.ensuretemp('hello')
+    assert d1 == d2
+    assert d1.check(dir=1)
+

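Typical use of the tmpdir funcarg whose directory naming is tested above; tmpdir is a py.path.local pointing to a per-test directory (the test name is made up):

    def test_writes_a_file(tmpdir):
        p = tmpdir.join("hello.txt")
        p.write("content")
        assert p.read() == "content"
        assert tmpdir.check(dir=1)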
--- /dev/null
+++ b/testing/test_pdb.py
@@ -0,0 +1,145 @@
+import py
+import sys
+
+class TestPDB:
+    def pytest_funcarg__pdblist(self, request):
+        monkeypatch = request.getfuncargvalue("monkeypatch")
+        pdblist = []
+        def mypdb(*args):
+            pdblist.append(args)
+        plugin = request.config.pluginmanager.getplugin('pdb')
+        monkeypatch.setattr(plugin, 'post_mortem', mypdb)
+        return pdblist
+
+    def test_pdb_on_fail(self, testdir, pdblist):
+        rep = testdir.inline_runsource1('--pdb', """
+            def test_func():
+                assert 0
+        """)
+        assert rep.failed
+        assert len(pdblist) == 1
+        tb = py.code.Traceback(pdblist[0][0])
+        assert tb[-1].name == "test_func"
+
+    def test_pdb_on_xfail(self, testdir, pdblist):
+        rep = testdir.inline_runsource1('--pdb', """
+            import py
+            @py.test.mark.xfail
+            def test_func():
+                assert 0
+        """)
+        assert "xfail" in rep.keywords
+        assert not pdblist
+
+    def test_pdb_on_skip(self, testdir, pdblist):
+        rep = testdir.inline_runsource1('--pdb', """
+            import py
+            def test_func():
+                py.test.skip("hello")
+        """)
+        assert rep.skipped
+        assert len(pdblist) == 0
+
+    def test_pdb_on_BdbQuit(self, testdir, pdblist):
+        rep = testdir.inline_runsource1('--pdb', """
+            import py, bdb
+            def test_func():
+                raise bdb.BdbQuit
+        """)
+        assert rep.failed
+        assert len(pdblist) == 0
+
+    def test_pdb_interaction(self, testdir):
+        p1 = testdir.makepyfile("""
+            def test_1():
+                i = 0
+                assert i == 1
+        """)
+        child = testdir.spawn_pytest("--pdb %s" % p1)
+        child.expect(".*def test_1")
+        child.expect(".*i = 0")
+        child.expect("(Pdb)")
+        child.sendeof()
+        rest = child.read()
+        assert "1 failed" in rest
+        assert "def test_1" not in rest
+        if child.isalive():
+            child.wait()
+
+    def test_pdb_interaction_exception(self, testdir):
+        p1 = testdir.makepyfile("""
+            import py
+            def globalfunc():
+                pass
+            def test_1():
+                py.test.raises(ValueError, globalfunc)
+        """)
+        child = testdir.spawn_pytest("--pdb %s" % p1)
+        child.expect(".*def test_1")
+        child.expect(".*py.test.raises.*globalfunc")
+        child.expect("(Pdb)")
+        child.sendline("globalfunc")
+        child.expect(".*function")
+        child.sendeof()
+        child.expect("1 failed")
+        if child.isalive():
+            child.wait()
+
+    def test_pdb_interaction_capturing_simple(self, testdir):
+        p1 = testdir.makepyfile("""
+            import py
+            def test_1():
+                i = 0
+                print ("hello17")
+                py.test.set_trace()
+                x = 3
+        """)
+        child = testdir.spawn_pytest(str(p1))
+        child.expect("test_1")
+        child.expect("x = 3")
+        child.expect("(Pdb)")
+        child.sendeof()
+        rest = child.read()
+        assert "1 failed" in rest
+        assert "def test_1" in rest
+        assert "hello17" in rest # out is captured
+        if child.isalive():
+            child.wait()
+
+    def test_pdb_interaction_capturing_twice(self, testdir):
+        p1 = testdir.makepyfile("""
+            import py
+            def test_1():
+                i = 0
+                print ("hello17")
+                py.test.set_trace()
+                x = 3
+                print ("hello18")
+                py.test.set_trace()
+                x = 4
+        """)
+        child = testdir.spawn_pytest(str(p1))
+        child.expect("test_1")
+        child.expect("x = 3")
+        child.expect("(Pdb)")
+        child.sendline('c')
+        child.expect("x = 4")
+        child.sendeof()
+        rest = child.read()
+        assert "1 failed" in rest
+        assert "def test_1" in rest
+        assert "hello17" in rest # out is captured
+        assert "hello18" in rest # out is captured
+        if child.isalive():
+            child.wait()
+
+    def test_pdb_used_outside_test(self, testdir):
+        p1 = testdir.makepyfile("""
+            import py
+            py.test.set_trace()
+            x = 5
+        """)
+        child = testdir.spawn("%s %s" %(sys.executable, p1))
+        child.expect("x = 5")
+        child.sendeof()
+        child.wait()

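The pexpect-driven tests above all revolve around the same user-facing call; a minimal sketch of a test that drops into the debugger (hypothetical test):

    import py

    def test_debug_me():
        x = 3
        py.test.set_trace()   # suspends capturing and opens a (Pdb) prompt
        assert x == 3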
--- /dev/null
+++ b/testing/test_runner_xunit.py
@@ -0,0 +1,212 @@
+#
+# test correct setup/teardowns at
+# module, class, and instance level
+
+def test_module_and_function_setup(testdir):
+    reprec = testdir.inline_runsource("""
+        modlevel = []
+        def setup_module(module):
+            assert not modlevel
+            module.modlevel.append(42)
+
+        def teardown_module(module):
+            modlevel.pop()
+
+        def setup_function(function):
+            function.answer = 17
+
+        def teardown_function(function):
+            del function.answer
+
+        def test_modlevel():
+            assert modlevel[0] == 42
+            assert test_modlevel.answer == 17
+
+        class TestFromClass:
+            def test_module(self):
+                assert modlevel[0] == 42
+                assert not hasattr(test_modlevel, 'answer')
+    """)
+    rep = reprec.matchreport("test_modlevel")
+    assert rep.passed
+    rep = reprec.matchreport("test_module")
+    assert rep.passed
+
+def test_class_setup(testdir):
+    reprec = testdir.inline_runsource("""
+        class TestSimpleClassSetup:
+            clslevel = []
+            def setup_class(cls):
+                cls.clslevel.append(23)
+
+            def teardown_class(cls):
+                cls.clslevel.pop()
+
+            def test_classlevel(self):
+                assert self.clslevel[0] == 23
+
+        class TestInheritedClassSetupStillWorks(TestSimpleClassSetup):
+            def test_classlevel_anothertime(self):
+                assert self.clslevel == [23]
+
+        def test_cleanup():
+            assert not TestSimpleClassSetup.clslevel
+            assert not TestInheritedClassSetupStillWorks.clslevel
+    """)
+    reprec.assertoutcome(passed=1+2+1)
+
+
+def test_method_setup(testdir):
+    reprec = testdir.inline_runsource("""
+        class TestSetupMethod:
+            def setup_method(self, meth):
+                self.methsetup = meth
+            def teardown_method(self, meth):
+                del self.methsetup
+
+            def test_some(self):
+                assert self.methsetup == self.test_some
+
+            def test_other(self):
+                assert self.methsetup == self.test_other
+    """)
+    reprec.assertoutcome(passed=2)
+
+def test_method_generator_setup(testdir):
+    reprec = testdir.inline_runsource("""
+        class TestSetupTeardownOnInstance:
+            def setup_class(cls):
+                cls.classsetup = True
+
+            def setup_method(self, method):
+                self.methsetup = method
+
+            def test_generate(self):
+                assert self.classsetup
+                assert self.methsetup == self.test_generate
+                yield self.generated, 5
+                yield self.generated, 2
+
+            def generated(self, value):
+                assert self.classsetup
+                assert self.methsetup == self.test_generate
+                assert value == 5
+    """)
+    reprec.assertoutcome(passed=1, failed=1)
+
+def test_func_generator_setup(testdir):
+    reprec = testdir.inline_runsource("""
+        import sys
+
+        def setup_module(mod):
+            print ("setup_module")
+            mod.x = []
+
+        def setup_function(fun):
+            print ("setup_function")
+            x.append(1)
+
+        def teardown_function(fun):
+            print ("teardown_function")
+            x.pop()
+
+        def test_one():
+            assert x == [1]
+            def check():
+                print ("check")
+                sys.stderr.write("e\\n")
+                assert x == [1]
+            yield check
+            assert x == [1]
+    """)
+    rep = reprec.matchreport("test_one", names="pytest_runtest_logreport")
+    assert rep.passed
+
+def test_method_setup_uses_fresh_instances(testdir):
+    reprec = testdir.inline_runsource("""
+        class TestSelfState1:
+            memory = []
+            def test_hello(self):
+                self.memory.append(self)
+
+            def test_afterhello(self):
+                assert self != self.memory[0]
+    """)
+    reprec.assertoutcome(passed=2, failed=0)
+
+def test_failing_setup_calls_teardown(testdir):
+    p = testdir.makepyfile("""
+        def setup_module(mod):
+            raise ValueError(42)
+        def test_function():
+            assert 0
+        def teardown_module(mod):
+            raise ValueError(43)
+    """)
+    result = testdir.runpytest(p)
+    result.stdout.fnmatch_lines([
+        "*42*",
+        "*43*",
+        "*2 error*"
+    ])
+
+def test_setup_that_skips_calledagain_and_teardown(testdir):
+    p = testdir.makepyfile("""
+        import py
+        def setup_module(mod):
+            py.test.skip("x")
+        def test_function1():
+            pass
+        def test_function2():
+            pass
+        def teardown_module(mod):
+            raise ValueError(43)
+    """)
+    result = testdir.runpytest(p)
+    result.stdout.fnmatch_lines([
+        "*ValueError*43*",
+        "*2 skipped*1 error*",
+    ])
+
+def test_setup_fails_again_on_all_tests(testdir):
+    p = testdir.makepyfile("""
+        import py
+        def setup_module(mod):
+            raise ValueError(42)
+        def test_function1():
+            pass
+        def test_function2():
+            pass
+        def teardown_module(mod):
+            raise ValueError(43)
+    """)
+    result = testdir.runpytest(p)
+    result.stdout.fnmatch_lines([
+        "*3 error*"
+    ])
+    assert "passed" not in result.stdout.str()
+
+def test_setup_funcarg_setup_not_called_if_outer_scope_fails(testdir):
+    p = testdir.makepyfile("""
+        import py
+        def setup_module(mod):
+            raise ValueError(42)
+        def pytest_funcarg__hello(request):
+            raise ValueError("xyz43")
+        def test_function1(hello):
+            pass
+        def test_function2(hello):
+            pass
+    """)
+    result = testdir.runpytest(p)
+    result.stdout.fnmatch_lines([
+        "*function1*",
+        "*ValueError*42*",
+        "*function2*",
+        "*ValueError*42*",
+        "*2 error*"
+    ])
+    assert "xyz43" not in result.stdout.str()
+
+
+

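For reference, testing/test_runner_xunit.py above exercises the xunit-style setup/teardown hooks. A minimal sketch of their call points follows; the "calls" list and the test names are invented purely to make the ordering visible.

    # sketch: the xunit-style hooks tested above; "calls" only records ordering
    calls = []

    def setup_module(module):            # once, before the first test in the module
        calls.append("setup_module")

    def setup_function(function):        # before each module-level test function
        calls.append("setup_function")

    def teardown_function(function):     # after each module-level test function
        calls.append("teardown_function")

    def teardown_module(module):         # once, after the last test in the module
        calls.append("teardown_module")

    def test_ordering():
        assert calls == ["setup_module", "setup_function"]

    class TestMethodSetup:
        def setup_method(self, method):  # before each test method, on a fresh instance
            self.method = method

        def test_method(self):
            assert self.method == self.test_method
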
--- a/testing/plugin/test_session.py
+++ /dev/null
@@ -1,227 +0,0 @@
-import pytest, py
-
-class SessionTests:
-    def test_basic_testitem_events(self, testdir):
-        tfile = testdir.makepyfile("""
-            def test_one():
-                pass
-            def test_one_one():
-                assert 0
-            def test_other():
-                raise ValueError(23)
-            def test_two(someargs):
-                pass
-        """)
-        reprec = testdir.inline_run(tfile)
-        passed, skipped, failed = reprec.listoutcomes()
-        assert len(skipped) == 0
-        assert len(passed) == 1
-        assert len(failed) == 3
-        end = lambda x: x.nodeid.split("::")[-1]
-        assert end(failed[0]) == "test_one_one"
-        assert end(failed[1]) == "test_other"
-        assert end(failed[2]) == "test_two"
-        itemstarted = reprec.getcalls("pytest_itemcollected")
-        assert len(itemstarted) == 4
-        colstarted = reprec.getcalls("pytest_collectstart")
-        assert len(colstarted) == 1 + 1
-        col = colstarted[1].collector
-        assert isinstance(col, pytest.Module)
-
-    def test_nested_import_error(self, testdir):
-        tfile = testdir.makepyfile("""
-            import import_fails
-            def test_this():
-                assert import_fails.a == 1
-        """, import_fails="""
-            import does_not_work
-            a = 1
-        """)
-        reprec = testdir.inline_run(tfile)
-        l = reprec.getfailedcollections()
-        assert len(l) == 1
-        out = l[0].longrepr.reprcrash.message
-        assert out.find('does_not_work') != -1
-
-    def test_raises_output(self, testdir):
-        reprec = testdir.inline_runsource("""
-            import py
-            def test_raises_doesnt():
-                py.test.raises(ValueError, int, "3")
-        """)
-        passed, skipped, failed = reprec.listoutcomes()
-        assert len(failed) == 1
-        out = failed[0].longrepr.reprcrash.message
-        if not out.find("DID NOT RAISE") != -1:
-            print(out)
-            py.test.fail("incorrect raises() output")
-
-    def test_generator_yields_None(self, testdir):
-        reprec = testdir.inline_runsource("""
-            def test_1():
-                yield None
-        """)
-        failures = reprec.getfailedcollections()
-        out = failures[0].longrepr.reprcrash.message
-        i = out.find('TypeError')
-        assert i != -1
-
-    def test_syntax_error_module(self, testdir):
-        reprec = testdir.inline_runsource("this is really not python")
-        l = reprec.getfailedcollections()
-        assert len(l) == 1
-        out = str(l[0].longrepr)
-        assert out.find(str('not python')) != -1
-
-    def test_exit_first_problem(self, testdir):
-        reprec = testdir.inline_runsource("""
-            def test_one(): assert 0
-            def test_two(): assert 0
-        """, '--exitfirst')
-        passed, skipped, failed = reprec.countoutcomes()
-        assert failed == 1
-        assert passed == skipped == 0
-
-    def test_maxfail(self, testdir):
-        reprec = testdir.inline_runsource("""
-            def test_one(): assert 0
-            def test_two(): assert 0
-            def test_three(): assert 0
-        """, '--maxfail=2')
-        passed, skipped, failed = reprec.countoutcomes()
-        assert failed == 2
-        assert passed == skipped == 0
-
-    def test_broken_repr(self, testdir):
-        p = testdir.makepyfile("""
-            import pytest
-            class BrokenRepr1:
-                foo=0
-                def __repr__(self):
-                    raise Exception("Ha Ha fooled you, I'm a broken repr().")
-
-            class TestBrokenClass:
-                def test_explicit_bad_repr(self):
-                    t = BrokenRepr1()
-                    pytest.raises(Exception, 'repr(t)')
-
-                def test_implicit_bad_repr1(self):
-                    t = BrokenRepr1()
-                    assert t.foo == 1
-
-        """)
-        reprec = testdir.inline_run(p)
-        passed, skipped, failed = reprec.listoutcomes()
-        assert len(failed) == 1
-        out = failed[0].longrepr.reprcrash.message
-        assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 #'
-
-    def test_skip_file_by_conftest(self, testdir):
-        testdir.makepyfile(conftest="""
-            import py
-            def pytest_collect_file():
-                py.test.skip("intentional")
-        """, test_file="""
-            def test_one(): pass
-        """)
-        try:
-            reprec = testdir.inline_run(testdir.tmpdir)
-        except py.test.skip.Exception:
-            py.test.fail("wrong skipped caught")
-        reports = reprec.getreports("pytest_collectreport")
-        assert len(reports) == 1
-        assert reports[0].skipped
-
-class TestNewSession(SessionTests):
-
-    def test_order_of_execution(self, testdir):
-        reprec = testdir.inline_runsource("""
-            l = []
-            def test_1():
-                l.append(1)
-            def test_2():
-                l.append(2)
-            def test_3():
-                assert l == [1,2]
-            class Testmygroup:
-                reslist = l
-                def test_1(self):
-                    self.reslist.append(1)
-                def test_2(self):
-                    self.reslist.append(2)
-                def test_3(self):
-                    self.reslist.append(3)
-                def test_4(self):
-                    assert self.reslist == [1,2,1,2,3]
-        """)
-        passed, skipped, failed = reprec.countoutcomes()
-        assert failed == skipped == 0
-        assert passed == 7
-        # also test listnames() here ...
-
-    def test_collect_only_with_various_situations(self, testdir):
-        p = testdir.makepyfile(
-            test_one="""
-                def test_one():
-                    raise ValueError()
-
-                class TestX:
-                    def test_method_one(self):
-                        pass
-
-                class TestY(TestX):
-                    pass
-            """,
-            test_two="""
-                import py
-                py.test.skip('xxx')
-            """,
-            test_three="xxxdsadsadsadsa",
-            __init__=""
-        )
-        reprec = testdir.inline_run('--collectonly', p.dirpath())
-
-        itemstarted = reprec.getcalls("pytest_itemcollected")
-        assert len(itemstarted) == 3
-        assert not reprec.getreports("pytest_runtest_logreport")
-        started = reprec.getcalls("pytest_collectstart")
-        finished = reprec.getreports("pytest_collectreport")
-        assert len(started) == len(finished)
-        assert len(started) == 8 # XXX extra TopCollector
-        colfail = [x for x in finished if x.failed]
-        colskipped = [x for x in finished if x.skipped]
-        assert len(colfail) == 1
-        assert len(colskipped) == 1
-
-    def test_minus_x_import_error(self, testdir):
-        testdir.makepyfile(__init__="")
-        testdir.makepyfile(test_one="xxxx", test_two="yyyy")
-        reprec = testdir.inline_run("-x", testdir.tmpdir)
-        finished = reprec.getreports("pytest_collectreport")
-        colfail = [x for x in finished if x.failed]
-        assert len(colfail) == 1
-
-
-def test_plugin_specify(testdir):
-    testdir.chdir()
-    config = py.test.raises(ImportError, """
-            testdir.parseconfig("-p", "nqweotexistent")
-    """)
-    #py.test.raises(ImportError,
-    #    "config.pluginmanager.do_configure(config)"
-    #)
-
-def test_plugin_already_exists(testdir):
-    config = testdir.parseconfig("-p", "session")
-    assert config.option.plugins == ['session']
-    config.pluginmanager.do_configure(config)
-
-def test_exclude(testdir):
-    hellodir = testdir.mkdir("hello")
-    hellodir.join("test_hello.py").write("x y syntaxerror")
-    hello2dir = testdir.mkdir("hello2")
-    hello2dir.join("test_hello2.py").write("x y syntaxerror")
-    testdir.makepyfile(test_ok="def test_pass(): pass")
-    result = testdir.runpytest("--ignore=hello", "--ignore=hello2")
-    assert result.ret == 0
-    result.stdout.fnmatch_lines(["*1 passed*"])

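The session tests above lean entirely on the pytester "testdir" funcarg: inline_run executes a generated test file in-process and returns a recorder of hook calls. A small sketch of that pattern, with an invented test body:

    def test_outcomes_are_recorded(testdir):
        p = testdir.makepyfile("""
            def test_ok():
                pass
            def test_bad():
                assert 0
        """)
        reprec = testdir.inline_run(p)           # run in-process, recording hook calls
        passed, skipped, failed = reprec.listoutcomes()
        assert len(passed) == 1
        assert len(failed) == 1
        assert not skipped
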
--- a/testing/plugin/test_python.py
+++ /dev/null
@@ -1,1176 +0,0 @@
-import pytest, py, sys
-from _pytest import python as funcargs
-
-class TestModule:
-    def test_failing_import(self, testdir):
-        modcol = testdir.getmodulecol("import alksdjalskdjalkjals")
-        py.test.raises(ImportError, modcol.collect)
-        py.test.raises(ImportError, modcol.collect)
-
-    def test_import_duplicate(self, testdir):
-        a = testdir.mkdir("a")
-        b = testdir.mkdir("b")
-        p = a.ensure("test_whatever.py")
-        p.pyimport()
-        del py.std.sys.modules['test_whatever']
-        b.ensure("test_whatever.py")
-        result = testdir.runpytest()
-        result.stdout.fnmatch_lines([
-            "*import*mismatch*",
-            "*imported*test_whatever*",
-            "*%s*" % a.join("test_whatever.py"),
-            "*not the same*",
-            "*%s*" % b.join("test_whatever.py"),
-            "*HINT*",
-        ])
-
-    def test_syntax_error_in_module(self, testdir):
-        modcol = testdir.getmodulecol("this is a syntax error")
-        py.test.raises(modcol.CollectError, modcol.collect)
-        py.test.raises(modcol.CollectError, modcol.collect)
-
-    def test_module_considers_pluginmanager_at_import(self, testdir):
-        modcol = testdir.getmodulecol("pytest_plugins='xasdlkj',")
-        py.test.raises(ImportError, "modcol.obj")
-
-class TestClass:
-    def test_class_with_init_not_collected(self, testdir):
-        modcol = testdir.getmodulecol("""
-            class TestClass1:
-                def __init__(self):
-                    pass
-            class TestClass2(object):
-                def __init__(self):
-                    pass
-        """)
-        l = modcol.collect()
-        assert len(l) == 0
-
-class TestGenerator:
-    def test_generative_functions(self, testdir):
-        modcol = testdir.getmodulecol("""
-            def func1(arg, arg2):
-                assert arg == arg2
-
-            def test_gen():
-                yield func1, 17, 3*5
-                yield func1, 42, 6*7
-        """)
-        colitems = modcol.collect()
-        assert len(colitems) == 1
-        gencol = colitems[0]
-        assert isinstance(gencol, pytest.Generator)
-        gencolitems = gencol.collect()
-        assert len(gencolitems) == 2
-        assert isinstance(gencolitems[0], pytest.Function)
-        assert isinstance(gencolitems[1], pytest.Function)
-        assert gencolitems[0].name == '[0]'
-        assert gencolitems[0].obj.__name__ == 'func1'
-
-    def test_generative_methods(self, testdir):
-        modcol = testdir.getmodulecol("""
-            def func1(arg, arg2):
-                assert arg == arg2
-            class TestGenMethods:
-                def test_gen(self):
-                    yield func1, 17, 3*5
-                    yield func1, 42, 6*7
-        """)
-        gencol = modcol.collect()[0].collect()[0].collect()[0]
-        assert isinstance(gencol, pytest.Generator)
-        gencolitems = gencol.collect()
-        assert len(gencolitems) == 2
-        assert isinstance(gencolitems[0], pytest.Function)
-        assert isinstance(gencolitems[1], pytest.Function)
-        assert gencolitems[0].name == '[0]'
-        assert gencolitems[0].obj.__name__ == 'func1'
-
-    def test_generative_functions_with_explicit_names(self, testdir):
-        modcol = testdir.getmodulecol("""
-            def func1(arg, arg2):
-                assert arg == arg2
-
-            def test_gen():
-                yield "seventeen", func1, 17, 3*5
-                yield "fortytwo", func1, 42, 6*7
-        """)
-        colitems = modcol.collect()
-        assert len(colitems) == 1
-        gencol = colitems[0]
-        assert isinstance(gencol, pytest.Generator)
-        gencolitems = gencol.collect()
-        assert len(gencolitems) == 2
-        assert isinstance(gencolitems[0], pytest.Function)
-        assert isinstance(gencolitems[1], pytest.Function)
-        assert gencolitems[0].name == "['seventeen']"
-        assert gencolitems[0].obj.__name__ == 'func1'
-        assert gencolitems[1].name == "['fortytwo']"
-        assert gencolitems[1].obj.__name__ == 'func1'
-
-    def test_generative_functions_unique_explicit_names(self, testdir):
-        # generative
-        modcol = testdir.getmodulecol("""
-            def func(): pass
-            def test_gen():
-                yield "name", func
-                yield "name", func
-        """)
-        colitems = modcol.collect()
-        assert len(colitems) == 1
-        gencol = colitems[0]
-        assert isinstance(gencol, pytest.Generator)
-        py.test.raises(ValueError, "gencol.collect()")
-
-    def test_generative_methods_with_explicit_names(self, testdir):
-        modcol = testdir.getmodulecol("""
-            def func1(arg, arg2):
-                assert arg == arg2
-            class TestGenMethods:
-                def test_gen(self):
-                    yield "m1", func1, 17, 3*5
-                    yield "m2", func1, 42, 6*7
-        """)
-        gencol = modcol.collect()[0].collect()[0].collect()[0]
-        assert isinstance(gencol, pytest.Generator)
-        gencolitems = gencol.collect()
-        assert len(gencolitems) == 2
-        assert isinstance(gencolitems[0], pytest.Function)
-        assert isinstance(gencolitems[1], pytest.Function)
-        assert gencolitems[0].name == "['m1']"
-        assert gencolitems[0].obj.__name__ == 'func1'
-        assert gencolitems[1].name == "['m2']"
-        assert gencolitems[1].obj.__name__ == 'func1'
-
-    def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir):
-        o = testdir.makepyfile("""
-            def test_generative_order_of_execution():
-                import py
-                test_list = []
-                expected_list = list(range(6))
-
-                def list_append(item):
-                    test_list.append(item)
-
-                def assert_order_of_execution():
-                    py.builtin.print_('expected order', expected_list)
-                    py.builtin.print_('but got       ', test_list)
-                    assert test_list == expected_list
-
-                for i in expected_list:
-                    yield list_append, i
-                yield assert_order_of_execution
-        """)
-        reprec = testdir.inline_run(o)
-        passed, skipped, failed = reprec.countoutcomes()
-        assert passed == 7
-        assert not skipped and not failed
-
-    def test_order_of_execution_generator_different_codeline(self, testdir):
-        o = testdir.makepyfile("""
-            def test_generative_tests_different_codeline():
-                import py
-                test_list = []
-                expected_list = list(range(3))
-
-                def list_append_2():
-                    test_list.append(2)
-
-                def list_append_1():
-                    test_list.append(1)
-
-                def list_append_0():
-                    test_list.append(0)
-
-                def assert_order_of_execution():
-                    py.builtin.print_('expected order', expected_list)
-                    py.builtin.print_('but got       ', test_list)
-                    assert test_list == expected_list
-
-                yield list_append_0
-                yield list_append_1
-                yield list_append_2
-                yield assert_order_of_execution
-        """)
-        reprec = testdir.inline_run(o)
-        passed, skipped, failed = reprec.countoutcomes()
-        assert passed == 4
-        assert not skipped and not failed
-
-class TestFunction:
-    def test_getmodulecollector(self, testdir):
-        item = testdir.getitem("def test_func(): pass")
-        modcol = item.getparent(pytest.Module)
-        assert isinstance(modcol, pytest.Module)
-        assert hasattr(modcol.obj, 'test_func')
-
-    def test_function_equality(self, testdir, tmpdir):
-        config = testdir.reparseconfig()
-        session = testdir.Session(config)
-        f1 = pytest.Function(name="name", config=config,
-                args=(1,), callobj=isinstance, session=session)
-        f2 = pytest.Function(name="name",config=config,
-                args=(1,), callobj=py.builtin.callable, session=session)
-        assert not f1 == f2
-        assert f1 != f2
-        f3 = pytest.Function(name="name", config=config,
-                args=(1,2), callobj=py.builtin.callable, session=session)
-        assert not f3 == f2
-        assert f3 != f2
-
-        assert not f3 == f1
-        assert f3 != f1
-
-        f1_b = pytest.Function(name="name", config=config,
-              args=(1,), callobj=isinstance, session=session)
-        assert f1 == f1_b
-        assert not f1 != f1_b
-
-    def test_function_equality_with_callspec(self, testdir, tmpdir):
-        config = testdir.reparseconfig()
-        class callspec1:
-            param = 1
-            funcargs = {}
-            id = "hello"
-        class callspec2:
-            param = 1
-            funcargs = {}
-            id = "world"
-        session = testdir.Session(config)
-        f5 = pytest.Function(name="name", config=config,
-            callspec=callspec1, callobj=isinstance, session=session)
-        f5b = pytest.Function(name="name", config=config,
-            callspec=callspec2, callobj=isinstance, session=session)
-        assert f5 != f5b
-        assert not (f5 == f5b)
-
-    def test_pyfunc_call(self, testdir):
-        item = testdir.getitem("def test_func(): raise ValueError")
-        config = item.config
-        class MyPlugin1:
-            def pytest_pyfunc_call(self, pyfuncitem):
-                raise ValueError
-        class MyPlugin2:
-            def pytest_pyfunc_call(self, pyfuncitem):
-                return True
-        config.pluginmanager.register(MyPlugin1())
-        config.pluginmanager.register(MyPlugin2())
-        config.hook.pytest_pyfunc_call(pyfuncitem=item)
-
-class TestSorting:
-    def test_check_equality(self, testdir):
-        modcol = testdir.getmodulecol("""
-            def test_pass(): pass
-            def test_fail(): assert 0
-        """)
-        fn1 = testdir.collect_by_name(modcol, "test_pass")
-        assert isinstance(fn1, pytest.Function)
-        fn2 = testdir.collect_by_name(modcol, "test_pass")
-        assert isinstance(fn2, pytest.Function)
-
-        assert fn1 == fn2
-        assert fn1 != modcol
-        if py.std.sys.version_info < (3, 0):
-            assert cmp(fn1, fn2) == 0
-        assert hash(fn1) == hash(fn2)
-
-        fn3 = testdir.collect_by_name(modcol, "test_fail")
-        assert isinstance(fn3, pytest.Function)
-        assert not (fn1 == fn3)
-        assert fn1 != fn3
-
-        for fn in fn1,fn2,fn3:
-            assert fn != 3
-            assert fn != modcol
-            assert fn != [1,2,3]
-            assert [1,2,3] != fn
-            assert modcol != fn
-
-    def test_allow_sane_sorting_for_decorators(self, testdir):
-        modcol = testdir.getmodulecol("""
-            def dec(f):
-                g = lambda: f(2)
-                g.place_as = f
-                return g
-
-
-            def test_b(y):
-                pass
-            test_b = dec(test_b)
-
-            def test_a(y):
-                pass
-            test_a = dec(test_a)
-        """)
-        colitems = modcol.collect()
-        assert len(colitems) == 2
-        assert [item.name for item in colitems] == ['test_b', 'test_a']
-
-
-class TestConftestCustomization:
-    def test_pytest_pycollect_module(self, testdir):
-        testdir.makeconftest("""
-            import pytest
-            class MyModule(pytest.Module):
-                pass
-            def pytest_pycollect_makemodule(path, parent):
-                if path.basename == "test_xyz.py":
-                    return MyModule(path, parent)
-        """)
-        testdir.makepyfile("def some(): pass")
-        testdir.makepyfile(test_xyz="")
-        result = testdir.runpytest("--collectonly")
-        result.stdout.fnmatch_lines([
-            "*<Module*test_pytest*",
-            "*<MyModule*xyz*",
-        ])
-
-    def test_pytest_pycollect_makeitem(self, testdir):
-        testdir.makeconftest("""
-            import pytest
-            class MyFunction(pytest.Function):
-                pass
-            def pytest_pycollect_makeitem(collector, name, obj):
-                if name == "some":
-                    return MyFunction(name, collector)
-        """)
-        testdir.makepyfile("def some(): pass")
-        result = testdir.runpytest("--collectonly")
-        result.stdout.fnmatch_lines([
-            "*MyFunction*some*",
-        ])
-
-    def test_makeitem_non_underscore(self, testdir, monkeypatch):
-        modcol = testdir.getmodulecol("def _hello(): pass")
-        l = []
-        monkeypatch.setattr(pytest.Module, 'makeitem',
-            lambda self, name, obj: l.append(name))
-        l = modcol.collect()
-        assert '_hello' not in l
-
-def test_setup_only_available_in_subdir(testdir):
-    sub1 = testdir.mkpydir("sub1")
-    sub2 = testdir.mkpydir("sub2")
-    sub1.join("conftest.py").write(py.code.Source("""
-        import py
-        def pytest_runtest_setup(item):
-            assert item.fspath.purebasename == "test_in_sub1"
-        def pytest_runtest_call(item):
-            assert item.fspath.purebasename == "test_in_sub1"
-        def pytest_runtest_teardown(item):
-            assert item.fspath.purebasename == "test_in_sub1"
-    """))
-    sub2.join("conftest.py").write(py.code.Source("""
-        import py
-        def pytest_runtest_setup(item):
-            assert item.fspath.purebasename == "test_in_sub2"
-        def pytest_runtest_call(item):
-            assert item.fspath.purebasename == "test_in_sub2"
-        def pytest_runtest_teardown(item):
-            assert item.fspath.purebasename == "test_in_sub2"
-    """))
-    sub1.join("test_in_sub1.py").write("def test_1(): pass")
-    sub2.join("test_in_sub2.py").write("def test_2(): pass")
-    result = testdir.runpytest("-v", "-s")
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
-
-def test_generate_tests_only_done_in_subdir(testdir):
-    sub1 = testdir.mkpydir("sub1")
-    sub2 = testdir.mkpydir("sub2")
-    sub1.join("conftest.py").write(py.code.Source("""
-        def pytest_generate_tests(metafunc):
-            assert metafunc.function.__name__ == "test_1"
-    """))
-    sub2.join("conftest.py").write(py.code.Source("""
-        def pytest_generate_tests(metafunc):
-            assert metafunc.function.__name__ == "test_2"
-    """))
-    sub1.join("test_in_sub1.py").write("def test_1(): pass")
-    sub2.join("test_in_sub2.py").write("def test_2(): pass")
-    result = testdir.runpytest("-v", "-s", sub1, sub2, sub1)
-    result.stdout.fnmatch_lines([
-        "*3 passed*"
-    ])
-
-def test_modulecol_roundtrip(testdir):
-    modcol = testdir.getmodulecol("pass", withinit=True)
-    trail = modcol.nodeid
-    newcol = modcol.session.perform_collect([trail], genitems=0)[0]
-    assert modcol.name == newcol.name
-
-
-class TestTracebackCutting:
-    def test_skip_simple(self):
-        excinfo = py.test.raises(py.test.skip.Exception, 'py.test.skip("xxx")')
-        assert excinfo.traceback[-1].frame.code.name == "skip"
-        assert excinfo.traceback[-1].ishidden()
-
-    def test_traceback_argsetup(self, testdir):
-        testdir.makeconftest("""
-            def pytest_funcarg__hello(request):
-                raise ValueError("xyz")
-        """)
-        p = testdir.makepyfile("def test(hello): pass")
-        result = testdir.runpytest(p)
-        assert result.ret != 0
-        out = result.stdout.str()
-        assert out.find("xyz") != -1
-        assert out.find("conftest.py:2: ValueError") != -1
-        numentries = out.count("_ _ _") # separator for traceback entries
-        assert numentries == 0
-
-        result = testdir.runpytest("--fulltrace", p)
-        out = result.stdout.str()
-        assert out.find("conftest.py:2: ValueError") != -1
-        numentries = out.count("_ _ _ _") # separator for traceback entries
-        assert numentries >3
-
-    def test_traceback_error_during_import(self, testdir):
-        testdir.makepyfile("""
-            x = 1
-            x = 2
-            x = 17
-            asd
-        """)
-        result = testdir.runpytest()
-        assert result.ret != 0
-        out = result.stdout.str()
-        assert "x = 1" not in out
-        assert "x = 2" not in out
-        result.stdout.fnmatch_lines([
-            ">*asd*",
-            "E*NameError*",
-        ])
-        result = testdir.runpytest("--fulltrace")
-        out = result.stdout.str()
-        assert "x = 1" in out
-        assert "x = 2" in out
-        result.stdout.fnmatch_lines([
-            ">*asd*",
-            "E*NameError*",
-        ])
-
-def test_getfuncargnames():
-    def f(): pass
-    assert not funcargs.getfuncargnames(f)
-    def g(arg): pass
-    assert funcargs.getfuncargnames(g) == ['arg']
-    def h(arg1, arg2="hello"): pass
-    assert funcargs.getfuncargnames(h) == ['arg1']
-    def h(arg1, arg2, arg3="hello"): pass
-    assert funcargs.getfuncargnames(h) == ['arg1', 'arg2']
-    class A:
-        def f(self, arg1, arg2="hello"):
-            pass
-    assert funcargs.getfuncargnames(A().f) == ['arg1']
-    if sys.version_info < (3,0):
-        assert funcargs.getfuncargnames(A.f) == ['arg1']
-
-def test_callspec_repr():
-    cs = funcargs.CallSpec({}, 'hello', 1)
-    repr(cs)
-    cs = funcargs.CallSpec({}, 'hello', funcargs._notexists)
-    repr(cs)
-
-class TestFillFuncArgs:
-    def test_funcarg_lookupfails(self, testdir):
-        testdir.makeconftest("""
-            def pytest_funcarg__xyzsomething(request):
-                return 42
-        """)
-        item = testdir.getitem("def test_func(some): pass")
-        exc = py.test.raises(funcargs.FuncargRequest.LookupError,
-            "funcargs.fillfuncargs(item)")
-        s = str(exc.value)
-        assert s.find("xyzsomething") != -1
-
-    def test_funcarg_lookup_default(self, testdir):
-        item = testdir.getitem("def test_func(some, other=42): pass")
-        class Provider:
-            def pytest_funcarg__some(self, request):
-                return request.function.__name__
-        item.config.pluginmanager.register(Provider())
-        funcargs.fillfuncargs(item)
-        assert len(item.funcargs) == 1
-
-    def test_funcarg_basic(self, testdir):
-        item = testdir.getitem("def test_func(some, other): pass")
-        class Provider:
-            def pytest_funcarg__some(self, request):
-                return request.function.__name__
-            def pytest_funcarg__other(self, request):
-                return 42
-        item.config.pluginmanager.register(Provider())
-        funcargs.fillfuncargs(item)
-        assert len(item.funcargs) == 2
-        assert item.funcargs['some'] == "test_func"
-        assert item.funcargs['other'] == 42
-
-    def test_funcarg_lookup_modulelevel(self, testdir):
-        modcol = testdir.getmodulecol("""
-            def pytest_funcarg__something(request):
-                return request.function.__name__
-
-            class TestClass:
-                def test_method(self, something):
-                    pass
-            def test_func(something):
-                pass
-        """)
-        item1, item2 = testdir.genitems([modcol])
-        funcargs.fillfuncargs(item1)
-        assert item1.funcargs['something'] ==  "test_method"
-        funcargs.fillfuncargs(item2)
-        assert item2.funcargs['something'] ==  "test_func"
-
-    def test_funcarg_lookup_classlevel(self, testdir):
-        p = testdir.makepyfile("""
-            class TestClass:
-                def pytest_funcarg__something(self, request):
-                    return request.instance
-                def test_method(self, something):
-                    assert something is self
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 passed*"
-        ])
-
-    def test_fillfuncargs_exposed(self, testdir):
-        item = testdir.getitem("def test_func(some, other=42): pass")
-        class Provider:
-            def pytest_funcarg__some(self, request):
-                return request.function.__name__
-        item.config.pluginmanager.register(Provider())
-        if hasattr(item, '_args'):
-            del item._args
-        pytest._fillfuncargs(item)
-        assert len(item.funcargs) == 1
-
-class TestRequest:
-    def test_request_attributes(self, testdir):
-        item = testdir.getitem("""
-            def pytest_funcarg__something(request): pass
-            def test_func(something): pass
-        """)
-        req = funcargs.FuncargRequest(item)
-        assert req.function == item.obj
-        assert req.keywords is item.keywords
-        assert hasattr(req.module, 'test_func')
-        assert req.cls is None
-        assert req.function.__name__ == "test_func"
-        assert req.config == item.config
-        assert repr(req).find(req.function.__name__) != -1
-
-    def test_request_attributes_method(self, testdir):
-        item, = testdir.getitems("""
-            class TestB:
-                def test_func(self, something):
-                    pass
-        """)
-        req = funcargs.FuncargRequest(item)
-        assert req.cls.__name__ == "TestB"
-        assert req.instance.__class__ == req.cls
-
-    def XXXtest_request_contains_funcarg_name2factory(self, testdir):
-        modcol = testdir.getmodulecol("""
-            def pytest_funcarg__something(request):
-                pass
-            class TestClass:
-                def test_method(self, something):
-                    pass
-        """)
-        item1, = testdir.genitems([modcol])
-        assert item1.name == "test_method"
-        name2factory = funcargs.FuncargRequest(item1)._name2factory
-        assert len(name2factory) == 1
-        assert name2factory[0].__name__ == "pytest_funcarg__something"
-
-    def test_getfuncargvalue_recursive(self, testdir):
-        testdir.makeconftest("""
-            def pytest_funcarg__something(request):
-                return 1
-        """)
-        item = testdir.getitem("""
-            def pytest_funcarg__something(request):
-                return request.getfuncargvalue("something") + 1
-            def test_func(something):
-                assert something == 2
-        """)
-        req = funcargs.FuncargRequest(item)
-        val = req.getfuncargvalue("something")
-        assert val == 2
-
-    def test_getfuncargvalue(self, testdir):
-        item = testdir.getitem("""
-            l = [2]
-            def pytest_funcarg__something(request): return 1
-            def pytest_funcarg__other(request):
-                return l.pop()
-            def test_func(something): pass
-        """)
-        req = funcargs.FuncargRequest(item)
-        py.test.raises(req.LookupError, req.getfuncargvalue, "notexists")
-        val = req.getfuncargvalue("something")
-        assert val == 1
-        val = req.getfuncargvalue("something")
-        assert val == 1
-        val2 = req.getfuncargvalue("other")
-        assert val2 == 2
-        val2 = req.getfuncargvalue("other")  # see about caching
-        assert val2 == 2
-        req._fillfuncargs()
-        assert item.funcargs == {'something': 1}
-
-    def test_request_addfinalizer(self, testdir):
-        item = testdir.getitem("""
-            teardownlist = []
-            def pytest_funcarg__something(request):
-                request.addfinalizer(lambda: teardownlist.append(1))
-            def test_func(something): pass
-        """)
-        req = funcargs.FuncargRequest(item)
-        req.config._setupstate.prepare(item) # XXX
-        req._fillfuncargs()
-        # successively check finalization calls
-        teardownlist = item.getparent(pytest.Module).obj.teardownlist
-        ss = item.config._setupstate
-        assert not teardownlist
-        ss.teardown_exact(item)
-        print(ss.stack)
-        assert teardownlist == [1]
-
-    def test_request_addfinalizer_partial_setup_failure(self, testdir):
-        p = testdir.makepyfile("""
-            l = []
-            def pytest_funcarg__something(request):
-                request.addfinalizer(lambda: l.append(None))
-            def test_func(something, missingarg):
-                pass
-            def test_second():
-                assert len(l) == 1
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 passed*1 error*"
-            ])
-
-    def test_request_getmodulepath(self, testdir):
-        modcol = testdir.getmodulecol("def test_somefunc(): pass")
-        item, = testdir.genitems([modcol])
-        req = funcargs.FuncargRequest(item)
-        assert req.fspath == modcol.fspath
-
-def test_applymarker(testdir):
-    item1,item2 = testdir.getitems("""
-        class TestClass:
-            def test_func1(self, something):
-                pass
-            def test_func2(self, something):
-                pass
-    """)
-    req1 = funcargs.FuncargRequest(item1)
-    assert 'xfail' not in item1.keywords
-    req1.applymarker(py.test.mark.xfail)
-    assert 'xfail' in item1.keywords
-    assert 'skipif' not in item1.keywords
-    req1.applymarker(py.test.mark.skipif)
-    assert 'skipif' in item1.keywords
-    py.test.raises(ValueError, "req1.applymarker(42)")
-
-class TestRequestCachedSetup:
-    def test_request_cachedsetup(self, testdir):
-        item1,item2 = testdir.getitems("""
-            class TestClass:
-                def test_func1(self, something):
-                    pass
-                def test_func2(self, something):
-                    pass
-        """)
-        req1 = funcargs.FuncargRequest(item1)
-        l = ["hello"]
-        def setup():
-            return l.pop()
-        ret1 = req1.cached_setup(setup)
-        assert ret1 == "hello"
-        ret1b = req1.cached_setup(setup)
-        assert ret1 == ret1b
-        req2 = funcargs.FuncargRequest(item2)
-        ret2 = req2.cached_setup(setup)
-        assert ret2 == ret1
-
-    def test_request_cachedsetup_extrakey(self, testdir):
-        item1 = testdir.getitem("def test_func(): pass")
-        req1 = funcargs.FuncargRequest(item1)
-        l = ["hello", "world"]
-        def setup():
-            return l.pop()
-        ret1 = req1.cached_setup(setup, extrakey=1)
-        ret2 = req1.cached_setup(setup, extrakey=2)
-        assert ret2 == "hello"
-        assert ret1 == "world"
-        ret1b = req1.cached_setup(setup, extrakey=1)
-        ret2b = req1.cached_setup(setup, extrakey=2)
-        assert ret1 == ret1b
-        assert ret2 == ret2b
-
-    def test_request_cachedsetup_cache_deletion(self, testdir):
-        item1 = testdir.getitem("def test_func(): pass")
-        req1 = funcargs.FuncargRequest(item1)
-        l = []
-        def setup():
-            l.append("setup")
-        def teardown(val):
-            l.append("teardown")
-        ret1 = req1.cached_setup(setup, teardown, scope="function")
-        assert l == ['setup']
-        # artificial call of finalizer
-        req1.config._setupstate._callfinalizers(item1)
-        assert l == ["setup", "teardown"]
-        ret2 = req1.cached_setup(setup, teardown, scope="function")
-        assert l == ["setup", "teardown", "setup"]
-        req1.config._setupstate._callfinalizers(item1)
-        assert l == ["setup", "teardown", "setup", "teardown"]
-
-    def test_request_cached_setup_two_args(self, testdir):
-        testdir.makepyfile("""
-            def pytest_funcarg__arg1(request):
-                return request.cached_setup(lambda: 42)
-            def pytest_funcarg__arg2(request):
-                return request.cached_setup(lambda: 17)
-            def test_two_different_setups(arg1, arg2):
-                assert arg1 != arg2
-        """)
-        result = testdir.runpytest("-v")
-        result.stdout.fnmatch_lines([
-            "*1 passed*"
-        ])
-
-    def test_request_cached_setup_getfuncargvalue(self, testdir):
-        testdir.makepyfile("""
-            def pytest_funcarg__arg1(request):
-                arg1 = request.getfuncargvalue("arg2")
-                return request.cached_setup(lambda: arg1 + 1)
-            def pytest_funcarg__arg2(request):
-                return request.cached_setup(lambda: 10)
-            def test_two_funcarg(arg1):
-                assert arg1 == 11
-        """)
-        result = testdir.runpytest("-v")
-        result.stdout.fnmatch_lines([
-            "*1 passed*"
-        ])
-
-    def test_request_cached_setup_functional(self, testdir):
-        testdir.makepyfile(test_0="""
-            l = []
-            def pytest_funcarg__something(request):
-                val = request.cached_setup(fsetup, fteardown)
-                return val
-            def fsetup(mycache=[1]):
-                l.append(mycache.pop())
-                return l
-            def fteardown(something):
-                l.remove(something[0])
-                l.append(2)
-            def test_list_once(something):
-                assert something == [1]
-            def test_list_twice(something):
-                assert something == [1]
-        """)
-        testdir.makepyfile(test_1="""
-            import test_0 # should have run already
-            def test_check_test0_has_teardown_correct():
-                assert test_0.l == [2]
-        """)
-        result = testdir.runpytest("-v")
-        result.stdout.fnmatch_lines([
-            "*3 passed*"
-        ])
-
-class TestMetafunc:
-    def test_no_funcargs(self, testdir):
-        def function(): pass
-        metafunc = funcargs.Metafunc(function)
-        assert not metafunc.funcargnames
-
-    def test_function_basic(self):
-        def func(arg1, arg2="qwe"): pass
-        metafunc = funcargs.Metafunc(func)
-        assert len(metafunc.funcargnames) == 1
-        assert 'arg1' in metafunc.funcargnames
-        assert metafunc.function is func
-        assert metafunc.cls is None
-
-    def test_addcall_no_args(self):
-        def func(arg1): pass
-        metafunc = funcargs.Metafunc(func)
-        metafunc.addcall()
-        assert len(metafunc._calls) == 1
-        call = metafunc._calls[0]
-        assert call.id == "0"
-        assert not hasattr(call, 'param')
-
-    def test_addcall_id(self):
-        def func(arg1): pass
-        metafunc = funcargs.Metafunc(func)
-        py.test.raises(ValueError, "metafunc.addcall(id=None)")
-
-        metafunc.addcall(id=1)
-        py.test.raises(ValueError, "metafunc.addcall(id=1)")
-        py.test.raises(ValueError, "metafunc.addcall(id='1')")
-        metafunc.addcall(id=2)
-        assert len(metafunc._calls) == 2
-        assert metafunc._calls[0].id == "1"
-        assert metafunc._calls[1].id == "2"
-
-    def test_addcall_param(self):
-        def func(arg1): pass
-        metafunc = funcargs.Metafunc(func)
-        class obj: pass
-        metafunc.addcall(param=obj)
-        metafunc.addcall(param=obj)
-        metafunc.addcall(param=1)
-        assert len(metafunc._calls) == 3
-        assert metafunc._calls[0].param == obj
-        assert metafunc._calls[1].param == obj
-        assert metafunc._calls[2].param == 1
-
-    def test_addcall_funcargs(self):
-        def func(arg1): pass
-        metafunc = funcargs.Metafunc(func)
-        class obj: pass
-        metafunc.addcall(funcargs={"x": 2})
-        metafunc.addcall(funcargs={"x": 3})
-        assert len(metafunc._calls) == 2
-        assert metafunc._calls[0].funcargs == {'x': 2}
-        assert metafunc._calls[1].funcargs == {'x': 3}
-        assert not hasattr(metafunc._calls[1], 'param')
-
-class TestGenfuncFunctional:
-    def test_attributes(self, testdir):
-        p = testdir.makepyfile("""
-            # assumes that generate/provide runs in the same process
-            import py
-            def pytest_generate_tests(metafunc):
-                metafunc.addcall(param=metafunc)
-
-            def pytest_funcarg__metafunc(request):
-                assert request._pyfuncitem._genid == "0"
-                return request.param
-
-            def test_function(metafunc, pytestconfig):
-                assert metafunc.config == pytestconfig
-                assert metafunc.module.__name__ == __name__
-                assert metafunc.function == test_function
-                assert metafunc.cls is None
-
-            class TestClass:
-                def test_method(self, metafunc, pytestconfig):
-                    assert metafunc.config == pytestconfig
-                    assert metafunc.module.__name__ == __name__
-                    if py.std.sys.version_info > (3, 0):
-                        unbound = TestClass.test_method
-                    else:
-                        unbound = TestClass.test_method.im_func
-                    # XXX actually have an unbound test function here?
-                    assert metafunc.function == unbound
-                    assert metafunc.cls == TestClass
-        """)
-        result = testdir.runpytest(p, "-v")
-        result.stdout.fnmatch_lines([
-            "*2 passed in*",
-        ])
-
-    def test_addcall_with_two_funcargs_generators(self, testdir):
-        testdir.makeconftest("""
-            def pytest_generate_tests(metafunc):
-                assert "arg1" in metafunc.funcargnames
-                metafunc.addcall(funcargs=dict(arg1=1, arg2=2))
-        """)
-        p = testdir.makepyfile("""
-            def pytest_generate_tests(metafunc):
-                metafunc.addcall(funcargs=dict(arg1=1, arg2=1))
-
-            class TestClass:
-                def test_myfunc(self, arg1, arg2):
-                    assert arg1 == arg2
-        """)
-        result = testdir.runpytest("-v", p)
-        result.stdout.fnmatch_lines([
-            "*test_myfunc*0*PASS*",
-            "*test_myfunc*1*FAIL*",
-            "*1 failed, 1 passed*"
-        ])
-
-    def test_two_functions(self, testdir):
-        p = testdir.makepyfile("""
-            def pytest_generate_tests(metafunc):
-                metafunc.addcall(param=10)
-                metafunc.addcall(param=20)
-
-            def pytest_funcarg__arg1(request):
-                return request.param
-
-            def test_func1(arg1):
-                assert arg1 == 10
-            def test_func2(arg1):
-                assert arg1 in (10, 20)
-        """)
-        result = testdir.runpytest("-v", p)
-        result.stdout.fnmatch_lines([
-            "*test_func1*0*PASS*",
-            "*test_func1*1*FAIL*",
-            "*test_func2*PASS*",
-            "*1 failed, 3 passed*"
-        ])
-
-    def test_generate_plugin_and_module(self, testdir):
-        testdir.makeconftest("""
-            def pytest_generate_tests(metafunc):
-                assert "arg1" in metafunc.funcargnames
-                metafunc.addcall(id="world", param=(2,100))
-        """)
-        p = testdir.makepyfile("""
-            def pytest_generate_tests(metafunc):
-                metafunc.addcall(param=(1,1), id="hello")
-
-            def pytest_funcarg__arg1(request):
-                return request.param[0]
-            def pytest_funcarg__arg2(request):
-                return request.param[1]
-
-            class TestClass:
-                def test_myfunc(self, arg1, arg2):
-                    assert arg1 == arg2
-        """)
-        result = testdir.runpytest("-v", p)
-        result.stdout.fnmatch_lines([
-            "*test_myfunc*hello*PASS*",
-            "*test_myfunc*world*FAIL*",
-            "*1 failed, 1 passed*"
-        ])
-
-    def test_generate_tests_in_class(self, testdir):
-        p = testdir.makepyfile("""
-            class TestClass:
-                def pytest_generate_tests(self, metafunc):
-                    metafunc.addcall(funcargs={'hello': 'world'}, id="hello")
-
-                def test_myfunc(self, hello):
-                    assert hello == "world"
-        """)
-        result = testdir.runpytest("-v", p)
-        result.stdout.fnmatch_lines([
-            "*test_myfunc*hello*PASS*",
-            "*1 passed*"
-        ])
-
-    def test_two_functions_not_same_instance(self, testdir):
-        p = testdir.makepyfile("""
-            def pytest_generate_tests(metafunc):
-                metafunc.addcall({'arg1': 10})
-                metafunc.addcall({'arg1': 20})
-
-            class TestClass:
-                def test_func(self, arg1):
-                    assert not hasattr(self, 'x')
-                    self.x = 1
-        """)
-        result = testdir.runpytest("-v", p)
-        result.stdout.fnmatch_lines([
-            "*test_func*0*PASS*",
-            "*test_func*1*PASS*",
-            "*2 pass*",
-        ])
-
-
-def test_conftest_funcargs_only_available_in_subdir(testdir):
-    sub1 = testdir.mkpydir("sub1")
-    sub2 = testdir.mkpydir("sub2")
-    sub1.join("conftest.py").write(py.code.Source("""
-        import py
-        def pytest_funcarg__arg1(request):
-            py.test.raises(Exception, "request.getfuncargvalue('arg2')")
-    """))
-    sub2.join("conftest.py").write(py.code.Source("""
-        import py
-        def pytest_funcarg__arg2(request):
-            py.test.raises(Exception, "request.getfuncargvalue('arg1')")
-    """))
-
-    sub1.join("test_in_sub1.py").write("def test_1(arg1): pass")
-    sub2.join("test_in_sub2.py").write("def test_2(arg2): pass")
-    result = testdir.runpytest("-v")
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
-
-def test_funcarg_non_pycollectobj(testdir): # rough jstests usage
-    testdir.makeconftest("""
-        import pytest
-        def pytest_pycollect_makeitem(collector, name, obj):
-            if name == "MyClass":
-                return MyCollector(name, parent=collector)
-        class MyCollector(pytest.Collector):
-            def reportinfo(self):
-                return self.fspath, 3, "xyz"
-    """)
-    modcol = testdir.getmodulecol("""
-        def pytest_funcarg__arg1(request):
-            return 42
-        class MyClass:
-            pass
-    """)
-    clscol = modcol.collect()[0]
-    clscol.obj = lambda arg1: None
-    clscol.funcargs = {}
-    funcargs.fillfuncargs(clscol)
-    assert clscol.funcargs['arg1'] == 42
-
-
-def test_funcarg_lookup_error(testdir):
-    p = testdir.makepyfile("""
-        def test_lookup_error(unknown):
-            pass
-    """)
-    result = testdir.runpytest()
-    result.stdout.fnmatch_lines([
-        "*ERROR at setup of test_lookup_error*",
-        "*def test_lookup_error(unknown):*",
-        "*LookupError: no factory found*unknown*",
-        "*available funcargs*",
-        "*1 error*",
-    ])
-    assert "INTERNAL" not in result.stdout.str()
-
-class TestReportInfo:
-    def test_itemreport_reportinfo(self, testdir, linecomp):
-        testdir.makeconftest("""
-            import pytest
-            class MyFunction(pytest.Function):
-                def reportinfo(self):
-                    return "ABCDE", 42, "custom"
-            def pytest_pycollect_makeitem(collector, name, obj):
-                if name == "test_func":
-                    return MyFunction(name, parent=collector)
-        """)
-        item = testdir.getitem("def test_func(): pass")
-        runner = item.config.pluginmanager.getplugin("runner")
-        assert item.location == ("ABCDE", 42, "custom")
-
-    def test_func_reportinfo(self, testdir):
-        item = testdir.getitem("def test_func(): pass")
-        fspath, lineno, modpath = item.reportinfo()
-        assert fspath == item.fspath
-        assert lineno == 0
-        assert modpath == "test_func"
-
-    def test_class_reportinfo(self, testdir):
-        modcol = testdir.getmodulecol("""
-            # lineno 0
-            class TestClass:
-                def test_hello(self): pass
-        """)
-        classcol = testdir.collect_by_name(modcol, "TestClass")
-        fspath, lineno, msg = classcol.reportinfo()
-        assert fspath == modcol.fspath
-        assert lineno == 1
-        assert msg == "TestClass"
-
-    def test_generator_reportinfo(self, testdir):
-        modcol = testdir.getmodulecol("""
-            # lineno 0
-            def test_gen():
-                def check(x):
-                    assert x
-                yield check, 3
-        """)
-        gencol = testdir.collect_by_name(modcol, "test_gen")
-        fspath, lineno, modpath = gencol.reportinfo()
-        assert fspath == modcol.fspath
-        assert lineno == 1
-        assert modpath == "test_gen"
-
-        genitem = gencol.collect()[0]
-        fspath, lineno, modpath = genitem.reportinfo()
-        assert fspath == modcol.fspath
-        assert lineno == 2
-        assert modpath == "test_gen[0]"
-        """
-            def test_func():
-                pass
-            def test_genfunc():
-                def check(x):
-                    pass
-                yield check, 3
-            class TestClass:
-                def test_method(self):
-                    pass
-       """
-
-def test_show_funcarg(testdir):
-    result = testdir.runpytest("--funcargs")
-    result.stdout.fnmatch_lines([
-            "*tmpdir*",
-            "*temporary directory*",
-        ]
-    )
-
-class TestRaises:
-    def test_raises(self):
-        source = "int('qwe')"
-        excinfo = py.test.raises(ValueError, source)
-        code = excinfo.traceback[-1].frame.code
-        s = str(code.fullsource)
-        assert s == source
-
-    def test_raises_exec(self):
-        py.test.raises(ValueError, "a,x = []")
-
-    def test_raises_syntax_error(self):
-        py.test.raises(SyntaxError, "qwe qwe qwe")
-
-    def test_raises_function(self):
-        py.test.raises(ValueError, int, 'hello')
-
-    def test_raises_callable_no_exception(self):
-        class A:
-            def __call__(self):
-                pass
-        try:
-            py.test.raises(ValueError, A())
-        except py.test.raises.Exception:
-            pass
-
-    @py.test.mark.skipif('sys.version < "2.5"')
-    def test_raises_as_contextmanager(self, testdir):
-        testdir.makepyfile("""
-            from __future__ import with_statement
-            import py
-
-            def test_simple():
-                with py.test.raises(ZeroDivisionError) as excinfo:
-                    assert isinstance(excinfo, py.code.ExceptionInfo)
-                    1/0
-                print (excinfo)
-                assert excinfo.type == ZeroDivisionError
-
-            def test_noraise():
-                with py.test.raises(py.test.raises.Exception):
-                    with py.test.raises(ValueError):
-                           int()
-
-            def test_raise_wrong_exception_passes_by():
-                with py.test.raises(ZeroDivisionError):
-                    with py.test.raises(ValueError):
-                           1/0
-        """)
-        result = testdir.runpytest()
-        result.stdout.fnmatch_lines([
-            '*3 passed*',
-        ])
-
-
-

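Much of the file above tests the funcarg machinery: factories named pytest_funcarg__NAME receive a request object offering cached_setup, getfuncargvalue and addfinalizer. Below is a hedged sketch of typical factories using only those calls; the db/tracer resource names are made up for illustration.

    # the resource names (db, tracer) are invented for illustration
    def pytest_funcarg__db(request):
        def setup():
            return {"connected": True}           # stand-in for an expensive resource
        def teardown(db):
            db["connected"] = False
        # cache one "db" per module; later requests in the same module reuse it
        return request.cached_setup(setup, teardown, scope="module")

    def pytest_funcarg__tracer(request):
        log = []
        request.addfinalizer(lambda: log.append("done"))   # runs after the test
        return log

    def test_uses_funcargs(db, tracer):
        assert db["connected"]
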
--- a/testing/plugin/test_mark.py
+++ /dev/null
@@ -1,295 +0,0 @@
-import py
-from _pytest.mark import MarkGenerator as Mark
-
-class TestMark:
-    def test_pytest_mark_notcallable(self):
-        mark = Mark()
-        py.test.raises((AttributeError, TypeError), "mark()")
-
-    def test_pytest_mark_bare(self):
-        mark = Mark()
-        def f():
-            pass
-        mark.hello(f)
-        assert f.hello
-
-    def test_pytest_mark_keywords(self):
-        mark = Mark()
-        def f():
-            pass
-        mark.world(x=3, y=4)(f)
-        assert f.world
-        assert f.world.kwargs['x'] == 3
-        assert f.world.kwargs['y'] == 4
-
-    def test_apply_multiple_and_merge(self):
-        mark = Mark()
-        def f():
-            pass
-        marker = mark.world
-        mark.world(x=3)(f)
-        assert f.world.kwargs['x'] == 3
-        mark.world(y=4)(f)
-        assert f.world.kwargs['x'] == 3
-        assert f.world.kwargs['y'] == 4
-        mark.world(y=1)(f)
-        assert f.world.kwargs['y'] == 1
-        assert len(f.world.args) == 0
-
-    def test_pytest_mark_positional(self):
-        mark = Mark()
-        def f():
-            pass
-        mark.world("hello")(f)
-        assert f.world.args[0] == "hello"
-        mark.world("world")(f)
-
-class TestFunctional:
-    def test_mark_per_function(self, testdir):
-        p = testdir.makepyfile("""
-            import py
-            @py.test.mark.hello
-            def test_hello():
-                assert hasattr(test_hello, 'hello')
-        """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines(["*passed*"])
-
-    def test_mark_per_module(self, testdir):
-        item = testdir.getitem("""
-            import py
-            pytestmark = py.test.mark.hello
-            def test_func():
-                pass
-        """)
-        keywords = item.keywords
-        assert 'hello' in keywords
-
-    def test_marklist_per_class(self, testdir):
-        item = testdir.getitem("""
-            import py
-            class TestClass:
-                pytestmark = [py.test.mark.hello, py.test.mark.world]
-                def test_func(self):
-                    assert TestClass.test_func.hello
-                    assert TestClass.test_func.world
-        """)
-        keywords = item.keywords
-        assert 'hello' in keywords
-
-    def test_marklist_per_module(self, testdir):
-        item = testdir.getitem("""
-            import py
-            pytestmark = [py.test.mark.hello, py.test.mark.world]
-            class TestClass:
-                def test_func(self):
-                    assert TestClass.test_func.hello
-                    assert TestClass.test_func.world
-        """)
-        keywords = item.keywords
-        assert 'hello' in keywords
-        assert 'world' in keywords
-
-    @py.test.mark.skipif("sys.version_info < (2,6)")
-    def test_mark_per_class_decorator(self, testdir):
-        item = testdir.getitem("""
-            import py
-            @py.test.mark.hello
-            class TestClass:
-                def test_func(self):
-                    assert TestClass.test_func.hello
-        """)
-        keywords = item.keywords
-        assert 'hello' in keywords
-
-    @py.test.mark.skipif("sys.version_info < (2,6)")
-    def test_mark_per_class_decorator_plus_existing_dec(self, testdir):
-        item = testdir.getitem("""
-            import py
-            @py.test.mark.hello
-            class TestClass:
-                pytestmark = py.test.mark.world
-                def test_func(self):
-                    assert TestClass.test_func.hello
-                    assert TestClass.test_func.world
-        """)
-        keywords = item.keywords
-        assert 'hello' in keywords
-        assert 'world' in keywords
-
-    def test_merging_markers(self, testdir):
-        p = testdir.makepyfile("""
-            import py
-            pytestmark = py.test.mark.hello("pos1", x=1, y=2)
-            class TestClass:
-                # classlevel overrides module level
-                pytestmark = py.test.mark.hello(x=3)
-                @py.test.mark.hello("pos0", z=4)
-                def test_func(self):
-                    pass
-        """)
-        items, rec = testdir.inline_genitems(p)
-        item, = items
-        keywords = item.keywords
-        marker = keywords['hello']
-        assert marker.args == ["pos0", "pos1"]
-        assert marker.kwargs == {'x': 3, 'y': 2, 'z': 4}
-
-    def test_mark_other(self, testdir):
-        py.test.raises(TypeError, '''
-            testdir.getitem("""
-                import py
-                class pytestmark:
-                    pass
-                def test_func():
-                    pass
-            """)
-        ''')
-
-    def test_mark_dynamically_in_funcarg(self, testdir):
-        testdir.makeconftest("""
-            import py
-            def pytest_funcarg__arg(request):
-                request.applymarker(py.test.mark.hello)
-            def pytest_terminal_summary(terminalreporter):
-                l = terminalreporter.stats['passed']
-                terminalreporter._tw.line("keyword: %s" % l[0].keywords)
-        """)
-        testdir.makepyfile("""
-            def test_func(arg):
-                pass
-        """)
-        result = testdir.runpytest()
-        result.stdout.fnmatch_lines([
-            "keyword: *hello*"
-        ])
-
-
-class Test_genitems:
-    def test_check_collect_hashes(self, testdir):
-        p = testdir.makepyfile("""
-            def test_1():
-                pass
-
-            def test_2():
-                pass
-        """)
-        p.copy(p.dirpath(p.purebasename + "2" + ".py"))
-        items, reprec = testdir.inline_genitems(p.dirpath())
-        assert len(items) == 4
-        for numi, i in enumerate(items):
-            for numj, j in enumerate(items):
-                if numj != numi:
-                    assert hash(i) != hash(j)
-                    assert i != j
-
-    def test_root_conftest_syntax_error(self, testdir):
-        # do we want to unify behaviour with
-        # test_subdir_conftest_error?
-        p = testdir.makepyfile(conftest="raise SyntaxError\n")
-        py.test.raises(SyntaxError, testdir.inline_genitems, p.dirpath())
-
-    def test_example_items1(self, testdir):
-        p = testdir.makepyfile('''
-            def testone():
-                pass
-
-            class TestX:
-                def testmethod_one(self):
-                    pass
-
-            class TestY(TestX):
-                pass
-        ''')
-        items, reprec = testdir.inline_genitems(p)
-        assert len(items) == 3
-        assert items[0].name == 'testone'
-        assert items[1].name == 'testmethod_one'
-        assert items[2].name == 'testmethod_one'
-
-        # let's also test getmodpath here
-        assert items[0].getmodpath() == "testone"
-        assert items[1].getmodpath() == "TestX.testmethod_one"
-        assert items[2].getmodpath() == "TestY.testmethod_one"
-
-        s = items[0].getmodpath(stopatmodule=False)
-        assert s.endswith("test_example_items1.testone")
-        print(s)
-
-
-class TestKeywordSelection:
-    def test_select_simple(self, testdir):
-        file_test = testdir.makepyfile("""
-            def test_one():
-                assert 0
-            class TestClass(object):
-                def test_method_one(self):
-                    assert 42 == 43
-        """)
-        def check(keyword, name):
-            reprec = testdir.inline_run("-s", "-k", keyword, file_test)
-            passed, skipped, failed = reprec.listoutcomes()
-            assert len(failed) == 1
-            assert failed[0].nodeid.split("::")[-1] == name
-            assert len(reprec.getcalls('pytest_deselected')) == 1
-
-        for keyword in ['test_one', 'est_on']:
-            #yield check, keyword, 'test_one'
-            check(keyword, 'test_one')
-        check('TestClass.test', 'test_method_one')
-
-    def test_select_extra_keywords(self, testdir):
-        p = testdir.makepyfile(test_select="""
-            def test_1():
-                pass
-            class TestClass:
-                def test_2(self):
-                    pass
-        """)
-        testdir.makepyfile(conftest="""
-            import py
-            def pytest_pycollect_makeitem(__multicall__, name):
-                if name == "TestClass":
-                    item = __multicall__.execute()
-                    item.keywords['xxx'] = True
-                    return item
-        """)
-        for keyword in ('xxx', 'xxx test_2', 'TestClass', 'xxx -test_1',
-                        'TestClass test_2', 'xxx TestClass test_2',):
-            reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword)
-            py.builtin.print_("keyword", repr(keyword))
-            passed, skipped, failed = reprec.listoutcomes()
-            assert len(passed) == 1
-            assert passed[0].nodeid.endswith("test_2")
-            dlist = reprec.getcalls("pytest_deselected")
-            assert len(dlist) == 1
-            assert dlist[0].items[0].name == 'test_1'
-
-    def test_select_starton(self, testdir):
-        threepass = testdir.makepyfile(test_threepass="""
-            def test_one(): assert 1
-            def test_two(): assert 1
-            def test_three(): assert 1
-        """)
-        reprec = testdir.inline_run("-k", "test_two:", threepass)
-        passed, skipped, failed = reprec.listoutcomes()
-        assert len(passed) == 2
-        assert not failed
-        dlist = reprec.getcalls("pytest_deselected")
-        assert len(dlist) == 1
-        item = dlist[0].items[0]
-        assert item.name == "test_one"
-
-
-    def test_keyword_extra(self, testdir):
-        p = testdir.makepyfile("""
-           def test_one():
-               assert 0
-           test_one.mykeyword = True
-        """)
-        reprec = testdir.inline_run("-k", "-mykeyword", p)
-        passed, skipped, failed = reprec.countoutcomes()
-        assert passed + skipped + failed == 0
-        reprec = testdir.inline_run("-k", "mykeyword", p)
-        passed, skipped, failed = reprec.countoutcomes()
-        assert failed == 1
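(Aside: the deleted test_mark.py boils down to one mechanism, a py.test.mark.<name> decorator stores the mark with its args/kwargs on the decorated function, and module- or class-level pytestmark feeds into item.keywords. A small sketch mirroring those tests, reusing their marker names hello/world; not part of the patch.)

    import py

    @py.test.mark.hello
    def test_func():
        pass

    # the decorator leaves a marker object on the function
    # (test_pytest_mark_bare / test_mark_per_function):
    assert test_func.hello

    @py.test.mark.world(x=3, y=4)
    def test_other():
        pass

    # keyword arguments are kept on the marker (test_pytest_mark_keywords):
    assert test_other.world.kwargs == {'x': 3, 'y': 4}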

--- /dev/null
+++ b/testing/test_doctest.py
@@ -0,0 +1,114 @@
+from _pytest.doctest import DoctestModule, DoctestTextfile
+import py
+
+pytest_plugins = ["pytest_doctest"]
+
+class TestDoctests:
+
+    def test_collect_testtextfile(self, testdir):
+        testdir.maketxtfile(whatever="")
+        checkfile = testdir.maketxtfile(test_something="""
+            alskdjalsdk
+            >>> i = 5
+            >>> i-1
+            4
+        """)
+        for x in (testdir.tmpdir, checkfile):
+            #print "checking that %s returns custom items" % (x,)
+            items, reprec = testdir.inline_genitems(x)
+            assert len(items) == 1
+            assert isinstance(items[0], DoctestTextfile)
+
+    def test_collect_module(self, testdir):
+        path = testdir.makepyfile(whatever="#")
+        for p in (path, testdir.tmpdir):
+            items, reprec = testdir.inline_genitems(p,
+                '--doctest-modules')
+            assert len(items) == 1
+            assert isinstance(items[0], DoctestModule)
+
+    def test_simple_doctestfile(self, testdir):
+        p = testdir.maketxtfile(test_doc="""
+            >>> x = 1
+            >>> x == 1
+            False
+        """)
+        reprec = testdir.inline_run(p)
+        reprec.assertoutcome(failed=1)
+
+    def test_new_pattern(self, testdir):
+        p = testdir.maketxtfile(xdoc="""
+            >>> x = 1
+            >>> x == 1
+            False
+        """)
+        reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
+        reprec.assertoutcome(failed=1)
+
+    def test_doctest_unexpected_exception(self, testdir):
+        p = testdir.maketxtfile("""
+            >>> i = 0
+            >>> i = 1
+            >>> x
+            2
+        """)
+        reprec = testdir.inline_run(p)
+        call = reprec.getcall("pytest_runtest_logreport")
+        assert call.report.failed
+        assert call.report.longrepr
+        # XXX
+        #testitem, = items
+        #excinfo = py.test.raises(Failed, "testitem.runtest()")
+        #repr = testitem.repr_failure(excinfo, ("", ""))
+        #assert repr.reprlocation
+
+    def test_doctestmodule(self, testdir):
+        p = testdir.makepyfile("""
+            '''
+                >>> x = 1
+                >>> x == 1
+                False
+
+            '''
+        """)
+        reprec = testdir.inline_run(p, "--doctest-modules")
+        reprec.assertoutcome(failed=1)
+
+    def test_doctestmodule_external_and_issue116(self, testdir):
+        p = testdir.mkpydir("hello")
+        p.join("__init__.py").write(py.code.Source("""
+            def somefunc():
+                '''
+                    >>> i = 0
+                    >>> i + 1
+                    2
+                '''
+        """))
+        result = testdir.runpytest(p, "--doctest-modules")
+        result.stdout.fnmatch_lines([
+            '004 *>>> i = 0',
+            '005 *>>> i + 1',
+            '*Expected:',
+            "*    2",
+            "*Got:",
+            "*    1",
+            "*:5: DocTestFailure"
+        ])
+
+
+    def test_txtfile_failing(self, testdir):
+        p = testdir.maketxtfile("""
+            >>> i = 0
+            >>> i + 1
+            2
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            '001 >>> i = 0',
+            '002 >>> i + 1',
+            'Expected:',
+            "    2",
+            "Got:",
+            "    1",
+            "*test_txtfile_failing.txt:2: DocTestFailure"
+        ])
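(Aside: the new test_doctest.py drives everything through testdir, but the options it passes are the ordinary command-line ones. As a hedged illustration, a module like the following -- file and function names are made up here -- is what --doctest-modules collects, while --doctest-glob=x*.txt widens the text-file pattern used in test_new_pattern.)

    # e.g. mymodule.py, run with:  py.test --doctest-modules
    def add(a, b):
        """Add two numbers.

        >>> add(2, 2)
        4
        """
        return a + b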

--- a/testing/plugin/test_pytester.py
+++ /dev/null
@@ -1,114 +0,0 @@
-import py
-import os, sys
-from _pytest.pytester import LineMatcher, LineComp, HookRecorder
-from _pytest.core import PluginManager
-
-def test_reportrecorder(testdir):
-    item = testdir.getitem("def test_func(): pass")
-    recorder = testdir.getreportrecorder(item.config)
-    assert not recorder.getfailures()
-
-    py.test.xfail("internal reportrecorder tests need refactoring")
-    class rep:
-        excinfo = None
-        passed = False
-        failed = True
-        skipped = False
-        when = "call"
-
-    recorder.hook.pytest_runtest_logreport(report=rep)
-    failures = recorder.getfailures()
-    assert failures == [rep]
-    failures = recorder.getfailures()
-    assert failures == [rep]
-
-    class rep:
-        excinfo = None
-        passed = False
-        failed = False
-        skipped = True
-        when = "call"
-    rep.passed = False
-    rep.skipped = True
-    recorder.hook.pytest_runtest_logreport(report=rep)
-
-    modcol = testdir.getmodulecol("")
-    rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
-    rep.passed = False
-    rep.failed = True
-    rep.skipped = False
-    recorder.hook.pytest_collectreport(report=rep)
-
-    passed, skipped, failed = recorder.listoutcomes()
-    assert not passed and skipped and failed
-
-    numpassed, numskipped, numfailed = recorder.countoutcomes()
-    assert numpassed == 0
-    assert numskipped == 1
-    assert numfailed == 1
-    assert len(recorder.getfailedcollections()) == 1
-
-    recorder.unregister()
-    recorder.clear()
-    recorder.hook.pytest_runtest_logreport(report=rep)
-    py.test.raises(ValueError, "recorder.getfailures()")
-
-
-def test_parseconfig(testdir):
-    config1 = testdir.parseconfig()
-    config2 = testdir.parseconfig()
-    assert config2 != config1
-    assert config1 != py.test.config
-
-def test_testdir_runs_with_plugin(testdir):
-    testdir.makepyfile("""
-        pytest_plugins = "pytest_pytester"
-        def test_hello(testdir):
-            assert 1
-    """)
-    result = testdir.runpytest()
-    result.stdout.fnmatch_lines([
-        "*1 passed*"
-    ])
-
-def test_hookrecorder_basic():
-    rec = HookRecorder(PluginManager())
-    class ApiClass:
-        def pytest_xyz(self, arg):
-            "x"
-    rec.start_recording(ApiClass)
-    rec.hook.pytest_xyz(arg=123)
-    call = rec.popcall("pytest_xyz")
-    assert call.arg == 123
-    assert call._name == "pytest_xyz"
-    py.test.raises(ValueError, "rec.popcall('abc')")
-
-def test_hookrecorder_basic_no_args_hook():
-    rec = HookRecorder(PluginManager())
-    apimod = type(os)('api')
-    def pytest_xyz():
-        "x"
-    apimod.pytest_xyz = pytest_xyz
-    rec.start_recording(apimod)
-    rec.hook.pytest_xyz()
-    call = rec.popcall("pytest_xyz")
-    assert call._name == "pytest_xyz"
-
-def test_functional(testdir, linecomp):
-    reprec = testdir.inline_runsource("""
-        import py
-        from _pytest.core import HookRelay, PluginManager
-        pytest_plugins="pytester"
-        def test_func(_pytest):
-            class ApiClass:
-                def pytest_xyz(self, arg):  "x"
-            hook = HookRelay([ApiClass], PluginManager(load=False))
-            rec = _pytest.gethookrecorder(hook)
-            class Plugin:
-                def pytest_xyz(self, arg):
-                    return arg + 1
-            rec._pluginmanager.register(Plugin())
-            res = rec.hook.pytest_xyz(arg=41)
-            assert res == [42]
-    """)
-    reprec.assertoutcome(passed=1)

--- a/testing/plugin/test_genscript.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import py, os, sys
-import subprocess
-
-
-def pytest_funcarg__standalone(request):
-    return request.cached_setup(scope="module", setup=lambda: Standalone(request))
-
-class Standalone:
-    def __init__(self, request):
-        self.testdir = request.getfuncargvalue("testdir")
-        script = "mypytest"
-        result = self.testdir.runpytest("--genscript=%s" % script)
-        assert result.ret == 0
-        self.script = self.testdir.tmpdir.join(script)
-        assert self.script.check()
-
-    def run(self, anypython, testdir, *args):
-        testdir.chdir()
-        return testdir._run(anypython, self.script, *args)
-
-def test_gen(testdir, anypython, standalone):
-    result = standalone.run(anypython, testdir, '--version')
-    assert result.ret == 0
-    result.stderr.fnmatch_lines([
-        "*imported from*mypytest*"
-    ])
-    p = testdir.makepyfile("def test_func(): assert 0")
-    result = standalone.run(anypython, testdir, p)
-    assert result.ret != 0
-
-def test_rundist(testdir, pytestconfig, standalone):
-    pytestconfig.pluginmanager.skipifmissing("xdist")
-    testdir.makepyfile("""
-        def test_one():
-            pass
-    """)
-    result = standalone.run(sys.executable, testdir, '-n', '3')
-    assert result.ret == 0
-    result.stdout.fnmatch_lines([
-        "*1 passed*",
-    ])
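(Aside: the deleted genscript tests generate a standalone runner with --genscript and then execute it with another interpreter. Below is a compressed sketch of that flow using only the testdir calls already visible above; the test name is mine, the script name "mypytest" is the one the deleted test uses.)

    def test_genscript_sketch(testdir):
        # write the self-contained script into the test directory
        result = testdir.runpytest("--genscript=mypytest")
        assert result.ret == 0
        script = testdir.tmpdir.join("mypytest")
        assert script.check()   # the file exists and can be handed to any python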

--- /dev/null
+++ b/testing/test_capture.py
@@ -0,0 +1,379 @@
+import py, os, sys
+from _pytest.capture import CaptureManager
+
+needsosdup = py.test.mark.xfail("not hasattr(os, 'dup')")
+
+class TestCaptureManager:
+    def test_getmethod_default_no_fd(self, testdir, monkeypatch):
+        config = testdir.parseconfig(testdir.tmpdir)
+        assert config.getvalue("capture") is None
+        capman = CaptureManager()
+        monkeypatch.delattr(os, 'dup', raising=False)
+        try:
+            assert capman._getmethod(config, None) == "sys"
+        finally:
+            monkeypatch.undo()
+
+    def test_configure_per_fspath(self, testdir):
+        config = testdir.parseconfig(testdir.tmpdir)
+        assert config.getvalue("capture") is None
+        capman = CaptureManager()
+        hasfd = hasattr(os, 'dup')
+        if hasfd:
+            assert capman._getmethod(config, None) == "fd"
+        else:
+            assert capman._getmethod(config, None) == "sys"
+
+        for name in ('no', 'fd', 'sys'):
+            if not hasfd and name == 'fd':
+                continue
+            sub = testdir.tmpdir.mkdir("dir" + name)
+            sub.ensure("__init__.py")
+            sub.join("conftest.py").write('option_capture = %r' % name)
+            assert capman._getmethod(config, sub.join("test_hello.py")) == name
+
+    @needsosdup
+    @py.test.mark.multi(method=['no', 'fd', 'sys'])
+    def test_capturing_basic_api(self, method):
+        capouter = py.io.StdCaptureFD()
+        old = sys.stdout, sys.stderr, sys.stdin
+        try:
+            capman = CaptureManager()
+            # call suspend without resume or start
+            outerr = capman.suspendcapture()
+            outerr = capman.suspendcapture()
+            assert outerr == ("", "")
+            capman.resumecapture(method)
+            print ("hello")
+            out, err = capman.suspendcapture()
+            if method == "no":
+                assert old == (sys.stdout, sys.stderr, sys.stdin)
+            else:
+                assert out == "hello\n"
+            capman.resumecapture(method)
+            out, err = capman.suspendcapture()
+            assert not out and not err
+        finally:
+            capouter.reset()
+
+    @needsosdup
+    def test_juggle_capturings(self, testdir):
+        capouter = py.io.StdCaptureFD()
+        try:
+            config = testdir.parseconfig(testdir.tmpdir)
+            capman = CaptureManager()
+            capman.resumecapture("fd")
+            py.test.raises(ValueError, 'capman.resumecapture("fd")')
+            py.test.raises(ValueError, 'capman.resumecapture("sys")')
+            os.write(1, "hello\n".encode('ascii'))
+            out, err = capman.suspendcapture()
+            assert out == "hello\n"
+            capman.resumecapture("sys")
+            os.write(1, "hello\n".encode('ascii'))
+            py.builtin.print_("world", file=sys.stderr)
+            out, err = capman.suspendcapture()
+            assert not out
+            assert err == "world\n"
+        finally:
+            capouter.reset()
+
+@py.test.mark.multi(method=['fd', 'sys'])
+def test_capturing_unicode(testdir, method):
+    if sys.version_info >= (3,0):
+        obj = "'b\u00f6y'"
+    else:
+        obj = "u'\u00f6y'"
+    testdir.makepyfile("""
+        # coding=utf8
+        # taken from issue 227 from nosetests
+        def test_unicode():
+            import sys
+            print (sys.stdout)
+            print (%s)
+    """ % obj)
+    result = testdir.runpytest("--capture=%s" % method)
+    result.stdout.fnmatch_lines([
+        "*1 passed*"
+    ])
+
+@py.test.mark.multi(method=['fd', 'sys'])
+def test_capturing_bytes_in_utf8_encoding(testdir, method):
+    testdir.makepyfile("""
+        def test_unicode():
+            print ('b\\u00f6y')
+    """)
+    result = testdir.runpytest("--capture=%s" % method)
+    result.stdout.fnmatch_lines([
+        "*1 passed*"
+    ])
+
+def test_collect_capturing(testdir):
+    p = testdir.makepyfile("""
+        print ("collect %s failure" % 13)
+        import xyz42123
+    """)
+    result = testdir.runpytest(p)
+    result.stdout.fnmatch_lines([
+        "*Captured stdout*",
+        "*collect 13 failure*",
+    ])
+
+class TestPerTestCapturing:
+    def test_capture_and_fixtures(self, testdir):
+        p = testdir.makepyfile("""
+            def setup_module(mod):
+                print ("setup module")
+            def setup_function(function):
+                print ("setup " + function.__name__)
+            def test_func1():
+                print ("in func1")
+                assert 0
+            def test_func2():
+                print ("in func2")
+                assert 0
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "setup module*",
+            "setup test_func1*",
+            "in func1*",
+            "setup test_func2*",
+            "in func2*",
+        ])
+
+    @py.test.mark.xfail
+    def test_capture_scope_cache(self, testdir):
+        p = testdir.makepyfile("""
+            import sys
+            def setup_module(func):
+                print ("module-setup")
+            def setup_function(func):
+                print ("function-setup")
+            def test_func():
+                print ("in function")
+                assert 0
+            def teardown_function(func):
+                print ("in teardown")
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*test_func():*",
+            "*Captured stdout during setup*",
+            "module-setup*",
+            "function-setup*",
+            "*Captured stdout*",
+            "in teardown*",
+        ])
+
+
+    def test_no_carry_over(self, testdir):
+        p = testdir.makepyfile("""
+            def test_func1():
+                print ("in func1")
+            def test_func2():
+                print ("in func2")
+                assert 0
+        """)
+        result = testdir.runpytest(p)
+        s = result.stdout.str()
+        assert "in func1" not in s
+        assert "in func2" in s
+
+
+    def test_teardown_capturing(self, testdir):
+        p = testdir.makepyfile("""
+            def setup_function(function):
+                print ("setup func1")
+            def teardown_function(function):
+                print ("teardown func1")
+                assert 0
+            def test_func1():
+                print ("in func1")
+                pass
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            '*teardown_function*',
+            '*Captured stdout*',
+            "setup func1*",
+            "in func1*",
+            "teardown func1*",
+            #"*1 fixture failure*"
+        ])
+
+    def test_teardown_final_capturing(self, testdir):
+        p = testdir.makepyfile("""
+            def teardown_module(mod):
+                print ("teardown module")
+                assert 0
+            def test_func():
+                pass
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*def teardown_module(mod):*",
+            "*Captured stdout*",
+            "*teardown module*",
+            "*1 error*",
+        ])
+
+    def test_capturing_outerr(self, testdir):
+        p1 = testdir.makepyfile("""
+            import sys
+            def test_capturing():
+                print (42)
+                sys.stderr.write(str(23))
+            def test_capturing_error():
+                print (1)
+                sys.stderr.write(str(2))
+                raise ValueError
+        """)
+        result = testdir.runpytest(p1)
+        result.stdout.fnmatch_lines([
+            "*test_capturing_outerr.py .F",
+            "====* FAILURES *====",
+            "____*____",
+            "*test_capturing_outerr.py:8: ValueError",
+            "*--- Captured stdout ---*",
+            "1",
+            "*--- Captured stderr ---*",
+            "2",
+        ])
+
+class TestLoggingInteraction:
+    def test_logging_stream_ownership(self, testdir):
+        p = testdir.makepyfile("""
+            def test_logging():
+                import logging
+                import py
+                stream = py.io.TextIO()
+                logging.basicConfig(stream=stream)
+                stream.close() # to free memory/release resources
+        """)
+        result = testdir.runpytest(p)
+        assert result.stderr.str().find("atexit") == -1
+
+    def test_logging_and_immediate_setupteardown(self, testdir):
+        p = testdir.makepyfile("""
+            import logging
+            def setup_function(function):
+                logging.warn("hello1")
+
+            def test_logging():
+                logging.warn("hello2")
+                assert 0
+
+            def teardown_function(function):
+                logging.warn("hello3")
+                assert 0
+        """)
+        for optargs in (('--capture=sys',), ('--capture=fd',)):
+            print (optargs)
+            result = testdir.runpytest(p, *optargs)
+            s = result.stdout.str()
+            result.stdout.fnmatch_lines([
+                "*WARN*hello3",  # errors show first!
+                "*WARN*hello1",
+                "*WARN*hello2",
+            ])
+            # verify proper termination
+            assert "closed" not in s
+
+    def test_logging_and_crossscope_fixtures(self, testdir):
+        p = testdir.makepyfile("""
+            import logging
+            def setup_module(function):
+                logging.warn("hello1")
+
+            def test_logging():
+                logging.warn("hello2")
+                assert 0
+
+            def teardown_module(function):
+                logging.warn("hello3")
+                assert 0
+        """)
+        for optargs in (('--capture=sys',), ('--capture=fd',)):
+            print (optargs)
+            result = testdir.runpytest(p, *optargs)
+            s = result.stdout.str()
+            result.stdout.fnmatch_lines([
+                "*WARN*hello3",  # errors come first
+                "*WARN*hello1",
+                "*WARN*hello2",
+            ])
+            # verify proper termination
+            assert "closed" not in s
+
+class TestCaptureFuncarg:
+    def test_std_functional(self, testdir):
+        reprec = testdir.inline_runsource("""
+            def test_hello(capsys):
+                print (42)
+                out, err = capsys.readouterr()
+                assert out.startswith("42")
+        """)
+        reprec.assertoutcome(passed=1)
+
+    @needsosdup
+    def test_stdfd_functional(self, testdir):
+        reprec = testdir.inline_runsource("""
+            def test_hello(capfd):
+                import os
+                os.write(1, "42".encode('ascii'))
+                out, err = capfd.readouterr()
+                assert out.startswith("42")
+                capfd.close()
+        """)
+        reprec.assertoutcome(passed=1)
+
+    def test_partial_setup_failure(self, testdir):
+        p = testdir.makepyfile("""
+            def test_hello(capsys, missingarg):
+                pass
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*test_partial_setup_failure*",
+            "*1 error*",
+        ])
+
+    @needsosdup
+    def test_keyboardinterrupt_disables_capturing(self, testdir):
+        p = testdir.makepyfile("""
+            def test_hello(capfd):
+                import os
+                os.write(1, str(42).encode('ascii'))
+                raise KeyboardInterrupt()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*KeyboardInterrupt*"
+        ])
+        assert result.ret == 2
+
+def test_setup_failure_does_not_kill_capturing(testdir):
+    sub1 = testdir.mkpydir("sub1")
+    sub1.join("conftest.py").write(py.code.Source("""
+        def pytest_runtest_setup(item):
+            raise ValueError(42)
+    """))
+    sub1.join("test_mod.py").write("def test_func1(): pass")
+    result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
+    result.stdout.fnmatch_lines([
+        "*ValueError(42)*",
+        "*1 error*"
+    ])
+
+def test_fdfuncarg_skips_on_no_osdup(testdir):
+    testdir.makepyfile("""
+        import os
+        if hasattr(os, 'dup'):
+            del os.dup
+        def test_hello(capfd):
+            pass
+    """)
+    result = testdir.runpytest("--capture=no")
+    result.stdout.fnmatch_lines([
+        "*1 skipped*"
+    ])
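(Aside, for readers skimming the new test_capture.py: the capsys/capfd funcargs exercised in TestCaptureFuncarg both expose readouterr(), which returns everything written since the last snapshot. A minimal usage sketch, not part of the patch.)

    def test_print_goes_to_capsys(capsys):
        print ("42")
        out, err = capsys.readouterr()   # snapshot and reset captured output
        assert out.startswith("42")
        assert err == ""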

--- a/testing/plugin/test_monkeypatch.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import os, sys
-import py
-from _pytest.monkeypatch import monkeypatch as MonkeyPatch
-
-def test_setattr():
-    class A:
-        x = 1
-    monkeypatch = MonkeyPatch()
-    py.test.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)")
-    monkeypatch.setattr(A, 'y', 2, raising=False)
-    assert A.y == 2
-    monkeypatch.undo()
-    assert not hasattr(A, 'y')
-
-    monkeypatch = MonkeyPatch()
-    monkeypatch.setattr(A, 'x', 2)
-    assert A.x == 2
-    monkeypatch.setattr(A, 'x', 3)
-    assert A.x == 3
-    monkeypatch.undo()
-    assert A.x == 1
-
-    A.x = 5
-    monkeypatch.undo() # double-undo makes no modification
-    assert A.x == 5
-
-def test_delattr():
-    class A:
-        x = 1
-    monkeypatch = MonkeyPatch()
-    monkeypatch.delattr(A, 'x')
-    assert not hasattr(A, 'x')
-    monkeypatch.undo()
-    assert A.x == 1
-
-    monkeypatch = MonkeyPatch()
-    monkeypatch.delattr(A, 'x')
-    py.test.raises(AttributeError, "monkeypatch.delattr(A, 'y')")
-    monkeypatch.delattr(A, 'y', raising=False)
-    monkeypatch.setattr(A, 'x', 5, raising=False)
-    assert A.x == 5
-    monkeypatch.undo()
-    assert A.x == 1
-
-def test_setitem():
-    d = {'x': 1}
-    monkeypatch = MonkeyPatch()
-    monkeypatch.setitem(d, 'x', 2)
-    monkeypatch.setitem(d, 'y', 1700)
-    monkeypatch.setitem(d, 'y', 1700)
-    assert d['x'] == 2
-    assert d['y'] == 1700
-    monkeypatch.setitem(d, 'x', 3)
-    assert d['x'] == 3
-    monkeypatch.undo()
-    assert d['x'] == 1
-    assert 'y' not in d
-    d['x'] = 5
-    monkeypatch.undo()
-    assert d['x'] == 5
-
-def test_delitem():
-    d = {'x': 1}
-    monkeypatch = MonkeyPatch()
-    monkeypatch.delitem(d, 'x')
-    assert 'x' not in d
-    monkeypatch.delitem(d, 'y', raising=False)
-    py.test.raises(KeyError, "monkeypatch.delitem(d, 'y')")
-    assert not d
-    monkeypatch.setitem(d, 'y', 1700)
-    assert d['y'] == 1700
-    d['hello'] = 'world'
-    monkeypatch.setitem(d, 'x', 1500)
-    assert d['x'] == 1500
-    monkeypatch.undo()
-    assert d == {'hello': 'world', 'x': 1}
-
-def test_setenv():
-    monkeypatch = MonkeyPatch()
-    monkeypatch.setenv('XYZ123', 2)
-    import os
-    assert os.environ['XYZ123'] == "2"
-    monkeypatch.undo()
-    assert 'XYZ123' not in os.environ
-
-def test_delenv():
-    name = 'xyz1234'
-    assert name not in os.environ
-    monkeypatch = MonkeyPatch()
-    py.test.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name)
-    monkeypatch.delenv(name, raising=False)
-    monkeypatch.undo()
-    os.environ[name] = "1"
-    try:
-        monkeypatch = MonkeyPatch()
-        monkeypatch.delenv(name)
-        assert name not in os.environ
-        monkeypatch.setenv(name, "3")
-        assert os.environ[name] == "3"
-        monkeypatch.undo()
-        assert os.environ[name] == "1"
-    finally:
-        if name in os.environ:
-            del os.environ[name]
-
-def test_setenv_prepend():
-    import os
-    monkeypatch = MonkeyPatch()
-    monkeypatch.setenv('XYZ123', 2, prepend="-")
-    assert os.environ['XYZ123'] == "2"
-    monkeypatch.setenv('XYZ123', 3, prepend="-")
-    assert os.environ['XYZ123'] == "3-2"
-    monkeypatch.undo()
-    assert 'XYZ123' not in os.environ
-
-def test_monkeypatch_plugin(testdir):
-    reprec = testdir.inline_runsource("""
-        def test_method(monkeypatch):
-            assert monkeypatch.__class__.__name__ == "monkeypatch"
-    """)
-    res = reprec.countoutcomes()
-    assert tuple(res) == (1, 0, 0), res
-
-def test_syspath_prepend():
-    old = list(sys.path)
-    try:
-        monkeypatch = MonkeyPatch()
-        monkeypatch.syspath_prepend('world')
-        monkeypatch.syspath_prepend('hello')
-        assert sys.path[0] == "hello"
-        assert sys.path[1] == "world"
-        monkeypatch.undo()
-        assert sys.path == old
-        monkeypatch.undo()
-        assert sys.path == old
-    finally:
-        sys.path[:] = old
-
-
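(Aside: the deleted test_monkeypatch.py spells out the undo semantics in detail; the core pattern it verifies can be summarised as follows, sketch only, mirroring the calls above.)

    import os
    from _pytest.monkeypatch import monkeypatch as MonkeyPatch

    class A:
        x = 1

    mp = MonkeyPatch()
    mp.setattr(A, 'x', 2)           # patch an attribute ...
    mp.setenv('XYZ123', 2)          # ... and an env var (values are str()-ed)
    assert A.x == 2
    assert os.environ['XYZ123'] == "2"
    mp.undo()                       # one call restores everything
    assert A.x == 1
    assert 'XYZ123' not in os.environ   # assuming it was unset before, as in test_setenv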

--- /dev/null
+++ b/testing/test_pytester.py
@@ -0,0 +1,114 @@
+import py
+import os, sys
+from _pytest.pytester import LineMatcher, LineComp, HookRecorder
+from _pytest.core import PluginManager
+
+def test_reportrecorder(testdir):
+    item = testdir.getitem("def test_func(): pass")
+    recorder = testdir.getreportrecorder(item.config)
+    assert not recorder.getfailures()
+
+    py.test.xfail("internal reportrecorder tests need refactoring")
+    class rep:
+        excinfo = None
+        passed = False
+        failed = True
+        skipped = False
+        when = "call"
+
+    recorder.hook.pytest_runtest_logreport(report=rep)
+    failures = recorder.getfailures()
+    assert failures == [rep]
+    failures = recorder.getfailures()
+    assert failures == [rep]
+
+    class rep:
+        excinfo = None
+        passed = False
+        failed = False
+        skipped = True
+        when = "call"
+    rep.passed = False
+    rep.skipped = True
+    recorder.hook.pytest_runtest_logreport(report=rep)
+
+    modcol = testdir.getmodulecol("")
+    rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
+    rep.passed = False
+    rep.failed = True
+    rep.skipped = False
+    recorder.hook.pytest_collectreport(report=rep)
+
+    passed, skipped, failed = recorder.listoutcomes()
+    assert not passed and skipped and failed
+
+    numpassed, numskipped, numfailed = recorder.countoutcomes()
+    assert numpassed == 0
+    assert numskipped == 1
+    assert numfailed == 1
+    assert len(recorder.getfailedcollections()) == 1
+
+    recorder.unregister()
+    recorder.clear()
+    recorder.hook.pytest_runtest_logreport(report=rep)
+    py.test.raises(ValueError, "recorder.getfailures()")
+
+
+def test_parseconfig(testdir):
+    config1 = testdir.parseconfig()
+    config2 = testdir.parseconfig()
+    assert config2 != config1
+    assert config1 != py.test.config
+
+def test_testdir_runs_with_plugin(testdir):
+    testdir.makepyfile("""
+        pytest_plugins = "pytest_pytester"
+        def test_hello(testdir):
+            assert 1
+    """)
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines([
+        "*1 passed*"
+    ])
+
+def test_hookrecorder_basic():
+    rec = HookRecorder(PluginManager())
+    class ApiClass:
+        def pytest_xyz(self, arg):
+            "x"
+    rec.start_recording(ApiClass)
+    rec.hook.pytest_xyz(arg=123)
+    call = rec.popcall("pytest_xyz")
+    assert call.arg == 123
+    assert call._name == "pytest_xyz"
+    py.test.raises(ValueError, "rec.popcall('abc')")
+
+def test_hookrecorder_basic_no_args_hook():
+    rec = HookRecorder(PluginManager())
+    apimod = type(os)('api')
+    def pytest_xyz():
+        "x"
+    apimod.pytest_xyz = pytest_xyz
+    rec.start_recording(apimod)
+    rec.hook.pytest_xyz()
+    call = rec.popcall("pytest_xyz")
+    assert call._name == "pytest_xyz"
+
+def test_functional(testdir, linecomp):
+    reprec = testdir.inline_runsource("""
+        import py
+        from _pytest.core import HookRelay, PluginManager
+        pytest_plugins="pytester"
+        def test_func(_pytest):
+            class ApiClass:
+                def pytest_xyz(self, arg):  "x"
+            hook = HookRelay([ApiClass], PluginManager(load=False))
+            rec = _pytest.gethookrecorder(hook)
+            class Plugin:
+                def pytest_xyz(self, arg):
+                    return arg + 1
+            rec._pluginmanager.register(Plugin())
+            res = rec.hook.pytest_xyz(arg=41)
+            assert res == [42]
+    """)
+    reprec.assertoutcome(passed=1)
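(Aside: test_pytester.py, now at its un-nested location, mostly tests the recording helpers themselves. The essential HookRecorder usage it relies on, condensed from test_hookrecorder_basic above.)

    from _pytest.pytester import HookRecorder
    from _pytest.core import PluginManager

    rec = HookRecorder(PluginManager())

    class ApiClass:
        def pytest_xyz(self, arg):
            "x"                      # docstring-only body: this is a hook specification

    rec.start_recording(ApiClass)    # record calls to hooks declared on ApiClass
    rec.hook.pytest_xyz(arg=123)
    call = rec.popcall("pytest_xyz")
    assert call.arg == 123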

--- /dev/null
+++ b/testing/test_python.py
@@ -0,0 +1,1176 @@
+import pytest, py, sys
+from _pytest import python as funcargs
+
+class TestModule:
+    def test_failing_import(self, testdir):
+        modcol = testdir.getmodulecol("import alksdjalskdjalkjals")
+        py.test.raises(ImportError, modcol.collect)
+        py.test.raises(ImportError, modcol.collect)
+
+    def test_import_duplicate(self, testdir):
+        a = testdir.mkdir("a")
+        b = testdir.mkdir("b")
+        p = a.ensure("test_whatever.py")
+        p.pyimport()
+        del py.std.sys.modules['test_whatever']
+        b.ensure("test_whatever.py")
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*import*mismatch*",
+            "*imported*test_whatever*",
+            "*%s*" % a.join("test_whatever.py"),
+            "*not the same*",
+            "*%s*" % b.join("test_whatever.py"),
+            "*HINT*",
+        ])
+
+    def test_syntax_error_in_module(self, testdir):
+        modcol = testdir.getmodulecol("this is a syntax error")
+        py.test.raises(modcol.CollectError, modcol.collect)
+        py.test.raises(modcol.CollectError, modcol.collect)
+
+    def test_module_considers_pluginmanager_at_import(self, testdir):
+        modcol = testdir.getmodulecol("pytest_plugins='xasdlkj',")
+        py.test.raises(ImportError, "modcol.obj")
+
+class TestClass:
+    def test_class_with_init_not_collected(self, testdir):
+        modcol = testdir.getmodulecol("""
+            class TestClass1:
+                def __init__(self):
+                    pass
+            class TestClass2(object):
+                def __init__(self):
+                    pass
+        """)
+        l = modcol.collect()
+        assert len(l) == 0
+
+class TestGenerator:
+    def test_generative_functions(self, testdir):
+        modcol = testdir.getmodulecol("""
+            def func1(arg, arg2):
+                assert arg == arg2
+
+            def test_gen():
+                yield func1, 17, 3*5
+                yield func1, 42, 6*7
+        """)
+        colitems = modcol.collect()
+        assert len(colitems) == 1
+        gencol = colitems[0]
+        assert isinstance(gencol, pytest.Generator)
+        gencolitems = gencol.collect()
+        assert len(gencolitems) == 2
+        assert isinstance(gencolitems[0], pytest.Function)
+        assert isinstance(gencolitems[1], pytest.Function)
+        assert gencolitems[0].name == '[0]'
+        assert gencolitems[0].obj.__name__ == 'func1'
+
+    def test_generative_methods(self, testdir):
+        modcol = testdir.getmodulecol("""
+            def func1(arg, arg2):
+                assert arg == arg2
+            class TestGenMethods:
+                def test_gen(self):
+                    yield func1, 17, 3*5
+                    yield func1, 42, 6*7
+        """)
+        gencol = modcol.collect()[0].collect()[0].collect()[0]
+        assert isinstance(gencol, pytest.Generator)
+        gencolitems = gencol.collect()
+        assert len(gencolitems) == 2
+        assert isinstance(gencolitems[0], pytest.Function)
+        assert isinstance(gencolitems[1], pytest.Function)
+        assert gencolitems[0].name == '[0]'
+        assert gencolitems[0].obj.__name__ == 'func1'
+
+    def test_generative_functions_with_explicit_names(self, testdir):
+        modcol = testdir.getmodulecol("""
+            def func1(arg, arg2):
+                assert arg == arg2
+
+            def test_gen():
+                yield "seventeen", func1, 17, 3*5
+                yield "fortytwo", func1, 42, 6*7
+        """)
+        colitems = modcol.collect()
+        assert len(colitems) == 1
+        gencol = colitems[0]
+        assert isinstance(gencol, pytest.Generator)
+        gencolitems = gencol.collect()
+        assert len(gencolitems) == 2
+        assert isinstance(gencolitems[0], pytest.Function)
+        assert isinstance(gencolitems[1], pytest.Function)
+        assert gencolitems[0].name == "['seventeen']"
+        assert gencolitems[0].obj.__name__ == 'func1'
+        assert gencolitems[1].name == "['fortytwo']"
+        assert gencolitems[1].obj.__name__ == 'func1'
+
+    def test_generative_functions_unique_explicit_names(self, testdir):
+        # generative
+        modcol = testdir.getmodulecol("""
+            def func(): pass
+            def test_gen():
+                yield "name", func
+                yield "name", func
+        """)
+        colitems = modcol.collect()
+        assert len(colitems) == 1
+        gencol = colitems[0]
+        assert isinstance(gencol, pytest.Generator)
+        py.test.raises(ValueError, "gencol.collect()")
+
+    def test_generative_methods_with_explicit_names(self, testdir):
+        modcol = testdir.getmodulecol("""
+            def func1(arg, arg2):
+                assert arg == arg2
+            class TestGenMethods:
+                def test_gen(self):
+                    yield "m1", func1, 17, 3*5
+                    yield "m2", func1, 42, 6*7
+        """)
+        gencol = modcol.collect()[0].collect()[0].collect()[0]
+        assert isinstance(gencol, pytest.Generator)
+        gencolitems = gencol.collect()
+        assert len(gencolitems) == 2
+        assert isinstance(gencolitems[0], pytest.Function)
+        assert isinstance(gencolitems[1], pytest.Function)
+        assert gencolitems[0].name == "['m1']"
+        assert gencolitems[0].obj.__name__ == 'func1'
+        assert gencolitems[1].name == "['m2']"
+        assert gencolitems[1].obj.__name__ == 'func1'
+
+    def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir):
+        o = testdir.makepyfile("""
+            def test_generative_order_of_execution():
+                import py
+                test_list = []
+                expected_list = list(range(6))
+
+                def list_append(item):
+                    test_list.append(item)
+
+                def assert_order_of_execution():
+                    py.builtin.print_('expected order', expected_list)
+                    py.builtin.print_('but got       ', test_list)
+                    assert test_list == expected_list
+
+                for i in expected_list:
+                    yield list_append, i
+                yield assert_order_of_execution
+        """)
+        reprec = testdir.inline_run(o)
+        passed, skipped, failed = reprec.countoutcomes()
+        assert passed == 7
+        assert not skipped and not failed
+
+    def test_order_of_execution_generator_different_codeline(self, testdir):
+        o = testdir.makepyfile("""
+            def test_generative_tests_different_codeline():
+                import py
+                test_list = []
+                expected_list = list(range(3))
+
+                def list_append_2():
+                    test_list.append(2)
+
+                def list_append_1():
+                    test_list.append(1)
+
+                def list_append_0():
+                    test_list.append(0)
+
+                def assert_order_of_execution():
+                    py.builtin.print_('expected order', expected_list)
+                    py.builtin.print_('but got       ', test_list)
+                    assert test_list == expected_list
+
+                yield list_append_0
+                yield list_append_1
+                yield list_append_2
+                yield assert_order_of_execution
+        """)
+        reprec = testdir.inline_run(o)
+        passed, skipped, failed = reprec.countoutcomes()
+        assert passed == 4
+        assert not skipped and not failed
+
+class TestFunction:
+    def test_getmodulecollector(self, testdir):
+        item = testdir.getitem("def test_func(): pass")
+        modcol = item.getparent(pytest.Module)
+        assert isinstance(modcol, pytest.Module)
+        assert hasattr(modcol.obj, 'test_func')
+
+    def test_function_equality(self, testdir, tmpdir):
+        config = testdir.reparseconfig()
+        session = testdir.Session(config)
+        f1 = pytest.Function(name="name", config=config,
+                args=(1,), callobj=isinstance, session=session)
+        f2 = pytest.Function(name="name",config=config,
+                args=(1,), callobj=py.builtin.callable, session=session)
+        assert not f1 == f2
+        assert f1 != f2
+        f3 = pytest.Function(name="name", config=config,
+                args=(1,2), callobj=py.builtin.callable, session=session)
+        assert not f3 == f2
+        assert f3 != f2
+
+        assert not f3 == f1
+        assert f3 != f1
+
+        f1_b = pytest.Function(name="name", config=config,
+              args=(1,), callobj=isinstance, session=session)
+        assert f1 == f1_b
+        assert not f1 != f1_b
+
+    def test_function_equality_with_callspec(self, testdir, tmpdir):
+        config = testdir.reparseconfig()
+        class callspec1:
+            param = 1
+            funcargs = {}
+            id = "hello"
+        class callspec2:
+            param = 1
+            funcargs = {}
+            id = "world"
+        session = testdir.Session(config)
+        f5 = pytest.Function(name="name", config=config,
+            callspec=callspec1, callobj=isinstance, session=session)
+        f5b = pytest.Function(name="name", config=config,
+            callspec=callspec2, callobj=isinstance, session=session)
+        assert f5 != f5b
+        assert not (f5 == f5b)
+
+    def test_pyfunc_call(self, testdir):
+        item = testdir.getitem("def test_func(): raise ValueError")
+        config = item.config
+        class MyPlugin1:
+            def pytest_pyfunc_call(self, pyfuncitem):
+                raise ValueError
+        class MyPlugin2:
+            def pytest_pyfunc_call(self, pyfuncitem):
+                return True
+        config.pluginmanager.register(MyPlugin1())
+        config.pluginmanager.register(MyPlugin2())
+        config.hook.pytest_pyfunc_call(pyfuncitem=item)
+
+class TestSorting:
+    def test_check_equality(self, testdir):
+        modcol = testdir.getmodulecol("""
+            def test_pass(): pass
+            def test_fail(): assert 0
+        """)
+        fn1 = testdir.collect_by_name(modcol, "test_pass")
+        assert isinstance(fn1, pytest.Function)
+        fn2 = testdir.collect_by_name(modcol, "test_pass")
+        assert isinstance(fn2, pytest.Function)
+
+        assert fn1 == fn2
+        assert fn1 != modcol
+        if py.std.sys.version_info < (3, 0):
+            assert cmp(fn1, fn2) == 0
+        assert hash(fn1) == hash(fn2)
+
+        fn3 = testdir.collect_by_name(modcol, "test_fail")
+        assert isinstance(fn3, pytest.Function)
+        assert not (fn1 == fn3)
+        assert fn1 != fn3
+
+        for fn in fn1,fn2,fn3:
+            assert fn != 3
+            assert fn != modcol
+            assert fn != [1,2,3]
+            assert [1,2,3] != fn
+            assert modcol != fn
+
+    def test_allow_sane_sorting_for_decorators(self, testdir):
+        modcol = testdir.getmodulecol("""
+            def dec(f):
+                g = lambda: f(2)
+                g.place_as = f
+                return g
+
+
+            def test_b(y):
+                pass
+            test_b = dec(test_b)
+
+            def test_a(y):
+                pass
+            test_a = dec(test_a)
+        """)
+        colitems = modcol.collect()
+        assert len(colitems) == 2
+        assert [item.name for item in colitems] == ['test_b', 'test_a']
+
+
+class TestConftestCustomization:
+    def test_pytest_pycollect_module(self, testdir):
+        testdir.makeconftest("""
+            import pytest
+            class MyModule(pytest.Module):
+                pass
+            def pytest_pycollect_makemodule(path, parent):
+                if path.basename == "test_xyz.py":
+                    return MyModule(path, parent)
+        """)
+        testdir.makepyfile("def some(): pass")
+        testdir.makepyfile(test_xyz="")
+        result = testdir.runpytest("--collectonly")
+        result.stdout.fnmatch_lines([
+            "*<Module*test_pytest*",
+            "*<MyModule*xyz*",
+        ])
+
+    def test_pytest_pycollect_makeitem(self, testdir):
+        testdir.makeconftest("""
+            import pytest
+            class MyFunction(pytest.Function):
+                pass
+            def pytest_pycollect_makeitem(collector, name, obj):
+                if name == "some":
+                    return MyFunction(name, collector)
+        """)
+        testdir.makepyfile("def some(): pass")
+        result = testdir.runpytest("--collectonly")
+        result.stdout.fnmatch_lines([
+            "*MyFunction*some*",
+        ])
+
+    def test_makeitem_non_underscore(self, testdir, monkeypatch):
+        modcol = testdir.getmodulecol("def _hello(): pass")
+        l = []
+        monkeypatch.setattr(pytest.Module, 'makeitem',
+            lambda self, name, obj: l.append(name))
+        l = modcol.collect()
+        assert '_hello' not in l
+
+def test_setup_only_available_in_subdir(testdir):
+    sub1 = testdir.mkpydir("sub1")
+    sub2 = testdir.mkpydir("sub2")
+    sub1.join("conftest.py").write(py.code.Source("""
+        import py
+        def pytest_runtest_setup(item):
+            assert item.fspath.purebasename == "test_in_sub1"
+        def pytest_runtest_call(item):
+            assert item.fspath.purebasename == "test_in_sub1"
+        def pytest_runtest_teardown(item):
+            assert item.fspath.purebasename == "test_in_sub1"
+    """))
+    sub2.join("conftest.py").write(py.code.Source("""
+        import py
+        def pytest_runtest_setup(item):
+            assert item.fspath.purebasename == "test_in_sub2"
+        def pytest_runtest_call(item):
+            assert item.fspath.purebasename == "test_in_sub2"
+        def pytest_runtest_teardown(item):
+            assert item.fspath.purebasename == "test_in_sub2"
+    """))
+    sub1.join("test_in_sub1.py").write("def test_1(): pass")
+    sub2.join("test_in_sub2.py").write("def test_2(): pass")
+    result = testdir.runpytest("-v", "-s")
+    result.stdout.fnmatch_lines([
+        "*2 passed*"
+    ])
+
+def test_generate_tests_only_done_in_subdir(testdir):
+    sub1 = testdir.mkpydir("sub1")
+    sub2 = testdir.mkpydir("sub2")
+    sub1.join("conftest.py").write(py.code.Source("""
+        def pytest_generate_tests(metafunc):
+            assert metafunc.function.__name__ == "test_1"
+    """))
+    sub2.join("conftest.py").write(py.code.Source("""
+        def pytest_generate_tests(metafunc):
+            assert metafunc.function.__name__ == "test_2"
+    """))
+    sub1.join("test_in_sub1.py").write("def test_1(): pass")
+    sub2.join("test_in_sub2.py").write("def test_2(): pass")
+    result = testdir.runpytest("-v", "-s", sub1, sub2, sub1)
+    result.stdout.fnmatch_lines([
+        "*3 passed*"
+    ])
+
+def test_modulecol_roundtrip(testdir):
+    modcol = testdir.getmodulecol("pass", withinit=True)
+    trail = modcol.nodeid
+    newcol = modcol.session.perform_collect([trail], genitems=0)[0]
+    assert modcol.name == newcol.name
+
+
+class TestTracebackCutting:
+    def test_skip_simple(self):
+        excinfo = py.test.raises(py.test.skip.Exception, 'py.test.skip("xxx")')
+        assert excinfo.traceback[-1].frame.code.name == "skip"
+        assert excinfo.traceback[-1].ishidden()
+
+    def test_traceback_argsetup(self, testdir):
+        testdir.makeconftest("""
+            def pytest_funcarg__hello(request):
+                raise ValueError("xyz")
+        """)
+        p = testdir.makepyfile("def test(hello): pass")
+        result = testdir.runpytest(p)
+        assert result.ret != 0
+        out = result.stdout.str()
+        assert out.find("xyz") != -1
+        assert out.find("conftest.py:2: ValueError") != -1
+        numentries = out.count("_ _ _") # separator for traceback entries
+        assert numentries == 0
+
+        result = testdir.runpytest("--fulltrace", p)
+        out = result.stdout.str()
+        assert out.find("conftest.py:2: ValueError") != -1
+        numentries = out.count("_ _ _ _") # separator for traceback entries
+        assert numentries > 3
+
+    def test_traceback_error_during_import(self, testdir):
+        testdir.makepyfile("""
+            x = 1
+            x = 2
+            x = 17
+            asd
+        """)
+        result = testdir.runpytest()
+        assert result.ret != 0
+        out = result.stdout.str()
+        assert "x = 1" not in out
+        assert "x = 2" not in out
+        result.stdout.fnmatch_lines([
+            ">*asd*",
+            "E*NameError*",
+        ])
+        result = testdir.runpytest("--fulltrace")
+        out = result.stdout.str()
+        assert "x = 1" in out
+        assert "x = 2" in out
+        result.stdout.fnmatch_lines([
+            ">*asd*",
+            "E*NameError*",
+        ])
+
+def test_getfuncargnames():
+    def f(): pass
+    assert not funcargs.getfuncargnames(f)
+    def g(arg): pass
+    assert funcargs.getfuncargnames(g) == ['arg']
+    def h(arg1, arg2="hello"): pass
+    assert funcargs.getfuncargnames(h) == ['arg1']
+    def h(arg1, arg2, arg3="hello"): pass
+    assert funcargs.getfuncargnames(h) == ['arg1', 'arg2']
+    class A:
+        def f(self, arg1, arg2="hello"):
+            pass
+    assert funcargs.getfuncargnames(A().f) == ['arg1']
+    if sys.version_info < (3,0):
+        assert funcargs.getfuncargnames(A.f) == ['arg1']
+
+def test_callspec_repr():
+    cs = funcargs.CallSpec({}, 'hello', 1)
+    repr(cs)
+    cs = funcargs.CallSpec({}, 'hello', funcargs._notexists)
+    repr(cs)
+
+class TestFillFuncArgs:
+    def test_funcarg_lookupfails(self, testdir):
+        testdir.makeconftest("""
+            def pytest_funcarg__xyzsomething(request):
+                return 42
+        """)
+        item = testdir.getitem("def test_func(some): pass")
+        exc = py.test.raises(funcargs.FuncargRequest.LookupError,
+            "funcargs.fillfuncargs(item)")
+        s = str(exc.value)
+        assert s.find("xyzsomething") != -1
+
+    def test_funcarg_lookup_default(self, testdir):
+        item = testdir.getitem("def test_func(some, other=42): pass")
+        class Provider:
+            def pytest_funcarg__some(self, request):
+                return request.function.__name__
+        item.config.pluginmanager.register(Provider())
+        funcargs.fillfuncargs(item)
+        assert len(item.funcargs) == 1
+
+    def test_funcarg_basic(self, testdir):
+        item = testdir.getitem("def test_func(some, other): pass")
+        class Provider:
+            def pytest_funcarg__some(self, request):
+                return request.function.__name__
+            def pytest_funcarg__other(self, request):
+                return 42
+        item.config.pluginmanager.register(Provider())
+        funcargs.fillfuncargs(item)
+        assert len(item.funcargs) == 2
+        assert item.funcargs['some'] == "test_func"
+        assert item.funcargs['other'] == 42
+
+    def test_funcarg_lookup_modulelevel(self, testdir):
+        modcol = testdir.getmodulecol("""
+            def pytest_funcarg__something(request):
+                return request.function.__name__
+
+            class TestClass:
+                def test_method(self, something):
+                    pass
+            def test_func(something):
+                pass
+        """)
+        item1, item2 = testdir.genitems([modcol])
+        funcargs.fillfuncargs(item1)
+        assert item1.funcargs['something'] == "test_method"
+        funcargs.fillfuncargs(item2)
+        assert item2.funcargs['something'] == "test_func"
+
+    def test_funcarg_lookup_classlevel(self, testdir):
+        p = testdir.makepyfile("""
+            class TestClass:
+                def pytest_funcarg__something(self, request):
+                    return request.instance
+                def test_method(self, something):
+                    assert something is self
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 passed*"
+        ])
+
+    def test_fillfuncargs_exposed(self, testdir):
+        item = testdir.getitem("def test_func(some, other=42): pass")
+        class Provider:
+            def pytest_funcarg__some(self, request):
+                return request.function.__name__
+        item.config.pluginmanager.register(Provider())
+        if hasattr(item, '_args'):
+            del item._args
+        pytest._fillfuncargs(item)
+        assert len(item.funcargs) == 1
+
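+# FuncargRequest exposes the requesting test context (function, cls, module,
+# config) and offers getfuncargvalue() and addfinalizer() for teardown.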
+class TestRequest:
+    def test_request_attributes(self, testdir):
+        item = testdir.getitem("""
+            def pytest_funcarg__something(request): pass
+            def test_func(something): pass
+        """)
+        req = funcargs.FuncargRequest(item)
+        assert req.function == item.obj
+        assert req.keywords is item.keywords
+        assert hasattr(req.module, 'test_func')
+        assert req.cls is None
+        assert req.function.__name__ == "test_func"
+        assert req.config == item.config
+        assert repr(req).find(req.function.__name__) != -1
+
+    def test_request_attributes_method(self, testdir):
+        item, = testdir.getitems("""
+            class TestB:
+                def test_func(self, something):
+                    pass
+        """)
+        req = funcargs.FuncargRequest(item)
+        assert req.cls.__name__ == "TestB"
+        assert req.instance.__class__ == req.cls
+
+    def XXXtest_request_contains_funcarg_name2factory(self, testdir):
+        modcol = testdir.getmodulecol("""
+            def pytest_funcarg__something(request):
+                pass
+            class TestClass:
+                def test_method(self, something):
+                    pass
+        """)
+        item1, = testdir.genitems([modcol])
+        assert item1.name == "test_method"
+        name2factory = funcargs.FuncargRequest(item1)._name2factory
+        assert len(name2factory) == 1
+        assert name2factory[0].__name__ == "pytest_funcarg__something"
+
+    def test_getfuncargvalue_recursive(self, testdir):
+        testdir.makeconftest("""
+            def pytest_funcarg__something(request):
+                return 1
+        """)
+        item = testdir.getitem("""
+            def pytest_funcarg__something(request):
+                return request.getfuncargvalue("something") + 1
+            def test_func(something):
+                assert something == 2
+        """)
+        req = funcargs.FuncargRequest(item)
+        val = req.getfuncargvalue("something")
+        assert val == 2
+
+    def test_getfuncargvalue(self, testdir):
+        item = testdir.getitem("""
+            l = [2]
+            def pytest_funcarg__something(request): return 1
+            def pytest_funcarg__other(request):
+                return l.pop()
+            def test_func(something): pass
+        """)
+        req = funcargs.FuncargRequest(item)
+        py.test.raises(req.LookupError, req.getfuncargvalue, "notexists")
+        val = req.getfuncargvalue("something")
+        assert val == 1
+        val = req.getfuncargvalue("something")
+        assert val == 1
+        val2 = req.getfuncargvalue("other")
+        assert val2 == 2
+        val2 = req.getfuncargvalue("other")  # see about caching
+        assert val2 == 2
+        req._fillfuncargs()
+        assert item.funcargs == {'something': 1}
+
+    def test_request_addfinalizer(self, testdir):
+        item = testdir.getitem("""
+            teardownlist = []
+            def pytest_funcarg__something(request):
+                request.addfinalizer(lambda: teardownlist.append(1))
+            def test_func(something): pass
+        """)
+        req = funcargs.FuncargRequest(item)
+        req.config._setupstate.prepare(item) # XXX
+        req._fillfuncargs()
+        # successively check finalization calls
+        teardownlist = item.getparent(pytest.Module).obj.teardownlist
+        ss = item.config._setupstate
+        assert not teardownlist
+        ss.teardown_exact(item)
+        print(ss.stack)
+        assert teardownlist == [1]
+
+    def test_request_addfinalizer_partial_setup_failure(self, testdir):
+        p = testdir.makepyfile("""
+            l = []
+            def pytest_funcarg__something(request):
+                request.addfinalizer(lambda: l.append(None))
+            def test_func(something, missingarg):
+                pass
+            def test_second():
+                assert len(l) == 1
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 passed*1 error*"
+            ])
+
+    def test_request_getmodulepath(self, testdir):
+        modcol = testdir.getmodulecol("def test_somefunc(): pass")
+        item, = testdir.genitems([modcol])
+        req = funcargs.FuncargRequest(item)
+        assert req.fspath == modcol.fspath
+
+def test_applymarker(testdir):
+    item1, item2 = testdir.getitems("""
+        class TestClass:
+            def test_func1(self, something):
+                pass
+            def test_func2(self, something):
+                pass
+    """)
+    req1 = funcargs.FuncargRequest(item1)
+    assert 'xfail' not in item1.keywords
+    req1.applymarker(py.test.mark.xfail)
+    assert 'xfail' in item1.keywords
+    assert 'skipif' not in item1.keywords
+    req1.applymarker(py.test.mark.skipif)
+    assert 'skipif' in item1.keywords
+    py.test.raises(ValueError, "req1.applymarker(42)")
+
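+# cached_setup() memoizes a setup result per scope and extrakey and invokes the
+# teardown callback when the matching finalizers run.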
+class TestRequestCachedSetup:
+    def test_request_cachedsetup(self, testdir):
+        item1, item2 = testdir.getitems("""
+            class TestClass:
+                def test_func1(self, something):
+                    pass
+                def test_func2(self, something):
+                    pass
+        """)
+        req1 = funcargs.FuncargRequest(item1)
+        l = ["hello"]
+        def setup():
+            return l.pop()
+        ret1 = req1.cached_setup(setup)
+        assert ret1 == "hello"
+        ret1b = req1.cached_setup(setup)
+        assert ret1 == ret1b
+        req2 = funcargs.FuncargRequest(item2)
+        ret2 = req2.cached_setup(setup)
+        assert ret2 == ret1
+
+    def test_request_cachedsetup_extrakey(self, testdir):
+        item1 = testdir.getitem("def test_func(): pass")
+        req1 = funcargs.FuncargRequest(item1)
+        l = ["hello", "world"]
+        def setup():
+            return l.pop()
+        ret1 = req1.cached_setup(setup, extrakey=1)
+        ret2 = req1.cached_setup(setup, extrakey=2)
+        assert ret2 == "hello"
+        assert ret1 == "world"
+        ret1b = req1.cached_setup(setup, extrakey=1)
+        ret2b = req1.cached_setup(setup, extrakey=2)
+        assert ret1 == ret1b
+        assert ret2 == ret2b
+
+    def test_request_cachedsetup_cache_deletion(self, testdir):
+        item1 = testdir.getitem("def test_func(): pass")
+        req1 = funcargs.FuncargRequest(item1)
+        l = []
+        def setup():
+            l.append("setup")
+        def teardown(val):
+            l.append("teardown")
+        ret1 = req1.cached_setup(setup, teardown, scope="function")
+        assert l == ['setup']
+        # artificial call of finalizer
+        req1.config._setupstate._callfinalizers(item1)
+        assert l == ["setup", "teardown"]
+        ret2 = req1.cached_setup(setup, teardown, scope="function")
+        assert l == ["setup", "teardown", "setup"]
+        req1.config._setupstate._callfinalizers(item1)
+        assert l == ["setup", "teardown", "setup", "teardown"]
+
+    def test_request_cached_setup_two_args(self, testdir):
+        testdir.makepyfile("""
+            def pytest_funcarg__arg1(request):
+                return request.cached_setup(lambda: 42)
+            def pytest_funcarg__arg2(request):
+                return request.cached_setup(lambda: 17)
+            def test_two_different_setups(arg1, arg2):
+                assert arg1 != arg2
+        """)
+        result = testdir.runpytest("-v")
+        result.stdout.fnmatch_lines([
+            "*1 passed*"
+        ])
+
+    def test_request_cached_setup_getfuncargvalue(self, testdir):
+        testdir.makepyfile("""
+            def pytest_funcarg__arg1(request):
+                arg1 = request.getfuncargvalue("arg2")
+                return request.cached_setup(lambda: arg1 + 1)
+            def pytest_funcarg__arg2(request):
+                return request.cached_setup(lambda: 10)
+            def test_two_funcarg(arg1):
+                assert arg1 == 11
+        """)
+        result = testdir.runpytest("-v")
+        result.stdout.fnmatch_lines([
+            "*1 passed*"
+        ])
+
+    def test_request_cached_setup_functional(self, testdir):
+        testdir.makepyfile(test_0="""
+            l = []
+            def pytest_funcarg__something(request):
+                val = request.cached_setup(fsetup, fteardown)
+                return val
+            def fsetup(mycache=[1]):
+                l.append(mycache.pop())
+                return l
+            def fteardown(something):
+                l.remove(something[0])
+                l.append(2)
+            def test_list_once(something):
+                assert something == [1]
+            def test_list_twice(something):
+                assert something == [1]
+        """)
+        testdir.makepyfile(test_1="""
+            import test_0 # should have run already
+            def test_check_test0_has_teardown_correct():
+                assert test_0.l == [2]
+        """)
+        result = testdir.runpytest("-v")
+        result.stdout.fnmatch_lines([
+            "*3 passed*"
+        ])
+
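+# Metafunc.addcall() records parametrized calls along with their ids, params
+# and funcargs.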
+class TestMetafunc:
+    def test_no_funcargs(self, testdir):
+        def function(): pass
+        metafunc = funcargs.Metafunc(function)
+        assert not metafunc.funcargnames
+
+    def test_function_basic(self):
+        def func(arg1, arg2="qwe"): pass
+        metafunc = funcargs.Metafunc(func)
+        assert len(metafunc.funcargnames) == 1
+        assert 'arg1' in metafunc.funcargnames
+        assert metafunc.function is func
+        assert metafunc.cls is None
+
+    def test_addcall_no_args(self):
+        def func(arg1): pass
+        metafunc = funcargs.Metafunc(func)
+        metafunc.addcall()
+        assert len(metafunc._calls) == 1
+        call = metafunc._calls[0]
+        assert call.id == "0"
+        assert not hasattr(call, 'param')
+
+    def test_addcall_id(self):
+        def func(arg1): pass
+        metafunc = funcargs.Metafunc(func)
+        py.test.raises(ValueError, "metafunc.addcall(id=None)")
+
+        metafunc.addcall(id=1)
+        py.test.raises(ValueError, "metafunc.addcall(id=1)")
+        py.test.raises(ValueError, "metafunc.addcall(id='1')")
+        metafunc.addcall(id=2)
+        assert len(metafunc._calls) == 2
+        assert metafunc._calls[0].id == "1"
+        assert metafunc._calls[1].id == "2"
+
+    def test_addcall_param(self):
+        def func(arg1): pass
+        metafunc = funcargs.Metafunc(func)
+        class obj: pass
+        metafunc.addcall(param=obj)
+        metafunc.addcall(param=obj)
+        metafunc.addcall(param=1)
+        assert len(metafunc._calls) == 3
+        assert metafunc._calls[0].param == obj
+        assert metafunc._calls[1].param == obj
+        assert metafunc._calls[2].param == 1
+
+    def test_addcall_funcargs(self):
+        def func(arg1): pass
+        metafunc = funcargs.Metafunc(func)
+        class obj: pass
+        metafunc.addcall(funcargs={"x": 2})
+        metafunc.addcall(funcargs={"x": 3})
+        assert len(metafunc._calls) == 2
+        assert metafunc._calls[0].funcargs == {'x': 2}
+        assert metafunc._calls[1].funcargs == {'x': 3}
+        assert not hasattr(metafunc._calls[1], 'param')
+
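+# Functional coverage for pytest_generate_tests hooks defined at conftest,
+# module and class level.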
+class TestGenfuncFunctional:
+    def test_attributes(self, testdir):
+        p = testdir.makepyfile("""
+            # assumes that generate/provide runs in the same process
+            import py
+            def pytest_generate_tests(metafunc):
+                metafunc.addcall(param=metafunc)
+
+            def pytest_funcarg__metafunc(request):
+                assert request._pyfuncitem._genid == "0"
+                return request.param
+
+            def test_function(metafunc, pytestconfig):
+                assert metafunc.config == pytestconfig
+                assert metafunc.module.__name__ == __name__
+                assert metafunc.function == test_function
+                assert metafunc.cls is None
+
+            class TestClass:
+                def test_method(self, metafunc, pytestconfig):
+                    assert metafunc.config == pytestconfig
+                    assert metafunc.module.__name__ == __name__
+                    if py.std.sys.version_info > (3, 0):
+                        unbound = TestClass.test_method
+                    else:
+                        unbound = TestClass.test_method.im_func
+                    # XXX actually have an unbound test function here?
+                    assert metafunc.function == unbound
+                    assert metafunc.cls == TestClass
+        """)
+        result = testdir.runpytest(p, "-v")
+        result.stdout.fnmatch_lines([
+            "*2 passed in*",
+        ])
+
+    def test_addcall_with_two_funcargs_generators(self, testdir):
+        testdir.makeconftest("""
+            def pytest_generate_tests(metafunc):
+                assert "arg1" in metafunc.funcargnames
+                metafunc.addcall(funcargs=dict(arg1=1, arg2=2))
+        """)
+        p = testdir.makepyfile("""
+            def pytest_generate_tests(metafunc):
+                metafunc.addcall(funcargs=dict(arg1=1, arg2=1))
+
+            class TestClass:
+                def test_myfunc(self, arg1, arg2):
+                    assert arg1 == arg2
+        """)
+        result = testdir.runpytest("-v", p)
+        result.stdout.fnmatch_lines([
+            "*test_myfunc*0*PASS*",
+            "*test_myfunc*1*FAIL*",
+            "*1 failed, 1 passed*"
+        ])
+
+    def test_two_functions(self, testdir):
+        p = testdir.makepyfile("""
+            def pytest_generate_tests(metafunc):
+                metafunc.addcall(param=10)
+                metafunc.addcall(param=20)
+
+            def pytest_funcarg__arg1(request):
+                return request.param
+
+            def test_func1(arg1):
+                assert arg1 == 10
+            def test_func2(arg1):
+                assert arg1 in (10, 20)
+        """)
+        result = testdir.runpytest("-v", p)
+        result.stdout.fnmatch_lines([
+            "*test_func1*0*PASS*",
+            "*test_func1*1*FAIL*",
+            "*test_func2*PASS*",
+            "*1 failed, 3 passed*"
+        ])
+
+    def test_generate_plugin_and_module(self, testdir):
+        testdir.makeconftest("""
+            def pytest_generate_tests(metafunc):
+                assert "arg1" in metafunc.funcargnames
+                metafunc.addcall(id="world", param=(2,100))
+        """)
+        p = testdir.makepyfile("""
+            def pytest_generate_tests(metafunc):
+                metafunc.addcall(param=(1,1), id="hello")
+
+            def pytest_funcarg__arg1(request):
+                return request.param[0]
+            def pytest_funcarg__arg2(request):
+                return request.param[1]
+
+            class TestClass:
+                def test_myfunc(self, arg1, arg2):
+                    assert arg1 == arg2
+        """)
+        result = testdir.runpytest("-v", p)
+        result.stdout.fnmatch_lines([
+            "*test_myfunc*hello*PASS*",
+            "*test_myfunc*world*FAIL*",
+            "*1 failed, 1 passed*"
+        ])
+
+    def test_generate_tests_in_class(self, testdir):
+        p = testdir.makepyfile("""
+            class TestClass:
+                def pytest_generate_tests(self, metafunc):
+                    metafunc.addcall(funcargs={'hello': 'world'}, id="hello")
+
+                def test_myfunc(self, hello):
+                    assert hello == "world"
+        """)
+        result = testdir.runpytest("-v", p)
+        result.stdout.fnmatch_lines([
+            "*test_myfunc*hello*PASS*",
+            "*1 passed*"
+        ])
+
+    def test_two_functions_not_same_instance(self, testdir):
+        p = testdir.makepyfile("""
+            def pytest_generate_tests(metafunc):
+                metafunc.addcall({'arg1': 10})
+                metafunc.addcall({'arg1': 20})
+
+            class TestClass:
+                def test_func(self, arg1):
+                    assert not hasattr(self, 'x')
+                    self.x = 1
+        """)
+        result = testdir.runpytest("-v", p)
+        result.stdout.fnmatch_lines([
+            "*test_func*0*PASS*",
+            "*test_func*1*PASS*",
+            "*2 pass*",
+        ])
+
+
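+# Funcarg factories from a subdirectory conftest are only visible to tests
+# collected below that directory.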
+def test_conftest_funcargs_only_available_in_subdir(testdir):
+    sub1 = testdir.mkpydir("sub1")
+    sub2 = testdir.mkpydir("sub2")
+    sub1.join("conftest.py").write(py.code.Source("""
+        import py
+        def pytest_funcarg__arg1(request):
+            py.test.raises(Exception, "request.getfuncargvalue('arg2')")
+    """))
+    sub2.join("conftest.py").write(py.code.Source("""
+        import py
+        def pytest_funcarg__arg2(request):
+            py.test.raises(Exception, "request.getfuncargvalue('arg1')")
+    """))
+
+    sub1.join("test_in_sub1.py").write("def test_1(arg1): pass")
+    sub2.join("test_in_sub2.py").write("def test_2(arg2): pass")
+    result = testdir.runpytest("-v")
+    result.stdout.fnmatch_lines([
+        "*2 passed*"
+    ])
+
+def test_funcarg_non_pycollectobj(testdir): # rough jstests usage
+    testdir.makeconftest("""
+        import pytest
+        def pytest_pycollect_makeitem(collector, name, obj):
+            if name == "MyClass":
+                return MyCollector(name, parent=collector)
+        class MyCollector(pytest.Collector):
+            def reportinfo(self):
+                return self.fspath, 3, "xyz"
+    """)
+    modcol = testdir.getmodulecol("""
+        def pytest_funcarg__arg1(request):
+            return 42
+        class MyClass:
+            pass
+    """)
+    clscol = modcol.collect()[0]
+    clscol.obj = lambda arg1: None
+    clscol.funcargs = {}
+    funcargs.fillfuncargs(clscol)
+    assert clscol.funcargs['arg1'] == 42
+
+
+def test_funcarg_lookup_error(testdir):
+    p = testdir.makepyfile("""
+        def test_lookup_error(unknown):
+            pass
+    """)
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines([
+        "*ERROR at setup of test_lookup_error*",
+        "*def test_lookup_error(unknown):*",
+        "*LookupError: no factory found*unknown*",
+        "*available funcargs*",
+        "*1 error*",
+    ])
+    assert "INTERNAL" not in result.stdout.str()
+
+class TestReportInfo:
+    def test_itemreport_reportinfo(self, testdir, linecomp):
+        testdir.makeconftest("""
+            import pytest
+            class MyFunction(pytest.Function):
+                def reportinfo(self):
+                    return "ABCDE", 42, "custom"
+            def pytest_pycollect_makeitem(collector, name, obj):
+                if name == "test_func":
+                    return MyFunction(name, parent=collector)
+        """)
+        item = testdir.getitem("def test_func(): pass")
+        runner = item.config.pluginmanager.getplugin("runner")
+        assert item.location == ("ABCDE", 42, "custom")
+
+    def test_func_reportinfo(self, testdir):
+        item = testdir.getitem("def test_func(): pass")
+        fspath, lineno, modpath = item.reportinfo()
+        assert fspath == item.fspath
+        assert lineno == 0
+        assert modpath == "test_func"
+
+    def test_class_reportinfo(self, testdir):
+        modcol = testdir.getmodulecol("""
+            # lineno 0
+            class TestClass:
+                def test_hello(self): pass
+        """)
+        classcol = testdir.collect_by_name(modcol, "TestClass")
+        fspath, lineno, msg = classcol.reportinfo()
+        assert fspath == modcol.fspath
+        assert lineno == 1
+        assert msg == "TestClass"
+
+    def test_generator_reportinfo(self, testdir):
+        modcol = testdir.getmodulecol("""
+            # lineno 0
+            def test_gen():
+                def check(x):
+                    assert x
+                yield check, 3
+        """)
+        gencol = testdir.collect_by_name(modcol, "test_gen")
+        fspath, lineno, modpath = gencol.reportinfo()
+        assert fspath == modcol.fspath
+        assert lineno == 1
+        assert modpath == "test_gen"
+
+        genitem = gencol.collect()[0]
+        fspath, lineno, modpath = genitem.reportinfo()
+        assert fspath == modcol.fspath
+        assert lineno == 2
+        assert modpath == "test_gen[0]"
+        """
+            def test_func():
+                pass
+            def test_genfunc():
+                def check(x):
+                    pass
+                yield check, 3
+            class TestClass:
+                def test_method(self):
+                    pass
+       """
+
+def test_show_funcarg(testdir):
+    result = testdir.runpytest("--funcargs")
+    result.stdout.fnmatch_lines([
+        "*tmpdir*",
+        "*temporary directory*",
+    ])
+
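+# py.test.raises accepts a code string, a callable, or (on Python 2.5+) can be
+# used as a context manager.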
+class TestRaises:
+    def test_raises(self):
+        source = "int('qwe')"
+        excinfo = py.test.raises(ValueError, source)
+        code = excinfo.traceback[-1].frame.code
+        s = str(code.fullsource)
+        assert s == source
+
+    def test_raises_exec(self):
+        py.test.raises(ValueError, "a,x = []")
+
+    def test_raises_syntax_error(self):
+        py.test.raises(SyntaxError, "qwe qwe qwe")
+
+    def test_raises_function(self):
+        py.test.raises(ValueError, int, 'hello')
+
+    def test_raises_callable_no_exception(self):
+        class A:
+            def __call__(self):
+                pass
+        try:
+            py.test.raises(ValueError, A())
+        except py.test.raises.Exception:
+            pass
+
+    @py.test.mark.skipif('sys.version < "2.5"')
+    def test_raises_as_contextmanager(self, testdir):
+        testdir.makepyfile("""
+            from __future__ import with_statement
+            import py
+
+            def test_simple():
+                with py.test.raises(ZeroDivisionError) as excinfo:
+                    assert isinstance(excinfo, py.code.ExceptionInfo)
+                    1/0
+                print (excinfo)
+                assert excinfo.type == ZeroDivisionError
+
+            def test_noraise():
+                with py.test.raises(py.test.raises.Exception):
+                    with py.test.raises(ValueError):
+                           int()
+
+            def test_raise_wrong_exception_passes_by():
+                with py.test.raises(ZeroDivisionError):
+                    with py.test.raises(ValueError):
+                           1/0
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            '*3 passed*',
+        ])
+
+
+

--- a/testing/plugin/test_recwarn.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import py
-from _pytest.recwarn import WarningsRecorder
-
-def test_WarningRecorder(recwarn):
-    showwarning = py.std.warnings.showwarning
-    rec = WarningsRecorder()
-    assert py.std.warnings.showwarning != showwarning
-    assert not rec.list
-    py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
-    assert len(rec.list) == 1
-    py.std.warnings.warn(DeprecationWarning("hello"))
-    assert len(rec.list) == 2
-    warn = rec.pop()
-    assert str(warn.message) == "hello"
-    l = rec.list
-    rec.clear()
-    assert len(rec.list) == 0
-    assert l is rec.list
-    py.test.raises(AssertionError, "rec.pop()")
-    rec.finalize()
-    assert showwarning == py.std.warnings.showwarning
-
-def test_recwarn_functional(testdir):
-    reprec = testdir.inline_runsource("""
-        pytest_plugins = 'pytest_recwarn',
-        import warnings
-        oldwarn = warnings.showwarning
-        def test_method(recwarn):
-            assert warnings.showwarning != oldwarn
-            warnings.warn("hello")
-            warn = recwarn.pop()
-            assert isinstance(warn.message, UserWarning)
-        def test_finalized():
-            assert warnings.showwarning == oldwarn
-    """)
-    res = reprec.countoutcomes()
-    assert tuple(res) == (2, 0, 0), res
-
-#
-# ============ test py.test.deprecated_call() ==============
-#
-
-def dep(i):
-    if i == 0:
-        py.std.warnings.warn("is deprecated", DeprecationWarning)
-    return 42
-
-reg = {}
-def dep_explicit(i):
-    if i == 0:
-        py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
-                                      filename="hello", lineno=3)
-
-def test_deprecated_call_raises():
-    excinfo = py.test.raises(AssertionError,
-                   "py.test.deprecated_call(dep, 3)")
-    assert str(excinfo).find("did not produce") != -1
-
-def test_deprecated_call():
-    py.test.deprecated_call(dep, 0)
-
-def test_deprecated_call_ret():
-    ret = py.test.deprecated_call(dep, 0)
-    assert ret == 42
-
-def test_deprecated_call_preserves():
-    r = py.std.warnings.onceregistry.copy()
-    f = py.std.warnings.filters[:]
-    test_deprecated_call_raises()
-    test_deprecated_call()
-    assert r == py.std.warnings.onceregistry
-    assert f == py.std.warnings.filters
-
-def test_deprecated_explicit_call_raises():
-    py.test.raises(AssertionError,
-                   "py.test.deprecated_call(dep_explicit, 3)")
-
-def test_deprecated_explicit_call():
-    py.test.deprecated_call(dep_explicit, 0)
-    py.test.deprecated_call(dep_explicit, 0)
-

--- /dev/null
+++ b/testing/test_unittest.py
@@ -0,0 +1,105 @@
+import py
+
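+# unittest integration: TestCase classes are collected, and setUp/tearDown and
+# class-level setup hooks are honored.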
+def test_simple_unittest(testdir):
+    testpath = testdir.makepyfile("""
+        import unittest
+        pytest_plugins = "pytest_unittest"
+        class MyTestCase(unittest.TestCase):
+            def testpassing(self):
+                self.assertEquals('foo', 'foo')
+            def test_failing(self):
+                self.assertEquals('foo', 'bar')
+    """)
+    reprec = testdir.inline_run(testpath)
+    assert reprec.matchreport("testpassing").passed
+    assert reprec.matchreport("test_failing").failed
+
+def test_isclasscheck_issue53(testdir):
+    testpath = testdir.makepyfile("""
+        import unittest
+        class _E(object):
+            def __getattr__(self, tag):
+                pass
+        E = _E()
+    """)
+    result = testdir.runpytest(testpath)
+    assert result.ret == 0
+
+def test_setup(testdir):
+    testpath = testdir.makepyfile(test_two="""
+        import unittest
+        class MyTestCase(unittest.TestCase):
+            def setUp(self):
+                self.foo = 1
+            def test_setUp(self):
+                self.assertEquals(1, self.foo)
+    """)
+    reprec = testdir.inline_run(testpath)
+    rep = reprec.matchreport("test_setUp")
+    assert rep.passed
+
+def test_new_instances(testdir):
+    testpath = testdir.makepyfile("""
+        import unittest
+        class MyTestCase(unittest.TestCase):
+            def test_func1(self):
+                self.x = 2
+            def test_func2(self):
+                assert not hasattr(self, 'x')
+    """)
+    reprec = testdir.inline_run(testpath)
+    reprec.assertoutcome(passed=2)
+
+def test_teardown(testdir):
+    testpath = testdir.makepyfile("""
+        import unittest
+        pytest_plugins = "pytest_unittest" # XXX
+        class MyTestCase(unittest.TestCase):
+            l = []
+            def test_one(self):
+                pass
+            def tearDown(self):
+                self.l.append(None)
+        class Second(unittest.TestCase):
+            def test_check(self):
+                self.assertEquals(MyTestCase.l, [None])
+    """)
+    reprec = testdir.inline_run(testpath)
+    passed, skipped, failed = reprec.countoutcomes()
+    assert failed == 0, failed
+    assert passed == 2
+    assert passed + skipped + failed == 2
+
+def test_module_level_pytestmark(testdir):
+    testpath = testdir.makepyfile("""
+        import unittest
+        import py
+        pytestmark = py.test.mark.xfail
+        class MyTestCase(unittest.TestCase):
+            def test_func1(self):
+                assert 0
+    """)
+    reprec = testdir.inline_run(testpath, "-s")
+    reprec.assertoutcome(skipped=1)
+
+def test_class_setup(testdir):
+    testpath = testdir.makepyfile("""
+        import unittest
+        import py
+        class MyTestCase(unittest.TestCase):
+            x = 0
+            @classmethod
+            def setUpClass(cls):
+                cls.x += 1
+            def test_func1(self):
+                assert self.x == 1
+            def test_func2(self):
+                assert self.x == 1
+            @classmethod
+            def tearDownClass(cls):
+                cls.x -= 1
+        def test_teareddown():
+            assert MyTestCase.x == 0
+    """)
+    reprec = testdir.inline_run(testpath)
+    reprec.assertoutcome(passed=3)


