[Pytest-commit] commit/pytest: 4 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Fri Feb 27 07:18:28 CET 2015
4 new commits in pytest:
https://bitbucket.org/hpk42/pytest/commits/ef582d901aeb/
Changeset: ef582d901aeb
Branch: merge-cache
User: RonnyPfannschmidt
Date: 2015-02-27 05:26:57+00:00
Summary: simply copy the files, fails all tests
Affected #: 8 files
diff -r 6104c7ea0dccd13b892fa021a2fc989f60a8ff59 -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e _pytest/cache.py
--- /dev/null
+++ b/_pytest/cache.py
@@ -0,0 +1,201 @@
+import py
+import pytest
+import json
+
+
+class Cache:
+ def __init__(self, config):
+ self.config = config
+ self._cachedir = getrootdir(config, ".cache")
+ self.trace = config.trace.root.get("cache")
+ if config.getvalue("clearcache"):
+ self.trace("clearing cachedir")
+ if self._cachedir.check():
+ self._cachedir.remove()
+ self._cachedir.mkdir()
+
+ def makedir(self, name):
+ """ return a directory path object with the given name. If the
+ directory does not yet exist, it will be created. You can use it
+ to manage files like e. g. store/retrieve database
+ dumps across test sessions.
+
+ :param name: must be a string not containing a ``/`` separator.
+ Make sure the name contains your plugin or application
+ identifiers to prevent clashes with other cache users.
+ """
+ if name.count("/") != 0:
+ raise ValueError("name is not allowed to contain '/'")
+ p = self._cachedir.join("d/" + name)
+ p.ensure(dir=1)
+ return p
+
+ def _getpath(self, key):
+ if not key.count("/") > 1:
+ raise KeyError("Key must be of format 'dir/.../subname")
+ return self._cachedir.join(key)
+
+ def _getvaluepath(self, key):
+ p = self._getpath("v/" + key)
+ p.dirpath().ensure(dir=1)
+ return p
+
+ def get(self, key, default):
+ """ return cached value for the given key. If no value
+ was yet cached or the value cannot be read, the specified
+ default is returned.
+
+ :param key: must be a ``/`` separated value. Usually the first
+ name is the name of your plugin or your application.
+ :param default: must be provided in case of a cache-miss or
+ invalid cache values.
+
+ """
+ path = self._getvaluepath(key)
+ if path.check():
+ try:
+ with path.open("r") as f:
+ return json.load(f)
+ except ValueError:
+ self.trace("cache-invalid at %s" % (path,))
+ return default
+
+ def set(self, key, value):
+ """ save value for the given key.
+
+ :param key: must be a ``/`` separated value. Usually the first
+ name is the name of your plugin or your application.
+ :param value: must be of any combination of basic
+ python types, including nested types
+ like e. g. lists of dictionaries.
+ """
+ path = self._getvaluepath(key)
+ with path.open("w") as f:
+ self.trace("cache-write %s: %r" % (key, value,))
+ json.dump(value, f, indent=2, sort_keys=True)
+
+
+### XXX consider shifting part of the below to pytest config object
+
+def getrootdir(config, name):
+ """ return a best-effort root subdir for this test run.
+
+ Starting from files specified at the command line (or cwd)
+ search starts upward for the first "tox.ini", "pytest.ini",
+ "setup.cfg" or "setup.py" file. The first directory containing
+ such a file will be used to return a named subdirectory
+ (py.path.local object).
+
+ """
+ if config.inicfg:
+ p = py.path.local(config.inicfg.config.path).dirpath()
+ else:
+ inibasenames = ["setup.py", "setup.cfg", "tox.ini", "pytest.ini"]
+ for x in getroot(config.args, inibasenames):
+ p = x.dirpath()
+ break
+ else:
+ p = py.path.local()
+ config.trace.get("warn")("no rootdir found, using %s" % p)
+ subdir = p.join(name)
+ config.trace("root %s: %s" % (name, subdir))
+ return subdir
+
+def getroot(args, inibasenames):
+ args = [x for x in args if not str(x).startswith("-")]
+ if not args:
+ args = [py.path.local()]
+ for arg in args:
+ arg = py.path.local(arg)
+ for base in arg.parts(reverse=True):
+ for inibasename in inibasenames:
+ p = base.join(inibasename)
+ if p.check():
+ yield p
+
+
+import py
+import pytest
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group.addoption(
+ '--lf', action='store_true', dest="lf",
+ help="rerun only the tests that failed at the last run (or all if none failed)")
+ group.addoption(
+ '--ff', action='store_true', dest="failedfirst",
+ help="run all tests but run the last failures first. This may re-order "
+ "tests and thus lead to repeated fixture setup/teardown")
+ group.addoption(
+ '--cache', action='store_true', dest="showcache",
+ help="show cache contents, don't perform collection or tests")
+ group.addoption(
+ '--clearcache', action='store_true', dest="clearcache",
+ help="remove all cache contents at start of test run.")
+ group.addoption(
+ '--looponchange', action='store_true', dest='looponchange',
+ help='rerun every time the workdir changes')
+ group.addoption(
+ '--looponfail', action='store_true', dest='looponfail',
+ help='rerun every time the workdir changes')
+ parser.addini(
+ "looponchangeroots", type="pathlist",
+ help="directories to check for changes", default=[py.path.local()])
+
+
+def pytest_cmdline_main(config):
+ if config.option.showcache:
+ from _pytest.main import wrap_session
+ return wrap_session(config, showcache)
+ if config.option.looponchange or config.option.looponfail:
+ from .onchange import looponchange
+ return looponchange(config)
+
+
+@pytest.mark.tryfirst
+def pytest_configure(config):
+ from .cache import Cache
+ from .lastfail import LFPlugin
+ config.cache = cache = Cache(config)
+ config.pluginmanager.register(LFPlugin(config), "lfplugin")
+
+def pytest_report_header(config):
+ if config.option.verbose:
+ relpath = py.path.local().bestrelpath(config.cache._cachedir)
+ return "cachedir: %s" % config.cache._cachedir
+
+def showcache(config, session):
+ from pprint import pprint
+ tw = py.io.TerminalWriter()
+ tw.line("cachedir: " + str(config.cache._cachedir))
+ if not config.cache._cachedir.check():
+ tw.line("cache is empty")
+ return 0
+ dummy = object()
+ basedir = config.cache._cachedir
+ vdir = basedir.join("v")
+ tw.sep("-", "cache values")
+ for valpath in vdir.visit(lambda x: x.check(file=1)):
+ key = valpath.relto(vdir).replace(valpath.sep, "/")
+ val = config.cache.get(key, dummy)
+ if val is dummy:
+ tw.line("%s contains unreadable content, "
+ "will be ignored" % key)
+ else:
+ tw.line("%s contains:" % key)
+ stream = py.io.TextIO()
+ pprint(val, stream=stream)
+ for line in stream.getvalue().splitlines():
+ tw.line(" " + line)
+
+ ddir = basedir.join("d")
+ if ddir.check(dir=1) and ddir.listdir():
+ tw.sep("-", "cache directories")
+ for p in basedir.join("d").visit():
+ #if p.check(dir=1):
+ # print("%s/" % p.relto(basedir))
+ if p.check(file=1):
+ key = p.relto(basedir)
+ tw.line("%s is a file of length %d" % (
+ key, p.size()))
diff -r 6104c7ea0dccd13b892fa021a2fc989f60a8ff59 -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e _pytest/lastfail.py
--- /dev/null
+++ b/_pytest/lastfail.py
@@ -0,0 +1,65 @@
+
+
+class LFPlugin:
+ """ Plugin which implements the --lf (run last-failing) option """
+ def __init__(self, config):
+ self.config = config
+ active_keys = 'lf', 'failedfirst', 'looponfail'
+ self.active = any(config.getvalue(key) for key in active_keys)
+ if self.active:
+ self.lastfailed = config.cache.get("cache/lastfailed", {})
+ else:
+ self.lastfailed = {}
+
+ def pytest_report_header(self):
+ if self.active:
+ if not self.lastfailed:
+ mode = "run all (no recorded failures)"
+ else:
+ mode = "rerun last %d failures%s" % (
+ len(self.lastfailed),
+ " first" if self.config.getvalue("failedfirst") else "")
+ return "run-last-failure: %s" % mode
+
+ def pytest_runtest_logreport(self, report):
+ if report.failed and "xfail" not in report.keywords:
+ self.lastfailed[report.nodeid] = True
+ elif not report.failed:
+ if report.when == "call":
+ self.lastfailed.pop(report.nodeid, None)
+
+ def pytest_collectreport(self, report):
+ passed = report.outcome in ('passed', 'skipped')
+ if passed:
+ if report.nodeid in self.lastfailed:
+ self.lastfailed.pop(report.nodeid)
+ self.lastfailed.update(
+ (item.nodeid, True)
+ for item in report.result)
+ else:
+ self.lastfailed[report.nodeid] = True
+
+ def pytest_collection_modifyitems(self, session, config, items):
+ if self.active and self.lastfailed:
+ previously_failed = []
+ previously_passed = []
+ for item in items:
+ if item.nodeid in self.lastfailed:
+ previously_failed.append(item)
+ else:
+ previously_passed.append(item)
+ if not previously_failed and previously_passed:
+ # running a subset of all tests with recorded failures outside
+ # of the set of tests currently executing
+ pass
+ elif self.config.getvalue("failedfirst"):
+ items[:] = previously_failed + previously_passed
+ else:
+ items[:] = previously_failed
+ config.hook.pytest_deselected(items=previously_passed)
+
+ def pytest_sessionfinish(self, session):
+ config = self.config
+ if config.getvalue("showcache") or hasattr(config, "slaveinput"):
+ return
+ config.cache.set("cache/lastfailed", self.lastfailed)
diff -r 6104c7ea0dccd13b892fa021a2fc989f60a8ff59 -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e _pytest/onchange.py
--- /dev/null
+++ b/_pytest/onchange.py
@@ -0,0 +1,75 @@
+import py
+
+
+SCRIPT = """
+import pytest
+pytest.main(%r)
+"""
+
+
+def looponchange(config):
+ newargs = config._origargs[:]
+ newargs.remove('--looponchange')
+ stats = StatRecorder(config.getini('looponchangeroots'))
+ command = py.std.functools.partial(
+ py.std.subprocess.call, [
+ py.std.sys.executable,
+ '-c', SCRIPT % newargs])
+ loop_forever(stats, command)
+ return 2
+
+
+def loop_forever(stats, command):
+ while True:
+ stats.waitonchange()
+ command()
+
+
+class StatRecorder(object):
+ def __init__(self, rootdirlist):
+ self.rootdirlist = rootdirlist
+ self.statcache = {}
+ self.check() # snapshot state
+
+ def fil(self, p):
+ return p.check(file=1, dotfile=0) and p.ext != ".pyc"
+ def rec(self, p):
+ return p.check(dotfile=0)
+
+ def waitonchange(self, checkinterval=1.0):
+ while 1:
+ changed = self.check()
+ if changed:
+ return
+ py.std.time.sleep(checkinterval)
+
+ def check(self, removepycfiles=True):
+ changed = False
+ statcache = self.statcache
+ newstat = {}
+ for rootdir in self.rootdirlist:
+ for path in rootdir.visit(self.fil, self.rec):
+ oldstat = statcache.pop(path, None)
+ try:
+ newstat[path] = curstat = path.stat()
+ except py.error.ENOENT:
+ if oldstat:
+ changed = True
+ else:
+ if oldstat:
+ if oldstat.mtime != curstat.mtime or \
+ oldstat.size != curstat.size:
+ changed = True
+ py.builtin.print_("# MODIFIED", path)
+ if removepycfiles and path.ext == ".py":
+ pycfile = path + "c"
+ if pycfile.check():
+ pycfile.remove()
+
+ else:
+ changed = True
+ if statcache:
+ changed = True
+ self.statcache = newstat
+ return changed
+
diff -r 6104c7ea0dccd13b892fa021a2fc989f60a8ff59 -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e _pytest/plugin.py
--- /dev/null
+++ b/_pytest/plugin.py
@@ -0,0 +1,85 @@
+import py
+import pytest
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group.addoption(
+ '--lf', action='store_true', dest="lf",
+ help="rerun only the tests that failed at the last run (or all if none failed)")
+ group.addoption(
+ '--ff', action='store_true', dest="failedfirst",
+ help="run all tests but run the last failures first. This may re-order "
+ "tests and thus lead to repeated fixture setup/teardown")
+ group.addoption(
+ '--cache', action='store_true', dest="showcache",
+ help="show cache contents, don't perform collection or tests")
+ group.addoption(
+ '--clearcache', action='store_true', dest="clearcache",
+ help="remove all cache contents at start of test run.")
+ group.addoption(
+ '--looponchange', action='store_true', dest='looponchange',
+ help='rerun every time the workdir changes')
+ group.addoption(
+ '--looponfail', action='store_true', dest='looponfail',
+ help='rerun every time the workdir changes')
+ parser.addini(
+ "looponchangeroots", type="pathlist",
+ help="directories to check for changes", default=[py.path.local()])
+
+
+def pytest_cmdline_main(config):
+ if config.option.showcache:
+ from _pytest.main import wrap_session
+ return wrap_session(config, showcache)
+ if config.option.looponchange or config.option.looponfail:
+ from .onchange import looponchange
+ return looponchange(config)
+
+
+@pytest.mark.tryfirst
+def pytest_configure(config):
+ from .cache import Cache
+ from .lastfail import LFPlugin
+ config.cache = cache = Cache(config)
+ config.pluginmanager.register(LFPlugin(config), "lfplugin")
+
+def pytest_report_header(config):
+ if config.option.verbose:
+ relpath = py.path.local().bestrelpath(config.cache._cachedir)
+ return "cachedir: %s" % config.cache._cachedir
+
+def showcache(config, session):
+ from pprint import pprint
+ tw = py.io.TerminalWriter()
+ tw.line("cachedir: " + str(config.cache._cachedir))
+ if not config.cache._cachedir.check():
+ tw.line("cache is empty")
+ return 0
+ dummy = object()
+ basedir = config.cache._cachedir
+ vdir = basedir.join("v")
+ tw.sep("-", "cache values")
+ for valpath in vdir.visit(lambda x: x.check(file=1)):
+ key = valpath.relto(vdir).replace(valpath.sep, "/")
+ val = config.cache.get(key, dummy)
+ if val is dummy:
+ tw.line("%s contains unreadable content, "
+ "will be ignored" % key)
+ else:
+ tw.line("%s contains:" % key)
+ stream = py.io.TextIO()
+ pprint(val, stream=stream)
+ for line in stream.getvalue().splitlines():
+ tw.line(" " + line)
+
+ ddir = basedir.join("d")
+ if ddir.check(dir=1) and ddir.listdir():
+ tw.sep("-", "cache directories")
+ for p in basedir.join("d").visit():
+ #if p.check(dir=1):
+ # print("%s/" % p.relto(basedir))
+ if p.check(file=1):
+ key = p.relto(basedir)
+ tw.line("%s is a file of length %d" % (
+ key, p.size()))
diff -r 6104c7ea0dccd13b892fa021a2fc989f60a8ff59 -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e testing/test_cache.py
--- /dev/null
+++ b/testing/test_cache.py
@@ -0,0 +1,54 @@
+import os
+import pytest
+import shutil
+import py
+
+pytest_plugins = "pytester",
+
+class TestNewAPI:
+ def test_config_cache_makedir(self, testdir):
+ testdir.makeini("[pytest]")
+ config = testdir.parseconfigure()
+ pytest.raises(ValueError, lambda:
+ config.cache.makedir("key/name"))
+ p = config.cache.makedir("name")
+ assert p.check()
+
+ def test_config_cache_dataerror(self, testdir):
+ testdir.makeini("[pytest]")
+ config = testdir.parseconfigure()
+ cache = config.cache
+ pytest.raises(TypeError, lambda: cache.set("key/name", cache))
+ config.cache.set("key/name", 0)
+ config.cache._getvaluepath("key/name").write("123invalid")
+ val = config.cache.get("key/name", -2)
+ assert val == -2
+
+ def test_config_cache(self, testdir):
+ testdir.makeconftest("""
+ def pytest_configure(config):
+ # see that we get cache information early on
+ assert hasattr(config, "cache")
+ """)
+ testdir.makepyfile("""
+ def test_session(pytestconfig):
+ assert hasattr(pytestconfig, "cache")
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 0
+ result.stdout.fnmatch_lines(["*1 passed*"])
+
+ def XXX_test_cachefuncarg(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+ def test_cachefuncarg(cache):
+ val = cache.get("some/thing", None)
+ assert val is None
+ cache.set("some/thing", [1])
+ pytest.raises(TypeError, lambda: cache.get("some/thing"))
+ val = cache.get("some/thing", [])
+ assert val == [1]
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 0
+ result.stdout.fnmatch_lines(["*1 passed*"])
diff -r 6104c7ea0dccd13b892fa021a2fc989f60a8ff59 -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e testing/test_lastfailed.py
--- /dev/null
+++ b/testing/test_lastfailed.py
@@ -0,0 +1,235 @@
+import os
+import pytest
+import shutil
+import py
+
+pytest_plugins = "pytester",
+
+
+class TestLastFailed:
+ @pytest.mark.skipif("sys.version_info < (2,6)")
+ def test_lastfailed_usecase(self, testdir, monkeypatch):
+ monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
+ p = testdir.makepyfile("""
+ def test_1():
+ assert 0
+ def test_2():
+ assert 0
+ def test_3():
+ assert 1
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*2 failed*",
+ ])
+ p.write(py.code.Source("""
+ def test_1():
+ assert 1
+
+ def test_2():
+ assert 1
+
+ def test_3():
+ assert 0
+ """))
+ result = testdir.runpytest("--lf")
+ result.stdout.fnmatch_lines([
+ "*2 passed*1 desel*",
+ ])
+ result = testdir.runpytest("--lf")
+ result.stdout.fnmatch_lines([
+ "*1 failed*2 passed*",
+ ])
+ result = testdir.runpytest("--lf", "--clearcache")
+ result.stdout.fnmatch_lines([
+ "*1 failed*2 passed*",
+ ])
+
+ # Run this again to make sure clearcache is robust
+ if os.path.isdir('.cache'):
+ shutil.rmtree('.cache')
+ result = testdir.runpytest("--lf", "--clearcache")
+ result.stdout.fnmatch_lines([
+ "*1 failed*2 passed*",
+ ])
+
+ def test_failedfirst_order(self, testdir):
+ always_pass = testdir.tmpdir.join('test_a.py').write(py.code.Source("""
+ def test_always_passes():
+ assert 1
+ """))
+ always_fail = testdir.tmpdir.join('test_b.py').write(py.code.Source("""
+ def test_always_fails():
+ assert 0
+ """))
+ result = testdir.runpytest()
+ # Test order will be collection order; alphabetical
+ result.stdout.fnmatch_lines([
+ "test_a.py*",
+ "test_b.py*",
+ ])
+ result = testdir.runpytest("--lf", "--ff")
+ # Test order will be failing tests first
+ result.stdout.fnmatch_lines([
+ "test_b.py*",
+ "test_a.py*",
+ ])
+
+ @pytest.mark.skipif("sys.version_info < (2,6)")
+ def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
+ monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
+ testdir.makepyfile(test_a="""
+ def test_a1():
+ assert 0
+ def test_a2():
+ assert 1
+ """, test_b="""
+ def test_b1():
+ assert 0
+ """)
+ p = testdir.tmpdir.join("test_a.py")
+ p2 = testdir.tmpdir.join("test_b.py")
+
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*2 failed*",
+ ])
+ result = testdir.runpytest("--lf", p2)
+ result.stdout.fnmatch_lines([
+ "*1 failed*",
+ ])
+ p2.write(py.code.Source("""
+ def test_b1():
+ assert 1
+ """))
+ result = testdir.runpytest("--lf", p2)
+ result.stdout.fnmatch_lines([
+ "*1 passed*",
+ ])
+ result = testdir.runpytest("--lf", p)
+ result.stdout.fnmatch_lines([
+ "*1 failed*1 desel*",
+ ])
+
+ @pytest.mark.skipif("sys.version_info < (2,6)")
+ def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
+ monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
+ p1 = testdir.makepyfile("""
+ def test_1():
+ assert 0
+ """)
+ p2 = testdir.tmpdir.join("test_something.py")
+ p2.write(py.code.Source("""
+ def test_2():
+ assert 0
+ """))
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ "*2 failed*",
+ ])
+ result = testdir.runpytest("--lf", p2)
+ result.stdout.fnmatch_lines([
+ "*1 failed*",
+ ])
+ result = testdir.runpytest("--lf")
+ result.stdout.fnmatch_lines([
+ "*2 failed*",
+ ])
+
+ def test_lastfailed_xpass(self, testdir):
+ rep = testdir.inline_runsource1("""
+ import pytest
+ @pytest.mark.xfail
+ def test_hello():
+ assert 1
+ """)
+ config = testdir.parseconfigure()
+ lastfailed = config.cache.get("cache/lastfailed", -1)
+ assert not lastfailed
+
+ def test_lastfailed_collectfailure(self, testdir, monkeypatch):
+
+ testdir.makepyfile(test_maybe="""
+ import py
+ env = py.std.os.environ
+ if '1' == env['FAILIMPORT']:
+ raise ImportError('fail')
+ def test_hello():
+ assert '0' == env['FAILTEST']
+ """)
+
+ def rlf(fail_import, fail_run):
+ monkeypatch.setenv('FAILIMPORT', fail_import)
+ monkeypatch.setenv('FAILTEST', fail_run)
+
+ testdir.runpytest('-q')
+ config = testdir.parseconfigure()
+ lastfailed = config.cache.get("cache/lastfailed", -1)
+ return lastfailed
+
+ lastfailed = rlf(fail_import=0, fail_run=0)
+ assert not lastfailed
+
+ lastfailed = rlf(fail_import=1, fail_run=0)
+ assert list(lastfailed) == ['test_maybe.py']
+
+ lastfailed = rlf(fail_import=0, fail_run=1)
+ assert list(lastfailed) == ['test_maybe.py::test_hello']
+
+
+ def test_lastfailed_failure_subset(self, testdir, monkeypatch):
+
+ testdir.makepyfile(test_maybe="""
+ import py
+ env = py.std.os.environ
+ if '1' == env['FAILIMPORT']:
+ raise ImportError('fail')
+ def test_hello():
+ assert '0' == env['FAILTEST']
+ """)
+
+ testdir.makepyfile(test_maybe2="""
+ import py
+ env = py.std.os.environ
+ if '1' == env['FAILIMPORT']:
+ raise ImportError('fail')
+ def test_hello():
+ assert '0' == env['FAILTEST']
+
+ def test_pass():
+ pass
+ """)
+
+ def rlf(fail_import, fail_run, args=()):
+ monkeypatch.setenv('FAILIMPORT', fail_import)
+ monkeypatch.setenv('FAILTEST', fail_run)
+
+ result = testdir.runpytest('-q', '--lf', *args)
+ config = testdir.parseconfigure()
+ lastfailed = config.cache.get("cache/lastfailed", -1)
+ return result, lastfailed
+
+ result, lastfailed = rlf(fail_import=0, fail_run=0)
+ assert not lastfailed
+ result.stdout.fnmatch_lines([
+ '*3 passed*',
+ ])
+
+ result, lastfailed = rlf(fail_import=1, fail_run=0)
+ assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py']
+
+
+ result, lastfailed = rlf(fail_import=0, fail_run=0,
+ args=('test_maybe2.py',))
+ assert list(lastfailed) == ['test_maybe.py']
+
+
+ # edge case of test selection - even if we remember failures
+ # from other tests we still need to run all tests if no test
+ # matches the failures
+ result, lastfailed = rlf(fail_import=0, fail_run=0,
+ args=('test_maybe2.py',))
+ assert list(lastfailed) == ['test_maybe.py']
+ result.stdout.fnmatch_lines([
+ '*2 passed*',
+ ])
diff -r 6104c7ea0dccd13b892fa021a2fc989f60a8ff59 -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e testing/test_onchange.py
--- /dev/null
+++ b/testing/test_onchange.py
@@ -0,0 +1,88 @@
+from pytest_cache.onchange import StatRecorder
+
+class TestStatRecorder:
+ def test_filechange(self, tmpdir):
+ tmp = tmpdir
+ hello = tmp.ensure("hello.py")
+ sd = StatRecorder([tmp])
+ changed = sd.check()
+ assert not changed
+
+ hello.write("world")
+ changed = sd.check()
+ assert changed
+
+ (hello + "c").write("hello")
+ changed = sd.check()
+ assert not changed
+
+ p = tmp.ensure("new.py")
+ changed = sd.check()
+ assert changed
+
+ p.remove()
+ changed = sd.check()
+ assert changed
+
+ tmp.join("a", "b", "c.py").ensure()
+ changed = sd.check()
+ assert changed
+
+ tmp.join("a", "c.txt").ensure()
+ changed = sd.check()
+ assert changed
+ changed = sd.check()
+ assert not changed
+
+ tmp.join("a").remove()
+ changed = sd.check()
+ assert changed
+
+ def test_dirchange(self, tmpdir):
+ tmp = tmpdir
+ hello = tmp.ensure("dir", "hello.py")
+ sd = StatRecorder([tmp])
+ assert not sd.fil(tmp.join("dir"))
+
+ def test_filechange_deletion_race(self, tmpdir, monkeypatch):
+ tmp = tmpdir
+ sd = StatRecorder([tmp])
+ changed = sd.check()
+ assert not changed
+
+ p = tmp.ensure("new.py")
+ changed = sd.check()
+ assert changed
+
+ p.remove()
+ # make check()'s visit() call return our just removed
+ # path as if we were in a race condition
+ monkeypatch.setattr(tmp, 'visit', lambda *args: [p])
+
+ changed = sd.check()
+ assert changed
+
+ def test_pycremoval(self, tmpdir):
+ tmp = tmpdir
+ hello = tmp.ensure("hello.py")
+ sd = StatRecorder([tmp])
+ changed = sd.check()
+ assert not changed
+
+ pycfile = hello + "c"
+ pycfile.ensure()
+ hello.write("world")
+ changed = sd.check()
+ assert changed
+ assert not pycfile.check()
+
+ def test_waitonchange(self, tmpdir, monkeypatch):
+ tmp = tmpdir
+ sd = StatRecorder([tmp])
+
+ l = [True, False]
+ monkeypatch.setattr(StatRecorder, 'check', lambda self: l.pop())
+ sd.waitonchange(checkinterval=0.2)
+ assert not l
+
+
diff -r 6104c7ea0dccd13b892fa021a2fc989f60a8ff59 -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e testing/test_plugin.py
--- /dev/null
+++ b/testing/test_plugin.py
@@ -0,0 +1,50 @@
+import os
+import pytest
+import shutil
+import py
+
+pytest_plugins = "pytester",
+
+def test_version():
+ import pytest_cache
+ assert pytest_cache.__version__
+
+def test_cache_reportheader(testdir):
+ p = testdir.makepyfile("""
+ def test_hello():
+ pass
+ """)
+ cachedir = p.dirpath(".cache")
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines([
+ "cachedir: %s" % cachedir,
+ ])
+
+def test_cache_show(testdir):
+ result = testdir.runpytest("--cache")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines([
+ "*cache is empty*"
+ ])
+ p = testdir.makeconftest("""
+ def pytest_configure(config):
+ config.cache.set("my/name", [1,2,3])
+ config.cache.set("other/some", {1:2})
+ dp = config.cache.makedir("mydb")
+ dp.ensure("hello")
+ dp.ensure("world")
+ """)
+ result = testdir.runpytest()
+ assert result.ret == 0
+ result = testdir.runpytest("--cache")
+ result.stdout.fnmatch_lines_random([
+ "*cachedir:*",
+ "-*cache values*-",
+ "*my/name contains:",
+ " [1, 2, 3]",
+ "*other/some contains*",
+ " {*1*: 2}",
+ "-*cache directories*-",
+ "*mydb/hello*length 0*",
+ "*mydb/world*length 0*",
+ ])
https://bitbucket.org/hpk42/pytest/commits/92cace41d1b6/
Changeset: 92cace41d1b6
Branch: merge-cache
User: RonnyPfannschmidt
Date: 2015-02-26 18:24:35+00:00
Summary: make the tests pass, this is the baselevel for creating backward compat
Affected #: 8 files
diff -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e -r 92cace41d1b64f910484cd45e54954c65f6de447 _pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -51,7 +51,7 @@
default_plugins = (
"mark main terminal runner python pdb unittest capture skipping "
"tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
- "junitxml resultlog doctest").split()
+ "junitxml resultlog doctest cache").split()
def _preloadplugins():
assert not _preinit
diff -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e -r 92cace41d1b64f910484cd45e54954c65f6de447 _pytest/core.py
--- a/_pytest/core.py
+++ b/_pytest/core.py
@@ -10,7 +10,11 @@
assert py.__version__.split(".")[:2] >= ['1', '4'], ("installation problem: "
"%s is too old, remove or upgrade 'py'" % (py.__version__))
-py3 = sys.version_info > (3,0)
+py3 = sys.version_info > (3, 0)
+
+#
+_ALREADY_INCLUDED_PLUGINS = 'pytest-cache',
+
class TagTracer:
def __init__(self):
@@ -269,6 +273,11 @@
except ImportError:
return # XXX issue a warning
for ep in iter_entry_points('pytest11'):
+ project_name = getattr(ep.dist, 'project_name', None)
+ if project_name in _ALREADY_INCLUDED_PLUGINS:
+ self._warnings.append(
+ 'ignoring now included plugin ' + project_name)
+ continue
name = ep.name
if name.startswith("pytest_"):
name = name[7:]
diff -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e -r 92cace41d1b64f910484cd45e54954c65f6de447 _pytest/plugin.py
--- a/_pytest/plugin.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import py
-import pytest
-
-
-def pytest_addoption(parser):
- group = parser.getgroup("general")
- group.addoption(
- '--lf', action='store_true', dest="lf",
- help="rerun only the tests that failed at the last run (or all if none failed)")
- group.addoption(
- '--ff', action='store_true', dest="failedfirst",
- help="run all tests but run the last failures first. This may re-order "
- "tests and thus lead to repeated fixture setup/teardown")
- group.addoption(
- '--cache', action='store_true', dest="showcache",
- help="show cache contents, don't perform collection or tests")
- group.addoption(
- '--clearcache', action='store_true', dest="clearcache",
- help="remove all cache contents at start of test run.")
- group.addoption(
- '--looponchange', action='store_true', dest='looponchange',
- help='rerun every time the workdir changes')
- group.addoption(
- '--looponfail', action='store_true', dest='looponfail',
- help='rerun every time the workdir changes')
- parser.addini(
- "looponchangeroots", type="pathlist",
- help="directories to check for changes", default=[py.path.local()])
-
-
-def pytest_cmdline_main(config):
- if config.option.showcache:
- from _pytest.main import wrap_session
- return wrap_session(config, showcache)
- if config.option.looponchange or config.option.looponfail:
- from .onchange import looponchange
- return looponchange(config)
-
-
-@pytest.mark.tryfirst
-def pytest_configure(config):
- from .cache import Cache
- from .lastfail import LFPlugin
- config.cache = cache = Cache(config)
- config.pluginmanager.register(LFPlugin(config), "lfplugin")
-
-def pytest_report_header(config):
- if config.option.verbose:
- relpath = py.path.local().bestrelpath(config.cache._cachedir)
- return "cachedir: %s" % config.cache._cachedir
-
-def showcache(config, session):
- from pprint import pprint
- tw = py.io.TerminalWriter()
- tw.line("cachedir: " + str(config.cache._cachedir))
- if not config.cache._cachedir.check():
- tw.line("cache is empty")
- return 0
- dummy = object()
- basedir = config.cache._cachedir
- vdir = basedir.join("v")
- tw.sep("-", "cache values")
- for valpath in vdir.visit(lambda x: x.check(file=1)):
- key = valpath.relto(vdir).replace(valpath.sep, "/")
- val = config.cache.get(key, dummy)
- if val is dummy:
- tw.line("%s contains unreadable content, "
- "will be ignored" % key)
- else:
- tw.line("%s contains:" % key)
- stream = py.io.TextIO()
- pprint(val, stream=stream)
- for line in stream.getvalue().splitlines():
- tw.line(" " + line)
-
- ddir = basedir.join("d")
- if ddir.check(dir=1) and ddir.listdir():
- tw.sep("-", "cache directories")
- for p in basedir.join("d").visit():
- #if p.check(dir=1):
- # print("%s/" % p.relto(basedir))
- if p.check(file=1):
- key = p.relto(basedir)
- tw.line("%s is a file of length %d" % (
- key, p.size()))
diff -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e -r 92cace41d1b64f910484cd45e54954c65f6de447 testing/test_config.py
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -307,6 +307,7 @@
assert name == "pytest11"
class EntryPoint:
name = "mytestplugin"
+ dist = None
def load(self):
assert 0, "should not arrive here"
return iter([EntryPoint()])
diff -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e -r 92cace41d1b64f910484cd45e54954c65f6de447 testing/test_core.py
--- a/testing/test_core.py
+++ b/testing/test_core.py
@@ -70,12 +70,13 @@
assert name == "pytest11"
class EntryPoint:
name = "pytest_mytestplugin"
- dist = None
+ class dist:
+ name = None
def load(self):
class PseudoPlugin:
x = 42
return PseudoPlugin()
- return iter([EntryPoint()])
+ yield EntryPoint()
monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
pluginmanager = PluginManager()
@@ -83,6 +84,28 @@
plugin = pluginmanager.getplugin("mytestplugin")
assert plugin.x == 42
+ @pytest.mark.parametrize('distname', ['pytest-cache'])
+ def test_dont_consider_setuptools_included(self, distname, monkeypatch):
+ pkg_resources = pytest.importorskip("pkg_resources")
+ def my_iter(name):
+ assert name == "pytest11"
+ class EntryPoint:
+ name = "pytest_mytestplugin"
+ class dist:
+ project_name = distname
+ def load(self):
+ class PseudoPlugin:
+ x = 42
+ return PseudoPlugin()
+ yield EntryPoint()
+
+ monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
+ pluginmanager = PluginManager()
+ pluginmanager.consider_setuptools_entrypoints()
+ plugin = pluginmanager.getplugin("mytestplugin")
+ assert plugin is None
+
+
def test_consider_setuptools_not_installed(self, monkeypatch):
monkeypatch.setitem(py.std.sys.modules, 'pkg_resources',
py.std.types.ModuleType("pkg_resources"))
diff -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e -r 92cace41d1b64f910484cd45e54954c65f6de447 testing/test_onchange.py
--- a/testing/test_onchange.py
+++ b/testing/test_onchange.py
@@ -1,4 +1,4 @@
-from pytest_cache.onchange import StatRecorder
+from _pytest.onchange import StatRecorder
class TestStatRecorder:
def test_filechange(self, tmpdir):
diff -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e -r 92cace41d1b64f910484cd45e54954c65f6de447 testing/test_plugin.py
--- a/testing/test_plugin.py
+++ b/testing/test_plugin.py
@@ -5,9 +5,6 @@
pytest_plugins = "pytester",
-def test_version():
- import pytest_cache
- assert pytest_cache.__version__
def test_cache_reportheader(testdir):
p = testdir.makepyfile("""
diff -r ef582d901aeb52d7bf62acfdb2358ab184b0d37e -r 92cace41d1b64f910484cd45e54954c65f6de447 tox.ini
--- a/tox.ini
+++ b/tox.ini
@@ -15,8 +15,9 @@
[testenv:flakes]
changedir=
-deps = pytest-flakes>=0.2
-commands = py.test --flakes -m flakes _pytest testing
+deps = flake8
+ mccabe
+commands = pytest.py _pytest testing
[testenv:py27-xdist]
changedir=.
@@ -141,5 +142,8 @@
python_files=test_*.py *_test.py testing/*/*.py
python_classes=Test Acceptance
python_functions=test
-pep8ignore = E401 E225 E261 E128 E124 E302
norecursedirs = .tox ja .hg
+
+
+[flake8]
+ignore = E401 E225 E261 E128 E124 E302
https://bitbucket.org/hpk42/pytest/commits/e6a40d65fe50/
Changeset: e6a40d65fe50
Branch: merge-cache
User: RonnyPfannschmidt
Date: 2015-02-26 18:24:39+00:00
Summary: finish backward compat for looponfail
Affected #: 2 files
diff -r 92cace41d1b64f910484cd45e54954c65f6de447 -r e6a40d65fe5071539fa1ecd07af7f5831504a436 _pytest/cache.py
--- a/_pytest/cache.py
+++ b/_pytest/cache.py
@@ -1,7 +1,8 @@
import py
import pytest
import json
-
+import sys
+import pkg_resources
class Cache:
def __init__(self, config):
@@ -114,11 +115,15 @@
yield p
-import py
-import pytest
-
def pytest_addoption(parser):
+ try:
+ ls = pkg_resources.resource_listdir('xdist', '.')
+ except:
+ outside_looponfail = False
+ else:
+ outside_looponfail = 'looponfail.py' in ls
+
group = parser.getgroup("general")
group.addoption(
'--lf', action='store_true', dest="lf",
@@ -136,9 +141,10 @@
group.addoption(
'--looponchange', action='store_true', dest='looponchange',
help='rerun every time the workdir changes')
- group.addoption(
- '--looponfail', action='store_true', dest='looponfail',
- help='rerun every time the workdir changes')
+ if not outside_looponfail:
+ group._addoption(
+ '-f', '--looponfail', action='store_true', dest='looponfail',
+ help='rerun every time the workdir changes')
parser.addini(
"looponchangeroots", type="pathlist",
help="directories to check for changes", default=[py.path.local()])
diff -r 92cace41d1b64f910484cd45e54954c65f6de447 -r e6a40d65fe5071539fa1ecd07af7f5831504a436 _pytest/onchange.py
--- a/_pytest/onchange.py
+++ b/_pytest/onchange.py
@@ -1,5 +1,5 @@
import py
-
+import subprocess
SCRIPT = """
import pytest
@@ -7,14 +7,24 @@
"""
+def run_once(args, tw=None):
+ tw = py.io.TerminalWriter()
+ subprocess.call(args)
+ tw.line()
+ tw.sep('#', 'waiting for changes')
+ tw.line()
+
+
def looponchange(config):
newargs = config._origargs[:]
- newargs.remove('--looponchange')
+ if '--looponchange' in newargs:
+ newargs.remove('--looponchange')
+ else:
+ newargs.remove('-f')
stats = StatRecorder(config.getini('looponchangeroots'))
- command = py.std.functools.partial(
- py.std.subprocess.call, [
- py.std.sys.executable,
- '-c', SCRIPT % newargs])
+ command = py.std.functools.partial(run_once, [
+ py.std.sys.executable, '-c', SCRIPT % newargs])
+ command()
loop_forever(stats, command)
return 2
https://bitbucket.org/hpk42/pytest/commits/8614045feec5/
Changeset: 8614045feec5
Branch: merge-cache
User: RonnyPfannschmidt
Date: 2015-02-26 18:24:42+00:00
Summary: universal wheel
Affected #: 1 file
diff -r e6a40d65fe5071539fa1ecd07af7f5831504a436 -r 8614045feec5db98c19b2222409d55b446ac8b1b setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,6 +3,9 @@
build-dir = doc/build
all_files = 1
+[wheel]
+universal = 1
+
[upload_sphinx]
upload-dir = doc/en/build/html
Repository URL: https://bitbucket.org/hpk42/pytest/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the pytest-commit
mailing list