[pypy-svn] r74289 - in pypy/branch/py12/py: . _cmdline _code _compat _io _log _path _path/gateway _plugin _process _test
hpk at codespeak.net
hpk at codespeak.net
Fri Apr 30 17:07:56 CEST 2010
Author: hpk
Date: Fri Apr 30 17:07:52 2010
New Revision: 74289
Added:
pypy/branch/py12/py/
pypy/branch/py12/py/__init__.py
pypy/branch/py12/py/__init__.py.orig
pypy/branch/py12/py/__metainfo.py
pypy/branch/py12/py/_builtin.py
pypy/branch/py12/py/_cmdline/
pypy/branch/py12/py/_cmdline/__init__.py
pypy/branch/py12/py/_cmdline/pycleanup.py (contents, props changed)
pypy/branch/py12/py/_cmdline/pyconvert_unittest.py
pypy/branch/py12/py/_cmdline/pycountloc.py (contents, props changed)
pypy/branch/py12/py/_cmdline/pylookup.py (contents, props changed)
pypy/branch/py12/py/_cmdline/pysvnwcrevert.py (contents, props changed)
pypy/branch/py12/py/_cmdline/pytest.py (contents, props changed)
pypy/branch/py12/py/_cmdline/pywhich.py (contents, props changed)
pypy/branch/py12/py/_code/
pypy/branch/py12/py/_code/__init__.py
pypy/branch/py12/py/_code/_assertionnew.py
pypy/branch/py12/py/_code/_assertionold.py
pypy/branch/py12/py/_code/assertion.py
pypy/branch/py12/py/_code/code.py
pypy/branch/py12/py/_code/oldmagic.py
pypy/branch/py12/py/_code/oldmagic2.py
pypy/branch/py12/py/_code/source.py
pypy/branch/py12/py/_compat/
pypy/branch/py12/py/_compat/__init__.py
pypy/branch/py12/py/_compat/dep_doctest.py
pypy/branch/py12/py/_compat/dep_optparse.py
pypy/branch/py12/py/_compat/dep_subprocess.py
pypy/branch/py12/py/_compat/dep_textwrap.py
pypy/branch/py12/py/_error.py
pypy/branch/py12/py/_io/
pypy/branch/py12/py/_io/__init__.py
pypy/branch/py12/py/_io/capture.py
pypy/branch/py12/py/_io/saferepr.py
pypy/branch/py12/py/_io/terminalwriter.py
pypy/branch/py12/py/_log/
pypy/branch/py12/py/_log/__init__.py
pypy/branch/py12/py/_log/log.py
pypy/branch/py12/py/_log/warning.py
pypy/branch/py12/py/_path/
pypy/branch/py12/py/_path/__init__.py
pypy/branch/py12/py/_path/cacheutil.py
pypy/branch/py12/py/_path/common.py
pypy/branch/py12/py/_path/gateway/
pypy/branch/py12/py/_path/gateway/__init__.py
pypy/branch/py12/py/_path/gateway/channeltest.py
pypy/branch/py12/py/_path/gateway/channeltest2.py
pypy/branch/py12/py/_path/gateway/remotepath.py
pypy/branch/py12/py/_path/local.py
pypy/branch/py12/py/_path/svnurl.py
pypy/branch/py12/py/_path/svnwc.py
pypy/branch/py12/py/_plugin/
pypy/branch/py12/py/_plugin/__init__.py
pypy/branch/py12/py/_plugin/hookspec.py
pypy/branch/py12/py/_plugin/pytest__pytest.py
pypy/branch/py12/py/_plugin/pytest_assertion.py
pypy/branch/py12/py/_plugin/pytest_capture.py
pypy/branch/py12/py/_plugin/pytest_default.py
pypy/branch/py12/py/_plugin/pytest_doctest.py
pypy/branch/py12/py/_plugin/pytest_genscript.py (contents, props changed)
pypy/branch/py12/py/_plugin/pytest_helpconfig.py
pypy/branch/py12/py/_plugin/pytest_hooklog.py
pypy/branch/py12/py/_plugin/pytest_junitxml.py
pypy/branch/py12/py/_plugin/pytest_mark.py
pypy/branch/py12/py/_plugin/pytest_monkeypatch.py
pypy/branch/py12/py/_plugin/pytest_nose.py
pypy/branch/py12/py/_plugin/pytest_pastebin.py
pypy/branch/py12/py/_plugin/pytest_pdb.py
pypy/branch/py12/py/_plugin/pytest_pylint.py
pypy/branch/py12/py/_plugin/pytest_pytester.py
pypy/branch/py12/py/_plugin/pytest_pytester.py.orig
pypy/branch/py12/py/_plugin/pytest_recwarn.py
pypy/branch/py12/py/_plugin/pytest_restdoc.py
pypy/branch/py12/py/_plugin/pytest_resultlog.py
pypy/branch/py12/py/_plugin/pytest_runner.py
pypy/branch/py12/py/_plugin/pytest_skipping.py
pypy/branch/py12/py/_plugin/pytest_terminal.py
pypy/branch/py12/py/_plugin/pytest_terminal.py.orig
pypy/branch/py12/py/_plugin/pytest_tmpdir.py
pypy/branch/py12/py/_plugin/pytest_unittest.py
pypy/branch/py12/py/_plugin/standalonetemplate.py (contents, props changed)
pypy/branch/py12/py/_process/
pypy/branch/py12/py/_process/__init__.py
pypy/branch/py12/py/_process/cmdexec.py
pypy/branch/py12/py/_process/forkedfunc.py
pypy/branch/py12/py/_process/killproc.py
pypy/branch/py12/py/_std.py
pypy/branch/py12/py/_test/
pypy/branch/py12/py/_test/.pluginmanager.py.swp (contents, props changed)
pypy/branch/py12/py/_test/__init__.py
pypy/branch/py12/py/_test/cmdline.py
pypy/branch/py12/py/_test/collect.py
pypy/branch/py12/py/_test/config.py
pypy/branch/py12/py/_test/conftesthandle.py
pypy/branch/py12/py/_test/funcargs.py
pypy/branch/py12/py/_test/parseopt.py
pypy/branch/py12/py/_test/pluginmanager.py
pypy/branch/py12/py/_test/pycollect.py
pypy/branch/py12/py/_test/session.py
pypy/branch/py12/py/_xmlgen.py
pypy/branch/py12/py/apipkg.py
Log:
inlining 1694:c3cb8c7b94aa snapshot of bitbucket's py-trunk repo here for now
Added: pypy/branch/py12/py/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,178 @@
+"""
+py.test and pylib: rapid testing and development utils
+
+this module uses apipkg.py for lazy-loading sub modules
+and classes. The initpkg-dictionary below specifies
+name->value mappings where value can be another namespace
+dictionary or an import path.
+
+(c) Holger Krekel and others, 2004-2010
+"""
+__version__ = version = "1.2.2"
+
+import py.apipkg
+
+py.apipkg.initpkg(__name__, dict(
+ # access to all standard lib modules
+ std = '._std:std',
+ # access to all posix errno's as classes
+ error = '._error:error',
+
+ _pydir = '.__metainfo:pydir',
+ version = 'py:__version__', # backward compatibility
+
+ cmdline = {
+ 'pytest': '._cmdline.pytest:main',
+ 'pylookup': '._cmdline.pylookup:main',
+        'pycountloc': '._cmdline.pycountloc:main',
+ 'pytest': '._test.cmdline:main',
+ 'pylookup': '._cmdline.pylookup:main',
+ 'pycountloc': '._cmdline.pycountloc:main',
+ 'pycleanup': '._cmdline.pycleanup:main',
+ 'pywhich' : '._cmdline.pywhich:main',
+ 'pysvnwcrevert' : '._cmdline.pysvnwcrevert:main',
+ 'pyconvert_unittest' : '._cmdline.pyconvert_unittest:main',
+ },
+
+ test = {
+ # helpers for use from test functions or collectors
+ '__onfirstaccess__' : '._test.config:onpytestaccess',
+ '__doc__' : '._test:__doc__',
+ # configuration/initialization related test api
+ 'config' : '._test.config:config_per_process',
+ 'ensuretemp' : '._test.config:ensuretemp',
+ 'collect': {
+ 'Collector' : '._test.collect:Collector',
+ 'Directory' : '._test.collect:Directory',
+ 'File' : '._test.collect:File',
+ 'Item' : '._test.collect:Item',
+ 'Module' : '._test.pycollect:Module',
+ 'Class' : '._test.pycollect:Class',
+ 'Instance' : '._test.pycollect:Instance',
+ 'Generator' : '._test.pycollect:Generator',
+ 'Function' : '._test.pycollect:Function',
+ '_fillfuncargs' : '._test.funcargs:fillfuncargs',
+ },
+ 'cmdline': {
+ 'main' : '._test.cmdline:main', # backward compat
+ },
+ },
+
+ # hook into the top-level standard library
+ process = {
+ '__doc__' : '._process:__doc__',
+ 'cmdexec' : '._process.cmdexec:cmdexec',
+ 'kill' : '._process.killproc:kill',
+ 'ForkedFunc' : '._process.forkedfunc:ForkedFunc',
+ },
+
+ path = {
+ '__doc__' : '._path:__doc__',
+ 'svnwc' : '._path.svnwc:SvnWCCommandPath',
+ 'svnurl' : '._path.svnurl:SvnCommandPath',
+ 'local' : '._path.local:LocalPath',
+ 'SvnAuth' : '._path.svnwc:SvnAuth',
+ },
+
+ # some nice slightly magic APIs
+ magic = {
+ 'invoke' : '._code.oldmagic:invoke',
+ 'revoke' : '._code.oldmagic:revoke',
+ 'patch' : '._code.oldmagic:patch',
+ 'revert' : '._code.oldmagic:revert',
+ 'autopath' : '._path.local:autopath',
+ 'AssertionError' : '._code.oldmagic2:AssertionError',
+ },
+
+ # python inspection/code-generation API
+ code = {
+ '__doc__' : '._code:__doc__',
+ 'compile' : '._code.source:compile_',
+ 'Source' : '._code.source:Source',
+ 'Code' : '._code.code:Code',
+ 'Frame' : '._code.code:Frame',
+ 'ExceptionInfo' : '._code.code:ExceptionInfo',
+ 'Traceback' : '._code.code:Traceback',
+ 'getfslineno' : '._code.source:getfslineno',
+ 'getrawcode' : '._code.code:getrawcode',
+ 'patch_builtins' : '._code.code:patch_builtins',
+ 'unpatch_builtins' : '._code.code:unpatch_builtins',
+ '_AssertionError' : '._code.assertion:AssertionError',
+ '_reinterpret_old' : '._code.assertion:reinterpret_old',
+ '_reinterpret' : '._code.assertion:reinterpret',
+ },
+
+ # backports and additions of builtins
+ builtin = {
+ '__doc__' : '._builtin:__doc__',
+ 'enumerate' : '._builtin:enumerate',
+ 'reversed' : '._builtin:reversed',
+ 'sorted' : '._builtin:sorted',
+ 'set' : '._builtin:set',
+ 'frozenset' : '._builtin:frozenset',
+ 'BaseException' : '._builtin:BaseException',
+ 'GeneratorExit' : '._builtin:GeneratorExit',
+ 'print_' : '._builtin:print_',
+ '_reraise' : '._builtin:_reraise',
+ '_tryimport' : '._builtin:_tryimport',
+ 'exec_' : '._builtin:exec_',
+ '_basestring' : '._builtin:_basestring',
+ '_totext' : '._builtin:_totext',
+ '_isbytes' : '._builtin:_isbytes',
+ '_istext' : '._builtin:_istext',
+ '_getimself' : '._builtin:_getimself',
+ '_getfuncdict' : '._builtin:_getfuncdict',
+ '_getcode' : '._builtin:_getcode',
+ 'builtins' : '._builtin:builtins',
+ 'execfile' : '._builtin:execfile',
+ 'callable' : '._builtin:callable',
+ },
+
+ # input-output helping
+ io = {
+ '__doc__' : '._io:__doc__',
+ 'dupfile' : '._io.capture:dupfile',
+ 'TextIO' : '._io.capture:TextIO',
+ 'BytesIO' : '._io.capture:BytesIO',
+ 'FDCapture' : '._io.capture:FDCapture',
+ 'StdCapture' : '._io.capture:StdCapture',
+ 'StdCaptureFD' : '._io.capture:StdCaptureFD',
+ 'TerminalWriter' : '._io.terminalwriter:TerminalWriter',
+ 'ansi_print' : '._io.terminalwriter:ansi_print',
+ 'get_terminal_width' : '._io.terminalwriter:get_terminal_width',
+ 'saferepr' : '._io.saferepr:saferepr',
+ },
+
+ # small and mean xml/html generation
+ xml = {
+ '__doc__' : '._xmlgen:__doc__',
+ 'html' : '._xmlgen:html',
+ 'Tag' : '._xmlgen:Tag',
+ 'raw' : '._xmlgen:raw',
+ 'Namespace' : '._xmlgen:Namespace',
+ 'escape' : '._xmlgen:escape',
+ },
+
+ log = {
+ # logging API ('producers' and 'consumers' connected via keywords)
+ '__doc__' : '._log:__doc__',
+ '_apiwarn' : '._log.warning:_apiwarn',
+ 'Producer' : '._log.log:Producer',
+ 'setconsumer' : '._log.log:setconsumer',
+ '_setstate' : '._log.log:setstate',
+ '_getstate' : '._log.log:getstate',
+ 'Path' : '._log.log:Path',
+ 'STDOUT' : '._log.log:STDOUT',
+ 'STDERR' : '._log.log:STDERR',
+ 'Syslog' : '._log.log:Syslog',
+ },
+
+ # compatibility modules (deprecated)
+ compat = {
+ '__doc__' : '._compat:__doc__',
+ 'doctest' : '._compat.dep_doctest:doctest',
+ 'optparse' : '._compat.dep_optparse:optparse',
+ 'textwrap' : '._compat.dep_textwrap:textwrap',
+ 'subprocess' : '._compat.dep_subprocess:subprocess',
+ },
+))
Added: pypy/branch/py12/py/__init__.py.orig
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/__init__.py.orig Fri Apr 30 17:07:52 2010
@@ -0,0 +1,174 @@
+"""
+py.test and pylib: rapid testing and development utils
+
+this module uses apipkg.py for lazy-loading sub modules
+and classes. The initpkg-dictionary below specifies
+name->value mappings where value can be another namespace
+dictionary or an import path.
+
+(c) Holger Krekel and others, 2004-2010
+"""
+__version__ = version = "1.2.2"
+
+import py.apipkg
+
+py.apipkg.initpkg(__name__, dict(
+ # access to all standard lib modules
+ std = '._std:std',
+ # access to all posix errno's as classes
+ error = '._error:error',
+
+ _pydir = '.__metainfo:pydir',
+ version = 'py:__version__', # backward compatibility
+
+ cmdline = {
+ 'pytest': '._cmdline.pytest:main',
+ 'pylookup': '._cmdline.pylookup:main',
+        'pycountloc': '._cmdline.pycountloc:main',
+ 'pytest': '._test.cmdline:main',
+ 'pylookup': '._cmdline.pylookup:main',
+ 'pycountloc': '._cmdline.pycountloc:main',
+ 'pycleanup': '._cmdline.pycleanup:main',
+ 'pywhich' : '._cmdline.pywhich:main',
+ 'pysvnwcrevert' : '._cmdline.pysvnwcrevert:main',
+ 'pyconvert_unittest' : '._cmdline.pyconvert_unittest:main',
+ },
+
+ test = {
+ # helpers for use from test functions or collectors
+ '__onfirstaccess__' : '._test.config:onpytestaccess',
+ '__doc__' : '._test:__doc__',
+ # configuration/initialization related test api
+ 'config' : '._test.config:config_per_process',
+ 'ensuretemp' : '._test.config:ensuretemp',
+ 'collect': {
+ 'Collector' : '._test.collect:Collector',
+ 'Directory' : '._test.collect:Directory',
+ 'File' : '._test.collect:File',
+ 'Item' : '._test.collect:Item',
+ 'Module' : '._test.pycollect:Module',
+ 'Class' : '._test.pycollect:Class',
+ 'Instance' : '._test.pycollect:Instance',
+ 'Generator' : '._test.pycollect:Generator',
+ 'Function' : '._test.pycollect:Function',
+ '_fillfuncargs' : '._test.funcargs:fillfuncargs',
+ },
+ 'cmdline': {
+ 'main' : '._test.cmdline:main', # backward compat
+ },
+ },
+
+ # hook into the top-level standard library
+ process = {
+ '__doc__' : '._process:__doc__',
+ 'cmdexec' : '._process.cmdexec:cmdexec',
+ 'kill' : '._process.killproc:kill',
+ 'ForkedFunc' : '._process.forkedfunc:ForkedFunc',
+ },
+
+ path = {
+ '__doc__' : '._path:__doc__',
+ 'svnwc' : '._path.svnwc:SvnWCCommandPath',
+ 'svnurl' : '._path.svnurl:SvnCommandPath',
+ 'local' : '._path.local:LocalPath',
+ 'SvnAuth' : '._path.svnwc:SvnAuth',
+ },
+
+ # some nice slightly magic APIs
+ magic = {
+ 'invoke' : '._code.oldmagic:invoke',
+ 'revoke' : '._code.oldmagic:revoke',
+ 'patch' : '._code.oldmagic:patch',
+ 'revert' : '._code.oldmagic:revert',
+ 'autopath' : '._path.local:autopath',
+ 'AssertionError' : '._code.oldmagic2:AssertionError',
+ },
+
+ # python inspection/code-generation API
+ code = {
+ '__doc__' : '._code:__doc__',
+ 'compile' : '._code.source:compile_',
+ 'Source' : '._code.source:Source',
+ 'Code' : '._code.code:Code',
+ 'Frame' : '._code.code:Frame',
+ 'ExceptionInfo' : '._code.code:ExceptionInfo',
+ 'Traceback' : '._code.code:Traceback',
+ 'getfslineno' : '._code.source:getfslineno',
+ 'getrawcode' : '._code.code:getrawcode',
+ 'patch_builtins' : '._code.code:patch_builtins',
+ 'unpatch_builtins' : '._code.code:unpatch_builtins',
+ '_AssertionError' : '._code.assertion:AssertionError',
+ '_reinterpret_old' : '._code.assertion:reinterpret_old',
+ },
+
+ # backports and additions of builtins
+ builtin = {
+ '__doc__' : '._builtin:__doc__',
+ 'enumerate' : '._builtin:enumerate',
+ 'reversed' : '._builtin:reversed',
+ 'sorted' : '._builtin:sorted',
+ 'set' : '._builtin:set',
+ 'frozenset' : '._builtin:frozenset',
+ 'BaseException' : '._builtin:BaseException',
+ 'GeneratorExit' : '._builtin:GeneratorExit',
+ 'print_' : '._builtin:print_',
+ '_reraise' : '._builtin:_reraise',
+ '_tryimport' : '._builtin:_tryimport',
+ 'exec_' : '._builtin:exec_',
+ '_basestring' : '._builtin:_basestring',
+ '_totext' : '._builtin:_totext',
+ '_isbytes' : '._builtin:_isbytes',
+ '_istext' : '._builtin:_istext',
+ '_getimself' : '._builtin:_getimself',
+ '_getfuncdict' : '._builtin:_getfuncdict',
+ '_getcode' : '._builtin:_getcode',
+ 'builtins' : '._builtin:builtins',
+ 'execfile' : '._builtin:execfile',
+ 'callable' : '._builtin:callable',
+ },
+
+ # input-output helping
+ io = {
+ '__doc__' : '._io:__doc__',
+ 'dupfile' : '._io.capture:dupfile',
+ 'TextIO' : '._io.capture:TextIO',
+ 'BytesIO' : '._io.capture:BytesIO',
+ 'FDCapture' : '._io.capture:FDCapture',
+ 'StdCapture' : '._io.capture:StdCapture',
+ 'StdCaptureFD' : '._io.capture:StdCaptureFD',
+ 'TerminalWriter' : '._io.terminalwriter:TerminalWriter',
+ },
+
+ # small and mean xml/html generation
+ xml = {
+ '__doc__' : '._xmlgen:__doc__',
+ 'html' : '._xmlgen:html',
+ 'Tag' : '._xmlgen:Tag',
+ 'raw' : '._xmlgen:raw',
+ 'Namespace' : '._xmlgen:Namespace',
+ 'escape' : '._xmlgen:escape',
+ },
+
+ log = {
+ # logging API ('producers' and 'consumers' connected via keywords)
+ '__doc__' : '._log:__doc__',
+ '_apiwarn' : '._log.warning:_apiwarn',
+ 'Producer' : '._log.log:Producer',
+ 'setconsumer' : '._log.log:setconsumer',
+ '_setstate' : '._log.log:setstate',
+ '_getstate' : '._log.log:getstate',
+ 'Path' : '._log.log:Path',
+ 'STDOUT' : '._log.log:STDOUT',
+ 'STDERR' : '._log.log:STDERR',
+ 'Syslog' : '._log.log:Syslog',
+ },
+
+ # compatibility modules (deprecated)
+ compat = {
+ '__doc__' : '._compat:__doc__',
+ 'doctest' : '._compat.dep_doctest:doctest',
+ 'optparse' : '._compat.dep_optparse:optparse',
+ 'textwrap' : '._compat.dep_textwrap:textwrap',
+ 'subprocess' : '._compat.dep_subprocess:subprocess',
+ },
+))
Added: pypy/branch/py12/py/__metainfo.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/__metainfo.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,2 @@
+import py
+pydir = py.path.local(py.__file__).dirpath()
Added: pypy/branch/py12/py/_builtin.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_builtin.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,209 @@
+import sys
+
+try:
+ reversed = reversed
+except NameError:
+ def reversed(sequence):
+ """reversed(sequence) -> reverse iterator over values of the sequence
+
+ Return a reverse iterator
+ """
+ if hasattr(sequence, '__reversed__'):
+ return sequence.__reversed__()
+ if not hasattr(sequence, '__getitem__'):
+ raise TypeError("argument to reversed() must be a sequence")
+ return reversed_iterator(sequence)
+
+ class reversed_iterator(object):
+
+ def __init__(self, seq):
+ self.seq = seq
+ self.remaining = len(seq)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ i = self.remaining
+ if i > 0:
+ i -= 1
+ item = self.seq[i]
+ self.remaining = i
+ return item
+ raise StopIteration
+
+ def __length_hint__(self):
+ return self.remaining
+
+try:
+ sorted = sorted
+except NameError:
+ builtin_cmp = cmp # need to use cmp as keyword arg
+
+ def sorted(iterable, cmp=None, key=None, reverse=0):
+ use_cmp = None
+ if key is not None:
+ if cmp is None:
+ def use_cmp(x, y):
+ return builtin_cmp(x[0], y[0])
+ else:
+ def use_cmp(x, y):
+ return cmp(x[0], y[0])
+ l = [(key(element), element) for element in iterable]
+ else:
+ if cmp is not None:
+ use_cmp = cmp
+ l = list(iterable)
+ if use_cmp is not None:
+ l.sort(use_cmp)
+ else:
+ l.sort()
+ if reverse:
+ l.reverse()
+ if key is not None:
+ return [element for (_, element) in l]
+ return l
+
+try:
+ set, frozenset = set, frozenset
+except NameError:
+ from sets import set, frozenset
+
+# pass through
+enumerate = enumerate
+
+try:
+ BaseException = BaseException
+except NameError:
+ BaseException = Exception
+
+try:
+ GeneratorExit = GeneratorExit
+except NameError:
+ class GeneratorExit(Exception):
+ """ This exception is never raised, it is there to make it possible to
+ write code compatible with CPython 2.5 even in lower CPython
+ versions."""
+ pass
+ GeneratorExit.__module__ = 'exceptions'
+
+if sys.version_info >= (3, 0):
+ exec ("print_ = print ; exec_=exec")
+ import builtins
+
+ # some backward compatibility helpers
+ _basestring = str
+ def _totext(obj, encoding=None):
+ if isinstance(obj, bytes):
+ obj = obj.decode(encoding)
+ elif not isinstance(obj, str):
+ obj = str(obj)
+ return obj
+
+ def _isbytes(x):
+ return isinstance(x, bytes)
+ def _istext(x):
+ return isinstance(x, str)
+
+ def _getimself(function):
+ return getattr(function, '__self__', None)
+
+ def _getfuncdict(function):
+ return getattr(function, "__dict__", None)
+
+ def _getcode(function):
+ return getattr(function, "__code__", None)
+
+ def execfile(fn, globs=None, locs=None):
+ if globs is None:
+ back = sys._getframe(1)
+ globs = back.f_globals
+ locs = back.f_locals
+ del back
+ elif locs is None:
+ locs = globs
+ fp = open(fn, "rb")
+ try:
+ source = fp.read()
+ finally:
+ fp.close()
+ co = compile(source, fn, "exec", dont_inherit=True)
+ exec_(co, globs, locs)
+
+ def callable(obj):
+ return hasattr(obj, "__call__")
+
+else:
+ import __builtin__ as builtins
+ _totext = unicode
+ _basestring = basestring
+ execfile = execfile
+ callable = callable
+ def _isbytes(x):
+ return isinstance(x, str)
+ def _istext(x):
+ return isinstance(x, unicode)
+
+ def _getimself(function):
+ return getattr(function, 'im_self', None)
+
+ def _getfuncdict(function):
+ return getattr(function, "__dict__", None)
+
+ def _getcode(function):
+ return getattr(function, "func_code", None)
+
+ def print_(*args, **kwargs):
+ """ minimal backport of py3k print statement. """
+ sep = ' '
+ if 'sep' in kwargs:
+ sep = kwargs.pop('sep')
+ end = '\n'
+ if 'end' in kwargs:
+ end = kwargs.pop('end')
+ file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
+ if kwargs:
+ args = ", ".join([str(x) for x in kwargs])
+ raise TypeError("invalid keyword arguments: %s" % args)
+ at_start = True
+ for x in args:
+ if not at_start:
+ file.write(sep)
+ file.write(str(x))
+ at_start = False
+ file.write(end)
+
+ def exec_(obj, globals=None, locals=None):
+ """ minimal backport of py3k exec statement. """
+ if globals is None:
+ frame = sys._getframe(1)
+ globals = frame.f_globals
+ if locals is None:
+ locals = frame.f_locals
+ elif locals is None:
+ locals = globals
+ exec2(obj, globals, locals)
+
+if sys.version_info >= (3,0):
+ exec ("""
+def _reraise(cls, val, tb):
+ assert hasattr(val, '__traceback__')
+ raise val
+""")
+else:
+ exec ("""
+def _reraise(cls, val, tb):
+ raise cls, val, tb
+def exec2(obj, globals, locals):
+ exec obj in globals, locals
+""")
+
+def _tryimport(*names):
+ """ return the first successfully imported module. """
+ assert names
+ for name in names:
+ try:
+ return __import__(name, None, None, '__doc__')
+ except ImportError:
+ excinfo = sys.exc_info()
+ _reraise(*excinfo)
Added: pypy/branch/py12/py/_cmdline/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_cmdline/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1 @@
+#
Added: pypy/branch/py12/py/_cmdline/pycleanup.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_cmdline/pycleanup.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+
+"""\
+py.cleanup [PATH] ...
+
+Delete typical python development related files recursively under the specified PATH (which defaults to the current working directory). Don't follow links and don't recurse into directories with a dot. Optionally remove setup.py related files and empty
+directories.
+
+"""
+import py
+import sys, subprocess
+
+def main():
+ parser = py.std.optparse.OptionParser(usage=__doc__)
+ parser.add_option("-e", metavar="ENDING",
+ dest="endings", default=[".pyc", "$py.class"], action="append",
+ help=("(multi) recursively remove files with the given ending."
+ " '.pyc' and '$py.class' are in the default list."))
+ parser.add_option("-d", action="store_true", dest="removedir",
+ help="remove empty directories.")
+ parser.add_option("-s", action="store_true", dest="setup",
+ help="remove 'build' and 'dist' directories next to setup.py files")
+ parser.add_option("-a", action="store_true", dest="all",
+ help="synonym for '-S -d -e pip-log.txt'")
+ parser.add_option("-n", "--dryrun", dest="dryrun", default=False,
+ action="store_true",
+ help="don't actually delete but display would-be-removed filenames.")
+ (options, args) = parser.parse_args()
+
+ Cleanup(options, args).main()
+
+class Cleanup:
+ def __init__(self, options, args):
+ if not args:
+ args = ["."]
+ self.options = options
+ self.args = [py.path.local(x) for x in args]
+ if options.all:
+ options.setup = True
+ options.removedir = True
+ options.endings.append("pip-log.txt")
+
+ def main(self):
+ if self.options.setup:
+ for arg in self.args:
+ self.setupclean(arg)
+
+ for path in self.args:
+ py.builtin.print_("cleaning path", path,
+ "of extensions", self.options.endings)
+ for x in path.visit(self.shouldremove, self.recursedir):
+ self.remove(x)
+ if self.options.removedir:
+ for x in path.visit(lambda x: x.check(dir=1), self.recursedir):
+ if not x.listdir():
+ self.remove(x)
+
+ def shouldremove(self, p):
+ for ending in self.options.endings:
+ if p.basename.endswith(ending):
+ return True
+
+ def recursedir(self, path):
+ return path.check(dotfile=0, link=0)
+
+ def remove(self, path):
+ if not path.check():
+ return
+ if self.options.dryrun:
+ py.builtin.print_("would remove", path)
+ else:
+ py.builtin.print_("removing", path)
+ path.remove()
+
+ def XXXcallsetup(self, setup, *args):
+ old = setup.dirpath().chdir()
+ try:
+ subprocess.call([sys.executable, str(setup)] + list(args))
+ finally:
+ old.chdir()
+
+ def setupclean(self, path):
+ for x in path.visit("setup.py", self.recursedir):
+ basepath = x.dirpath()
+ self.remove(basepath / "build")
+ self.remove(basepath / "dist")
Added: pypy/branch/py12/py/_cmdline/pyconvert_unittest.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_cmdline/pyconvert_unittest.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,253 @@
+import re
+import sys
+
+try:
+ import parser
+except ImportError:
+ parser = None
+
+d={}
+# d is the dictionary of unittest changes, keyed to the old name
+# used by unittest.
+# d[old][0] is the new replacement function.
+# d[old][1] is the operator you will substitute, or '' if there is none.
+# d[old][2] is the possible number of arguments to the unittest
+# function.
+
+# Old Unittest Name new name operator # of args
+d['assertRaises'] = ('raises', '', ['Any'])
+d['fail'] = ('raise AssertionError', '', [0,1])
+d['assert_'] = ('assert', '', [1,2])
+d['failIf'] = ('assert not', '', [1,2])
+d['assertEqual'] = ('assert', ' ==', [2,3])
+d['failIfEqual'] = ('assert not', ' ==', [2,3])
+d['assertIn'] = ('assert', ' in', [2,3])
+d['assertNotIn'] = ('assert', ' not in', [2,3])
+d['assertNotEqual'] = ('assert', ' !=', [2,3])
+d['failUnlessEqual'] = ('assert', ' ==', [2,3])
+d['assertAlmostEqual'] = ('assert round', ' ==', [2,3,4])
+d['failIfAlmostEqual'] = ('assert not round', ' ==', [2,3,4])
+d['assertNotAlmostEqual'] = ('assert round', ' !=', [2,3,4])
+d['failUnlessAlmostEquals'] = ('assert round', ' ==', [2,3,4])
+
+# the list of synonyms
+d['failUnlessRaises'] = d['assertRaises']
+d['failUnless'] = d['assert_']
+d['assertEquals'] = d['assertEqual']
+d['assertNotEquals'] = d['assertNotEqual']
+d['assertAlmostEquals'] = d['assertAlmostEqual']
+d['assertNotAlmostEquals'] = d['assertNotAlmostEqual']
+
+# set up the regular expressions we will need
+leading_spaces = re.compile(r'^(\s*)') # this never fails
+
+pat = ''
+for k in d.keys(): # this complicated pattern to match all unittests
+ pat += '|' + r'^(\s*)' + 'self.' + k + r'\(' # \tself.whatever(
+
+old_names = re.compile(pat[1:])
+linesep='\n' # nobody will really try to convert files not read
+ # in text mode, will they?
+
+
+def blocksplitter(fp):
+ '''split a file into blocks that are headed by functions to rename'''
+
+ blocklist = []
+ blockstring = ''
+
+ for line in fp:
+ interesting = old_names.match(line)
+ if interesting :
+ if blockstring:
+ blocklist.append(blockstring)
+ blockstring = line # reset the block
+ else:
+ blockstring += line
+
+ blocklist.append(blockstring)
+ return blocklist
+
+def rewrite_utest(block):
+ '''rewrite every block to use the new utest functions'''
+
+ '''returns the rewritten unittest, unless it ran into problems,
+ in which case it just returns the block unchanged.
+ '''
+ utest = old_names.match(block)
+
+ if not utest:
+ return block
+
+ old = utest.group(0).lstrip()[5:-1] # the name we want to replace
+ new = d[old][0] # the name of the replacement function
+ op = d[old][1] # the operator you will use , or '' if there is none.
+ possible_args = d[old][2] # a list of the number of arguments the
+ # unittest function could possibly take.
+
+ if possible_args == ['Any']: # just rename assertRaises & friends
+ return re.sub('self.'+old, new, block)
+
+ message_pos = possible_args[-1]
+ # the remaining unittests can have an optional message to print
+ # when they fail. It is always the last argument to the function.
+
+ try:
+ indent, argl, trailer = decompose_unittest(old, block)
+
+ except SyntaxError: # but we couldn't parse it!
+ return block
+
+ argnum = len(argl)
+ if argnum not in possible_args:
+ # sanity check - this one isn't real either
+ return block
+
+ elif argnum == message_pos:
+ message = argl[-1]
+ argl = argl[:-1]
+ else:
+ message = None
+
+    if argnum == 0 or (argnum == 1 and argnum == message_pos): #unittest fail()
+ string = ''
+ if message:
+ message = ' ' + message
+
+    elif message_pos == 4: # assertAlmostEqual & friends
+ try:
+ pos = argl[2].lstrip()
+ except IndexError:
+ pos = '7' # default if none is specified
+ string = '(%s -%s, %s)%s 0' % (argl[0], argl[1], pos, op )
+
+ else: # assert_, assertEquals and all the rest
+ string = ' ' + op.join(argl)
+
+ if message:
+ string = string + ',' + message
+
+ return indent + new + string + trailer
+
+def decompose_unittest(old, block):
+ '''decompose the block into its component parts'''
+
+ ''' returns indent, arglist, trailer
+ indent -- the indentation
+ arglist -- the arguments to the unittest function
+ trailer -- any extra junk after the closing paren, such as #commment
+ '''
+
+ indent = re.match(r'(\s*)', block).group()
+ pat = re.search('self.' + old + r'\(', block)
+
+ args, trailer = get_expr(block[pat.end():], ')')
+ arglist = break_args(args, [])
+
+ if arglist == ['']: # there weren't any
+ return indent, [], trailer
+
+ for i in range(len(arglist)):
+ try:
+ parser.expr(arglist[i].lstrip('\t '))
+ except SyntaxError:
+ if i == 0:
+ arglist[i] = '(' + arglist[i] + ')'
+ else:
+ arglist[i] = ' (' + arglist[i] + ')'
+
+ return indent, arglist, trailer
+
+def break_args(args, arglist):
+ '''recursively break a string into a list of arguments'''
+ try:
+ first, rest = get_expr(args, ',')
+ if not rest:
+ return arglist + [first]
+ else:
+ return [first] + break_args(rest, arglist)
+ except SyntaxError:
+ return arglist + [args]
+
+def get_expr(s, char):
+ '''split a string into an expression, and the rest of the string'''
+
+ pos=[]
+ for i in range(len(s)):
+ if s[i] == char:
+ pos.append(i)
+ if pos == []:
+ raise SyntaxError # we didn't find the expected char. Ick.
+
+ for p in pos:
+ # make the python parser do the hard work of deciding which comma
+ # splits the string into two expressions
+ try:
+ parser.expr('(' + s[:p] + ')')
+ return s[:p], s[p+1:]
+ except SyntaxError: # It's not an expression yet
+ pass
+ raise SyntaxError # We never found anything that worked.
+
+
+def main():
+ import sys
+ import py
+
+ usage = "usage: %prog [-s [filename ...] | [-i | -c filename ...]]"
+ optparser = py.std.optparse.OptionParser(usage)
+
+ def select_output (option, opt, value, optparser, **kw):
+ if hasattr(optparser, 'output'):
+ optparser.error(
+ 'Cannot combine -s -i and -c options. Use one only.')
+ else:
+ optparser.output = kw['output']
+
+ optparser.add_option("-s", "--stdout", action="callback",
+ callback=select_output,
+ callback_kwargs={'output':'stdout'},
+ help="send your output to stdout")
+
+ optparser.add_option("-i", "--inplace", action="callback",
+ callback=select_output,
+ callback_kwargs={'output':'inplace'},
+ help="overwrite files in place")
+
+ optparser.add_option("-c", "--copy", action="callback",
+ callback=select_output,
+ callback_kwargs={'output':'copy'},
+ help="copy files ... fn.py --> fn_cp.py")
+
+ options, args = optparser.parse_args()
+
+ output = getattr(optparser, 'output', 'stdout')
+
+ if output in ['inplace', 'copy'] and not args:
+ optparser.error(
+ '-i and -c option require at least one filename')
+
+ if not args:
+ s = ''
+ for block in blocksplitter(sys.stdin):
+ s += rewrite_utest(block)
+ sys.stdout.write(s)
+
+ else:
+ for infilename in args: # no error checking to see if we can open, etc.
+ infile = file(infilename)
+ s = ''
+ for block in blocksplitter(infile):
+ s += rewrite_utest(block)
+ if output == 'inplace':
+ outfile = file(infilename, 'w+')
+ elif output == 'copy': # yes, just go clobber any existing .cp
+ outfile = file (infilename[:-3]+ '_cp.py', 'w+')
+ else:
+ outfile = sys.stdout
+
+ outfile.write(s)
+
+
+if __name__ == '__main__':
+ main()
Added: pypy/branch/py12/py/_cmdline/pycountloc.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_cmdline/pycountloc.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+# hands on script to compute the non-empty Lines of Code
+# for tests and non-test code
+
+"""\
+py.countloc [PATHS]
+
+Count (non-empty) lines of python code and number of python files recursively
+starting from a list of paths given on the command line (starting from the
+current working directory). Distinguish between test files and normal ones and
+report them separately.
+"""
+import py
+
def main():
    """Entry point: parse the command line and count lines in the given paths."""
    optparser = py.std.optparse.OptionParser(usage=__doc__)
    options, args = optparser.parse_args()
    countloc(args)
+
def nodot(p):
    # visit() recursion filter: only descend into paths that are not
    # dotfiles (e.g. skips .svn directories).
    return p.check(dotfile=0)
+
class FileCounter(object):
    """Accumulates per-file line counts.

    `file2numlines` maps each added file object/path to its line count;
    `numfiles` and `numlines` are running totals.  Added files only need a
    readlines() method and must be hashable (they are used as dict keys).
    """
    def __init__(self):
        self.file2numlines = {}
        self.numlines = 0
        self.numfiles = 0

    def addrecursive(self, directory, fil="*.py", rec=None):
        """Add every file below `directory` matching `fil`, recursing only
        into directories accepted by `rec` (default: skip dotfiles)."""
        # The default is resolved at call time so the class does not depend
        # on `nodot` being defined before the class body executes.
        if rec is None:
            rec = nodot
        for x in directory.visit(fil, rec):
            self.addfile(x)

    def addfile(self, fn, emptylines=False):
        """Count the lines of `fn` and update the totals.

        With emptylines=True every line counts; otherwise only lines with
        non-whitespace content.
        """
        if emptylines:
            # bug fix: this branch referenced an undefined name `p`
            # (NameError); the parameter is `fn`.
            s = len(fn.readlines())
        else:
            s = 0
            for i in fn.readlines():
                if i.strip():
                    s += 1
        self.file2numlines[fn] = s
        self.numfiles += 1
        self.numlines += s

    def getnumlines(self, fil):
        """Sum the line counts of all recorded files accepted by `fil`."""
        numlines = 0
        for path, value in self.file2numlines.items():
            if fil(path):
                numlines += value
        return numlines

    def getnumfiles(self, fil):
        """Count the recorded files accepted by `fil`."""
        numfiles = 0
        for path in self.file2numlines:
            if fil(path):
                numfiles += 1
        return numfiles
+
def get_loccount(locations=None):
    """Count lines below `locations` (defaults to the current directory).

    Returns (counter, numfiles, numlines, numtestfiles, numtestlines) where
    test files are those matching 'test_*.py'.
    """
    if locations is None:
        # bug fix: the assignment was misspelled `localtions`, so passing
        # locations=None crashed when iterating None below.
        locations = [py.path.local()]
    counter = FileCounter()
    for loc in locations:
        counter.addrecursive(loc, '*.py', rec=nodot)

    def istestfile(p):
        return p.check(fnmatch='test_*.py')
    def isnottestfile(p):
        return not istestfile(p)

    numfiles = counter.getnumfiles(isnottestfile)
    numlines = counter.getnumlines(isnottestfile)
    numtestfiles = counter.getnumfiles(istestfile)
    numtestlines = counter.getnumlines(istestfile)

    return counter, numfiles, numlines, numtestfiles, numtestlines
+
def countloc(paths=None):
    """Count lines under `paths` (default: '.') and print a summary report."""
    if not paths:
        paths = ['.']
    locations = [py.path.local(x) for x in paths]
    (counter, numfiles, numlines, numtestfiles,
     numtestlines) = get_loccount(locations)

    # Per-file counts, smallest first.
    # bug fix: `dict.items()` has no .sort() on py3 and cmp-style sorting is
    # py2-only; sorted() with a key behaves identically on both.
    items = sorted(counter.file2numlines.items(), key=lambda item: item[1])
    for x, y in items:
        print("%3d %30s" % (y,x))

    print("%30s %3d" %("number of testfiles", numtestfiles))
    print("%30s %3d" %("number of non-empty testlines", numtestlines))
    print("%30s %3d" %("number of files", numfiles))
    print("%30s %3d" %("number of non-empty lines", numlines))
+
Added: pypy/branch/py12/py/_cmdline/pylookup.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_cmdline/pylookup.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+"""\
+py.lookup [search_directory] SEARCH_STRING [options]
+
+Looks recursively at Python files for a SEARCH_STRING, starting from the
+present working directory. Prints the line, with the filename and line-number
+prepended."""
+
+import sys, os
+import py
+from py.io import ansi_print, get_terminal_width
+import re
+
def rec(p):
    # visit() recursion filter: do not descend into dotfile directories.
    return p.check(dotfile=0)
+
# Command line interface, built at import time so main() can use it.
parser = py.std.optparse.OptionParser(usage=__doc__)
parser.add_option("-i", "--ignore-case", action="store_true", dest="ignorecase",
                  help="ignore case distinctions")
parser.add_option("-C", "--context", action="store", type="int", dest="context",
                  default=0, help="How many lines of output to show")

# Queried once at import; used to draw separator lines between context blocks.
terminal_width = get_terminal_width()
+
def find_indexes(search_line, string):
    """Return the start offsets of every non-overlapping occurrence of
    `string` in `search_line`, in ascending order."""
    positions = []
    start = 0
    while True:
        hit = search_line.find(string, start)
        if hit == -1:
            return positions
        positions.append(hit)
        start = hit + len(string)
+
def main():
    """Search python files below a directory for SEARCH_STRING and print the
    matching filenames and lines (optionally with -C context lines)."""
    (options, args) = parser.parse_args()
    if len(args) == 2:
        search_dir, string = args
        search_dir = py.path.local(search_dir)
    else:
        search_dir = py.path.local()
        string = args[0]
    if options.ignorecase:
        string = string.lower()
    for x in search_dir.visit('*.py', rec):
        # match filename directly
        s = x.relto(search_dir)
        if options.ignorecase:
            s = s.lower()
        if s.find(string) != -1:
            sys.stdout.write("%s: filename matches %r" %(x, string) + "\n")

        try:
            s = x.read()
        except py.error.ENOENT:
            # bug fix: previously `pass`, which fell through and searched a
            # stale (or unbound) `s` from the filename check above.
            continue # whatever, probably broken link (ie emacs lock)
        searchs = s
        if options.ignorecase:
            searchs = s.lower()
        # bug fix: the gate searched the original `s`, so with -i a match
        # differing only in case was silently skipped; search `searchs`.
        if searchs.find(string) != -1:
            lines = s.splitlines()
            if options.ignorecase:
                searchlines = s.lower().splitlines()
            else:
                searchlines = lines
            for i, (line, searchline) in enumerate(zip(lines, searchlines)):
                indexes = find_indexes(searchline, string)
                if not indexes:
                    continue
                if not options.context:
                    # Print the matching line, highlighting each hit in red.
                    sys.stdout.write("%s:%d: " %(x.relto(search_dir), i+1))
                    last_index = 0
                    for index in indexes:
                        sys.stdout.write(line[last_index: index])
                        ansi_print(line[index: index+len(string)],
                                   file=sys.stdout, esc=31, newline=False)
                        last_index = index + len(string)
                    sys.stdout.write(line[last_index:] + "\n")
                else:
                    # bug fix: use floor division so the context size stays an
                    # int on py3 ("/" would produce a float for range()).
                    context = options.context // 2
                    # NOTE(review): the min(len(lines) - 1, ...) stop looks
                    # like it can never show the file's last line -- confirm.
                    for count in range(max(0, i-context),
                                       min(len(lines) - 1, i+context+1)):
                        print("%s:%d: %s" %(x.relto(search_dir), count+1,
                                            lines[count].rstrip()))
                    print("-" * terminal_width)
Added: pypy/branch/py12/py/_cmdline/pysvnwcrevert.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_cmdline/pysvnwcrevert.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,55 @@
+#! /usr/bin/env python
+"""\
+py.svnwcrevert [options] WCPATH
+
+Running this script and then 'svn up' puts the working copy WCPATH in a state
+as clean as a fresh check-out.
+
+WARNING: you'll lose all local changes, obviously!
+
+This script deletes all files that have been modified
+or that svn doesn't explicitly know about, including svn:ignored files
+(like .pyc files, hint hint).
+
+The goal of this script is to leave the working copy with some files and
+directories possibly missing, but - most importantly - in a state where
+the following 'svn up' won't just crash.
+"""
+
+import sys, py
+
def kill(p, root):
    """Report path `p` (relative to `root`) and delete it recursively."""
    relative = p.relto(root)
    print('< %s' % (relative,))
    p.remove(rec=1)
+
def svnwcrevert(path, root=None, precious=()):
    """Recursively delete everything below `path` that svn does not report as
    unchanged, so that a following 'svn up' restores a pristine checkout.

    path: local working-copy directory (or file) to clean.
    root: top of the working copy, used only for relative reporting;
          defaults to `path` itself on the outermost call.
    precious: basenames that must never be deleted.

    Note: the default for `precious` was a mutable list literal; an empty
    tuple avoids the shared-mutable-default pitfall and behaves identically
    for the membership test below.
    """
    if root is None:
        root = path
    wcpath = py.path.svnwc(path)
    try:
        st = wcpath.status()
    except ValueError:   # typically, "bad char in wcpath"
        kill(path, root)
        return
    for p in path.listdir():
        if p.basename == '.svn' or p.basename in precious:
            continue
        wcp = py.path.svnwc(p)
        if wcp not in st.unchanged and wcp not in st.external:
            # modified or unknown to svn: delete it
            kill(p, root)
        elif p.check(dir=1):
            # unchanged directory: clean its contents recursively
            svnwcrevert(p, root)
+
+# XXX add a functional test
+
# Command line interface, built at import time so main() can use it.
parser = py.std.optparse.OptionParser(usage=__doc__)
parser.add_option("-p", "--precious",
                  action="append", dest="precious", default=[],
                  help="preserve files with this name")
+
def main():
    """Entry point: revert the working copy named on the command line."""
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        sys.exit(2)
    svnwcrevert(py.path.local(args[0]), precious=options.precious)
Added: pypy/branch/py12/py/_cmdline/pytest.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_cmdline/pytest.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+import py
+
def main(args):
    """Forward `args` to the py.test command line front-end."""
    py.test.cmdline.main(args)
Added: pypy/branch/py12/py/_cmdline/pywhich.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_cmdline/pywhich.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+"""\
+py.which [name]
+
+print the location of the given python module or package name
+"""
+
+import sys
+
def main():
    """Print the filesystem location of the module named in sys.argv[1].

    Errors (import failure, or a module without __file__) are reported
    on stderr.
    """
    name = sys.argv[1]
    try:
        mod = __import__(name)
    except ImportError:
        sys.stderr.write("could not import: " + name + "\n")
    else:
        try:
            location = mod.__file__
        except AttributeError:
            # bug fix: this message lacked the trailing newline the other
            # stderr message has, gluing it to subsequent output.
            sys.stderr.write("module (has no __file__): " + str(mod) + "\n")
        else:
            print(location)
Added: pypy/branch/py12/py/_code/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_code/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1 @@
+""" python inspection/code generation API """
Added: pypy/branch/py12/py/_code/_assertionnew.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_code/_assertionnew.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,337 @@
+"""
+Like _assertion.py but using builtin AST. It should replace _assertionold.py
+eventually.
+"""
+
+import sys
+import ast
+
+import py
+from py._code.assertion import _format_explanation, BuiltinAssertionError
+
+
# Jython before 2.5.2 lacks the abstract ast.expr/ast.stmt base classes,
# so node kinds are detected against explicit whitelists of the concrete
# node classes instead of isinstance checks.
if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
    # See http://bugs.jython.org/issue1497
    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
              "List", "Tuple")
    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
    _expr_nodes = set(getattr(ast, name) for name in _exprs)
    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
    def _is_ast_expr(node):
        # True when `node` is an expression node.
        return node.__class__ in _expr_nodes
    def _is_ast_stmt(node):
        # True when `node` is a statement node.
        return node.__class__ in _stmt_nodes
else:
    # CPython exposes the abstract base classes directly.
    def _is_ast_expr(node):
        return isinstance(node, ast.expr)
    def _is_ast_stmt(node):
        return isinstance(node, ast.stmt)
+
+
class Failure(Exception):
    """Error found while interpreting AST.

    `cause` snapshots sys.exc_info() at construction time; `explanation`
    carries the partial assertion explanation built so far.
    """

    def __init__(self, explanation=""):
        self.cause = sys.exc_info()
        self.explanation = explanation
+
+
def interpret(source, frame, should_fail=False):
    """Re-evaluate the assert statement in `source` within `frame`.

    Returns the textual failure explanation, or -- when the re-run did not
    fail although `should_fail` says it originally did -- a hint message.
    """
    tree = ast.parse(source)
    interp = DebugInterpreter(frame)
    try:
        interp.visit(tree)
    except Failure:
        return getfailure(sys.exc_info()[1])
    if should_fail:
        return ("(assertion failed, but when it was re-run for "
                "printing intermediate values, it did not fail. Suggestions: "
                "compute assert expression before the assert or use --no-assert)")
+
def run(offending_line, frame=None):
    """Interpret `offending_line` in `frame` (default: the caller's frame)
    and return the failure explanation, if any."""
    if frame is None:
        frame = py.code.Frame(sys._getframe(1))
    return interpret(offending_line, frame)
+
def getfailure(failure):
    """Turn a Failure into the final 'ExcName: explanation' display text."""
    explanation = _format_explanation(failure.explanation)
    value = failure.cause[1]
    if str(value):
        # Append the exception value to the first explanation line.
        lines = explanation.splitlines() or [""]
        lines[0] += " << %s" % (value,)
        explanation = "\n".join(lines)
    text = "%s: %s" % (failure.cause[0].__name__, explanation)
    # Drop the redundant 16-char "AssertionError: " prefix for plain asserts.
    if text.startswith("AssertionError: assert "):
        text = text[16:]
    return text
+
+
# Source-text spellings of the binary/comparison operator nodes, used to
# rebuild "<left> <op> <right>" snippets for re-evaluation and display.
operator_map = {
    ast.BitOr : "|",
    ast.BitXor : "^",
    ast.BitAnd : "&",
    ast.LShift : "<<",
    ast.RShift : ">>",
    ast.Add : "+",
    ast.Sub : "-",
    ast.Mult : "*",
    ast.Div : "/",
    ast.FloorDiv : "//",
    ast.Mod : "%",
    ast.Eq : "==",
    ast.NotEq : "!=",
    ast.Lt : "<",
    ast.LtE : "<=",
    ast.Gt : ">",
    ast.GtE : ">=",
    ast.Is : "is",
    ast.IsNot : "is not",
    ast.In : "in",
    ast.NotIn : "not in"
}

# Format patterns for the unary operator nodes.
unary_map = {
    ast.Not : "not %s",
    ast.Invert : "~%s",
    ast.USub : "-%s",
    ast.UAdd : "+%s"
}
+
+
class DebugInterpreter(ast.NodeVisitor):
    """Interpret AST nodes to glean useful debugging information.

    Every visit_* method returns an (explanation, result) pair: the textual
    explanation of the sub-expression and its actual value.  Values are
    obtained by compiling small source snippets and evaluating them in the
    original frame; any error during evaluation is wrapped in Failure.
    The "\\n{...\\n}" markers embedded in explanations are rendered later
    by _format_explanation.
    """

    def __init__(self, frame):
        # frame: frame wrapper providing eval/exec_/repr in the namespace
        # of the originally failing assert (py.code.Frame in this project).
        self.frame = frame

    def generic_visit(self, node):
        # Fallback when we don't have a special implementation.
        if _is_ast_expr(node):
            mod = ast.Expression(node)
            co = self._compile(mod)
            try:
                result = self.frame.eval(co)
            except Exception:
                raise Failure()
            explanation = self.frame.repr(result)
            return explanation, result
        elif _is_ast_stmt(node):
            mod = ast.Module([node])
            co = self._compile(mod, "exec")
            try:
                self.frame.exec_(co)
            except Exception:
                raise Failure()
            # Statements produce no value.
            return None, None
        else:
            raise AssertionError("can't handle %s" %(node,))

    def _compile(self, source, mode="eval"):
        # `source` may be an AST or a string snippet.
        return compile(source, "<assertion interpretation>", mode)

    def visit_Expr(self, expr):
        # Expression statement: explain the wrapped expression.
        return self.visit(expr.value)

    def visit_Module(self, mod):
        for stmt in mod.body:
            self.visit(stmt)

    def visit_Name(self, name):
        explanation, result = self.generic_visit(name)
        # See if the name is local.
        source = "%r in locals() is not globals()" % (name.id,)
        co = self._compile(source)
        try:
            local = self.frame.eval(co)
        except Exception:
            # have to assume it isn't
            local = False
        if not local:
            # Globals/builtins explain themselves: just show the name.
            return name.id, result
        return explanation, result

    def visit_Compare(self, comp):
        # Chained comparisons (a < b < c): evaluate link by link, reusing
        # the right operand as the next left operand and short-circuiting
        # once a link evaluates false.
        left = comp.left
        left_explanation, left_result = self.visit(left)
        got_result = False
        for op, next_op in zip(comp.ops, comp.comparators):
            if got_result and not result:
                # previous link was false -- the comparison is decided
                break
            next_explanation, next_result = self.visit(next_op)
            op_symbol = operator_map[op.__class__]
            explanation = "%s %s %s" % (left_explanation, op_symbol,
                                        next_explanation)
            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
            co = self._compile(source)
            try:
                result = self.frame.eval(co, __exprinfo_left=left_result,
                                         __exprinfo_right=next_result)
            except Exception:
                raise Failure(explanation)
            else:
                got_result = True
            left_explanation, left_result = next_explanation, next_result
        return explanation, result

    def visit_BoolOp(self, boolop):
        # and/or chains, with the same short-circuiting as the real
        # operators: stop at the first operand deciding the outcome.
        is_or = isinstance(boolop.op, ast.Or)
        explanations = []
        for operand in boolop.values:
            explanation, result = self.visit(operand)
            explanations.append(explanation)
            if result == is_or:
                break
        name = is_or and " or " or " and "
        explanation = "(" + name.join(explanations) + ")"
        return explanation, result

    def visit_UnaryOp(self, unary):
        pattern = unary_map[unary.op.__class__]
        operand_explanation, operand_result = self.visit(unary.operand)
        explanation = pattern % (operand_explanation,)
        co = self._compile(pattern % ("__exprinfo_expr",))
        try:
            result = self.frame.eval(co, __exprinfo_expr=operand_result)
        except Exception:
            raise Failure(explanation)
        return explanation, result

    def visit_BinOp(self, binop):
        left_explanation, left_result = self.visit(binop.left)
        right_explanation, right_result = self.visit(binop.right)
        symbol = operator_map[binop.op.__class__]
        explanation = "(%s %s %s)" % (left_explanation, symbol,
                                      right_explanation)
        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, __exprinfo_left=left_result,
                                     __exprinfo_right=right_result)
        except Exception:
            raise Failure(explanation)
        return explanation, result

    def visit_Call(self, call):
        # Rebuild the call with every evaluated argument bound to a
        # __exprinfo_* name, then evaluate the rebuilt call in the frame.
        # NOTE(review): uses the pre-3.5 Call fields starargs/kwargs --
        # tied to the AST layout of the Python versions this targets.
        func_explanation, func = self.visit(call.func)
        arg_explanations = []
        ns = {"__exprinfo_func" : func}
        arguments = []
        for arg in call.args:
            arg_explanation, arg_result = self.visit(arg)
            arg_name = "__exprinfo_%s" % (len(ns),)
            ns[arg_name] = arg_result
            arguments.append(arg_name)
            arg_explanations.append(arg_explanation)
        for keyword in call.keywords:
            arg_explanation, arg_result = self.visit(keyword.value)
            arg_name = "__exprinfo_%s" % (len(ns),)
            ns[arg_name] = arg_result
            keyword_source = "%s=%%s" % (keyword.arg)
            arguments.append(keyword_source % (arg_name,))
            arg_explanations.append(keyword_source % (arg_explanation,))
        if call.starargs:
            arg_explanation, arg_result = self.visit(call.starargs)
            arg_name = "__exprinfo_star"
            ns[arg_name] = arg_result
            arguments.append("*%s" % (arg_name,))
            arg_explanations.append("*%s" % (arg_explanation,))
        if call.kwargs:
            arg_explanation, arg_result = self.visit(call.kwargs)
            arg_name = "__exprinfo_kwds"
            ns[arg_name] = arg_result
            arguments.append("**%s" % (arg_name,))
            arg_explanations.append("**%s" % (arg_explanation,))
        args_explained = ", ".join(arg_explanations)
        explanation = "%s(%s)" % (func_explanation, args_explained)
        args = ", ".join(arguments)
        source = "__exprinfo_func(%s)" % (args,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, **ns)
        except Exception:
            raise Failure(explanation)
        # Only show result explanation if it's not a builtin call or returns a
        # bool.
        if not isinstance(call.func, ast.Name) or \
                not self._is_builtin_name(call.func):
            source = "isinstance(__exprinfo_value, bool)"
            co = self._compile(source)
            try:
                is_bool = self.frame.eval(co, __exprinfo_value=result)
            except Exception:
                is_bool = False
            if not is_bool:
                # Nest the call explanation under its repr'd result.
                pattern = "%s\n{%s = %s\n}"
                rep = self.frame.repr(result)
                explanation = pattern % (rep, rep, explanation)
        return explanation, result

    def _is_builtin_name(self, name):
        # A name is "builtin" when it is neither global nor local.
        pattern = "%r not in globals() and %r not in locals()"
        source = pattern % (name.id, name.id)
        co = self._compile(source)
        try:
            return self.frame.eval(co)
        except Exception:
            return False

    def visit_Attribute(self, attr):
        if not isinstance(attr.ctx, ast.Load):
            # Store/Del contexts get no special treatment.
            return self.generic_visit(attr)
        source_explanation, source_result = self.visit(attr.value)
        explanation = "%s.%s" % (source_explanation, attr.attr)
        source = "__exprinfo_expr.%s" % (attr.attr,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, __exprinfo_expr=source_result)
        except Exception:
            raise Failure(explanation)
        # Check if the attr is from an instance.
        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
        source = source % (attr.attr,)
        co = self._compile(source)
        try:
            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
        except Exception:
            from_instance = True
        if from_instance:
            # Instance attributes are interesting: show their value.
            rep = self.frame.repr(result)
            pattern = "%s\n{%s = %s\n}"
            explanation = pattern % (rep, rep, explanation)
        return explanation, result

    def visit_Assert(self, assrt):
        test_explanation, test_result = self.visit(assrt.test)
        # simplify 'assert False where False = ...' output
        if test_explanation.startswith("False\n{False =") and \
                test_explanation.endswith("\n"):
            test_explanation = test_explanation[15:-2]
        explanation = "assert %s" % (test_explanation,)
        if not test_result:
            # Raise-and-catch so Failure.cause records an AssertionError.
            try:
                raise BuiltinAssertionError
            except Exception:
                raise Failure(explanation)
        return explanation, test_result

    def visit_Assign(self, assign):
        value_explanation, value_result = self.visit(assign.value)
        explanation = "... = %s" % (value_explanation,)
        # Re-run the assignment with the already-evaluated value bound to
        # __exprinfo_expr, so the targets are set in the frame.
        name = ast.Name("__exprinfo_expr", ast.Load(), assign.value.lineno,
                        assign.value.col_offset)
        new_assign = ast.Assign(assign.targets, name, assign.lineno,
                                assign.col_offset)
        mod = ast.Module([new_assign])
        co = self._compile(mod, "exec")
        try:
            self.frame.exec_(co, __exprinfo_expr=value_result)
        except Exception:
            raise Failure(explanation)
        return explanation, value_result
Added: pypy/branch/py12/py/_code/_assertionold.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_code/_assertionold.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,556 @@
+import py
+import sys, inspect
+from compiler import parse, ast, pycodegen
+from py._code.assertion import BuiltinAssertionError, _format_explanation
+
# Exceptions that must never be swallowed by the bare `except:` handlers
# below -- they are always re-raised to the caller.
passthroughex = (KeyboardInterrupt, SystemExit, MemoryError)
+
class Failure:
    """Carries the exception (captured via sys.exc_info() at construction
    time) raised while evaluating the parse-tree `node`."""
    def __init__(self, node):
        self.exc, self.value, self.tb = sys.exc_info()
        self.node = node
+
class View(object):
    """View base class.

    If C is a subclass of View, then C(x) creates a proxy object around
    the object x. The actual class of the proxy is not C in general,
    but a *subclass* of C determined by the rules below. To avoid confusion
    we call view class the class of the proxy (a subclass of C, so of View)
    and object class the class of x.

    Attributes and methods not found in the proxy are automatically read on x.
    Other operations like setting attributes are performed on the proxy, as
    determined by its view class. The object x is available from the proxy
    as its __obj__ attribute.

    The view class selection is determined by the __view__ tuples and the
    optional __viewkey__ method. By default, the selected view class is the
    most specific subclass of C whose __view__ mentions the class of x.
    If no such subclass is found, the search proceeds with the parent
    object classes. For example, C(True) will first look for a subclass
    of C with __view__ = (..., bool, ...) and only if it doesn't find any
    look for one with __view__ = (..., int, ...), and then ..., object,...
    If everything fails the class C itself is considered to be the default.

    Alternatively, the view class selection can be driven by another aspect
    of the object x, instead of the class of x, by overriding __viewkey__.
    See last example at the end of this module.
    """

    # NOTE(review): _viewcache is read in __new__ but never written, so
    # every instantiation recomputes _selectsubclass; if caching is added,
    # the key must include __rootclass__ to avoid cross-hierarchy clashes.
    _viewcache = {}
    __view__ = ()

    def __new__(rootclass, obj, *args, **kwds):
        # Create the proxy, then swap its class for the best-matching
        # view subclass of `rootclass`.
        self = object.__new__(rootclass)
        self.__obj__ = obj
        self.__rootclass__ = rootclass
        key = self.__viewkey__()
        try:
            self.__class__ = self._viewcache[key]
        except KeyError:
            self.__class__ = self._selectsubclass(key)
        return self

    def __getattr__(self, attr):
        # attributes not found in the normal hierarchy rooted on View
        # are looked up in the object's real class
        return getattr(self.__obj__, attr)

    def __viewkey__(self):
        # Default selection key: the class of the wrapped object.
        return self.__obj__.__class__

    def __matchkey__(self, key, subclasses):
        # Return the subclasses whose __view__ mentions `key`, walking up
        # the MRO of `key` until any match is found.
        if inspect.isclass(key):
            keys = inspect.getmro(key)
        else:
            keys = [key]
        for key in keys:
            result = [C for C in subclasses if key in C.__view__]
            if result:
                return result
        return []

    def _selectsubclass(self, key):
        subclasses = list(enumsubclasses(self.__rootclass__))
        for C in subclasses:
            # Normalize a bare class in __view__ to a one-element tuple.
            if not isinstance(C.__view__, tuple):
                C.__view__ = (C.__view__,)
        choices = self.__matchkey__(key, subclasses)
        if not choices:
            return self.__rootclass__
        elif len(choices) == 1:
            return choices[0]
        else:
            # combine the multiple choices
            return type('?', tuple(choices), {})

    def __repr__(self):
        return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
+
+
def enumsubclasses(cls):
    """Yield every (transitive) subclass of `cls` depth-first, most derived
    first, finishing with `cls` itself."""
    for child in cls.__subclasses__():
        for descendant in enumsubclasses(child):
            yield descendant
    yield cls
+
+
class Interpretable(View):
    """A parse tree node with a few extra methods.

    Subclasses select specific `compiler.ast` node classes via View's
    __view__ mechanism; eval()/run() set `result` and `explanation` by
    compiling and executing the wrapped node in the failing frame.
    """
    explanation = None

    def is_builtin(self, frame):
        # Overridden by Name; other node kinds are never builtin names.
        return False

    def eval(self, frame):
        # fall-back for unknown expression nodes
        try:
            expr = ast.Expression(self.__obj__)
            expr.filename = '<eval>'
            self.__obj__.filename = '<eval>'
            co = pycodegen.ExpressionCodeGenerator(expr).getCode()
            result = frame.eval(co)
        except passthroughex:
            raise
        except:
            raise Failure(self)
        self.result = result
        # Keep an explanation a subclass may have set already.
        self.explanation = self.explanation or frame.repr(self.result)

    def run(self, frame):
        # fall-back for unknown statement nodes
        try:
            expr = ast.Module(None, ast.Stmt([self.__obj__]))
            expr.filename = '<run>'
            co = pycodegen.ModuleCodeGenerator(expr).getCode()
            frame.exec_(co)
        except passthroughex:
            raise
        except:
            raise Failure(self)

    def nice_explanation(self):
        # Render the "\n{...\n}" markers for display.
        return _format_explanation(self.explanation)
+
+
class Name(Interpretable):
    __view__ = ast.Name

    def is_local(self, frame):
        # True when the name resolves from the frame's locals (and locals()
        # is not the module-level globals()).
        co = compile('%r in locals() is not globals()' % self.name, '?', 'eval')
        try:
            return frame.is_true(frame.eval(co))
        except passthroughex:
            raise
        except:
            return False

    def is_global(self, frame):
        co = compile('%r in globals()' % self.name, '?', 'eval')
        try:
            return frame.is_true(frame.eval(co))
        except passthroughex:
            raise
        except:
            return False

    def is_builtin(self, frame):
        # Builtin means: neither local nor global in the frame.
        co = compile('%r not in locals() and %r not in globals()' % (
            self.name, self.name), '?', 'eval')
        try:
            return frame.is_true(frame.eval(co))
        except passthroughex:
            raise
        except:
            return False

    def eval(self, frame):
        super(Name, self).eval(frame)
        if not self.is_local(frame):
            # Non-local names explain themselves: just show the name.
            self.explanation = self.name
+
class Compare(Interpretable):
    __view__ = ast.Compare

    def eval(self, frame):
        # Chained comparisons (a < b < c): evaluate link by link, reusing
        # the right operand as the next left operand.
        expr = Interpretable(self.expr)
        expr.eval(frame)
        for operation, expr2 in self.ops:
            if hasattr(self, 'result'):
                # shortcutting in chained expressions
                if not frame.is_true(self.result):
                    break
            expr2 = Interpretable(expr2)
            expr2.eval(frame)
            self.explanation = "%s %s %s" % (
                expr.explanation, operation, expr2.explanation)
            co = compile("__exprinfo_left %s __exprinfo_right" % operation,
                         '?', 'eval')
            try:
                self.result = frame.eval(co, __exprinfo_left=expr.result,
                                         __exprinfo_right=expr2.result)
            except passthroughex:
                raise
            except:
                raise Failure(self)
            expr = expr2
+
class And(Interpretable):
    __view__ = ast.And

    def eval(self, frame):
        """Evaluate the operands left to right, stopping (like the real
        `and`) at the first false one; the last evaluated value wins."""
        parts = []
        for node in self.nodes:
            node = Interpretable(node)
            node.eval(frame)
            parts.append(node.explanation)
            self.result = node.result
            if not frame.is_true(node.result):
                break
        self.explanation = '(' + ' and '.join(parts) + ')'
+
class Or(Interpretable):
    __view__ = ast.Or

    def eval(self, frame):
        """Evaluate the operands left to right, stopping (like the real
        `or`) at the first true one; the last evaluated value wins."""
        parts = []
        for node in self.nodes:
            node = Interpretable(node)
            node.eval(frame)
            parts.append(node.explanation)
            self.result = node.result
            if frame.is_true(node.result):
                break
        self.explanation = '(' + ' or '.join(parts) + ')'
+
+
# == Unary operations ==
# For each unary operator node, fabricate an Interpretable subclass whose
# eval() re-runs the operation on the already-evaluated operand.  The
# pattern string and its compiled form are bound per-class through default
# arguments (the classic late-binding-closure workaround).
keepalive = []
for astclass, astpattern in {
    ast.Not    : 'not __exprinfo_expr',
    ast.Invert : '(~__exprinfo_expr)',
    }.items():

    class UnaryArith(Interpretable):
        __view__ = astclass

        def eval(self, frame, astpattern=astpattern,
                             co=compile(astpattern, '?', 'eval')):
            expr = Interpretable(self.expr)
            expr.eval(frame)
            self.explanation = astpattern.replace('__exprinfo_expr',
                                                  expr.explanation)
            try:
                self.result = frame.eval(co, __exprinfo_expr=expr.result)
            except passthroughex:
                raise
            except:
                raise Failure(self)

    # The loop rebinds the name `UnaryArith` each iteration; keep every
    # generated class referenced so it is not garbage collected.
    keepalive.append(UnaryArith)
+
# == Binary operations ==
# Same class-factory technique as the unary operations above: one
# Interpretable subclass per binary operator, with the snippet and its
# code object bound through default arguments.
for astclass, astpattern in {
    ast.Add    : '(__exprinfo_left + __exprinfo_right)',
    ast.Sub    : '(__exprinfo_left - __exprinfo_right)',
    ast.Mul    : '(__exprinfo_left * __exprinfo_right)',
    ast.Div    : '(__exprinfo_left / __exprinfo_right)',
    ast.Mod    : '(__exprinfo_left % __exprinfo_right)',
    ast.Power  : '(__exprinfo_left ** __exprinfo_right)',
    }.items():

    class BinaryArith(Interpretable):
        __view__ = astclass

        def eval(self, frame, astpattern=astpattern,
                             co=compile(astpattern, '?', 'eval')):
            left = Interpretable(self.left)
            left.eval(frame)
            right = Interpretable(self.right)
            right.eval(frame)
            self.explanation = (astpattern
                                .replace('__exprinfo_left',  left .explanation)
                                .replace('__exprinfo_right', right.explanation))
            try:
                self.result = frame.eval(co, __exprinfo_left=left.result,
                                             __exprinfo_right=right.result)
            except passthroughex:
                raise
            except:
                raise Failure(self)

    keepalive.append(BinaryArith)
+
+
class CallFunc(Interpretable):
    __view__ = ast.CallFunc

    def is_bool(self, frame):
        # True when the call's result is a bool (bool results need no
        # extra "{... = ...}" explanation nesting).
        co = compile('isinstance(__exprinfo_value, bool)', '?', 'eval')
        try:
            return frame.is_true(frame.eval(co, __exprinfo_value=self.result))
        except passthroughex:
            raise
        except:
            return False

    def eval(self, frame):
        # Rebuild the call with every evaluated argument bound to a
        # __exprinfo_* name, then evaluate the rebuilt call in the frame.
        node = Interpretable(self.node)
        node.eval(frame)
        explanations = []
        vars = {'__exprinfo_fn': node.result}
        source = '__exprinfo_fn('
        for a in self.args:
            if isinstance(a, ast.Keyword):
                keyword = a.name
                a = a.expr
            else:
                keyword = None
            a = Interpretable(a)
            a.eval(frame)
            argname = '__exprinfo_%d' % len(vars)
            vars[argname] = a.result
            if keyword is None:
                source += argname + ','
                explanations.append(a.explanation)
            else:
                source += '%s=%s,' % (keyword, argname)
                explanations.append('%s=%s' % (keyword, a.explanation))
        if self.star_args:
            star_args = Interpretable(self.star_args)
            star_args.eval(frame)
            argname = '__exprinfo_star'
            vars[argname] = star_args.result
            source += '*' + argname + ','
            explanations.append('*' + star_args.explanation)
        if self.dstar_args:
            dstar_args = Interpretable(self.dstar_args)
            dstar_args.eval(frame)
            argname = '__exprinfo_kwds'
            vars[argname] = dstar_args.result
            source += '**' + argname + ','
            explanations.append('**' + dstar_args.explanation)
        self.explanation = "%s(%s)" % (
            node.explanation, ', '.join(explanations))
        # Drop the trailing comma and close the rebuilt call source.
        if source.endswith(','):
            source = source[:-1]
        source += ')'
        co = compile(source, '?', 'eval')
        try:
            self.result = frame.eval(co, **vars)
        except passthroughex:
            raise
        except:
            raise Failure(self)
        # Builtin calls and bool results explain themselves; otherwise
        # nest the call explanation under its repr'd result.
        if not node.is_builtin(frame) or not self.is_bool(frame):
            r = frame.repr(self.result)
            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
class Getattr(Interpretable):
    __view__ = ast.Getattr

    def eval(self, frame):
        expr = Interpretable(self.expr)
        expr.eval(frame)
        co = compile('__exprinfo_expr.%s' % self.attrname, '?', 'eval')
        try:
            self.result = frame.eval(co, __exprinfo_expr=expr.result)
        except passthroughex:
            raise
        except:
            raise Failure(self)
        self.explanation = '%s.%s' % (expr.explanation, self.attrname)
        # if the attribute comes from the instance, its value is interesting
        co = compile('hasattr(__exprinfo_expr, "__dict__") and '
                     '%r in __exprinfo_expr.__dict__' % self.attrname,
                     '?', 'eval')
        try:
            from_instance = frame.is_true(
                frame.eval(co, __exprinfo_expr=expr.result))
        except passthroughex:
            raise
        except:
            # Cannot tell -- assume instance attribute and show the value.
            from_instance = True
        if from_instance:
            r = frame.repr(self.result)
            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+# == Re-interpretation of full statements ==
+
class Assert(Interpretable):
    __view__ = ast.Assert

    def run(self, frame):
        test = Interpretable(self.test)
        test.eval(frame)
        # simplify 'assert False where False = ...'
        if (test.explanation.startswith('False\n{False = ') and
            test.explanation.endswith('\n}')):
            test.explanation = test.explanation[15:-2]
        # print the result as 'assert <explanation>'
        self.result = test.result
        self.explanation = 'assert ' + test.explanation
        if not frame.is_true(test.result):
            # Raise-and-catch so the Failure records an AssertionError
            # as the original cause.
            try:
                raise BuiltinAssertionError
            except passthroughex:
                raise
            except:
                raise Failure(self)
+
class Assign(Interpretable):
    __view__ = ast.Assign

    def run(self, frame):
        expr = Interpretable(self.expr)
        expr.eval(frame)
        self.result = expr.result
        self.explanation = '... = ' + expr.explanation
        # fall-back-run the rest of the assignment: re-execute it with the
        # already-evaluated value bound to __exprinfo_expr so the targets
        # are actually set in the frame.
        ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
        mod = ast.Module(None, ast.Stmt([ass]))
        mod.filename = '<run>'
        co = pycodegen.ModuleCodeGenerator(mod).getCode()
        try:
            frame.exec_(co, __exprinfo_expr=expr.result)
        except passthroughex:
            raise
        except:
            raise Failure(self)
+
class Discard(Interpretable):
    __view__ = ast.Discard

    def run(self, frame):
        """An expression statement: evaluate the expression and forward its
        result and explanation."""
        inner = Interpretable(self.expr)
        inner.eval(frame)
        self.result = inner.result
        self.explanation = inner.explanation
+
class Stmt(Interpretable):
    __view__ = ast.Stmt

    def run(self, frame):
        """Run every child statement of this suite, in order."""
        for child in self.nodes:
            Interpretable(child).run(frame)
+
+
def report_failure(e):
    """Print a one-line summary of Failure `e` on stdout."""
    nice = e.node.nice_explanation()
    if nice:
        nice = ", in: " + nice
    else:
        nice = ""
    sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, nice))
+
def check(s, frame=None):
    """Evaluate the expression `s` in `frame` (default: the caller's frame),
    printing an explanation instead of raising when it is false or fails."""
    if frame is None:
        frame = sys._getframe(1)
        frame = py.code.Frame(frame)
    expr = parse(s, 'eval')
    assert isinstance(expr, ast.Expression)
    node = Interpretable(expr.node)
    try:
        node.eval(frame)
    except passthroughex:
        raise
    except Failure:
        e = sys.exc_info()[1]
        report_failure(e)
    else:
        if not frame.is_true(node.result):
            sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
+
+
+###########################################################
+# API / Entry points
+# #########################################################
+
def interpret(source, frame, should_fail=False):
    """Re-run the statements in `source` within `frame`.

    Returns the textual failure explanation, the should-fail hint message
    when the re-run unexpectedly succeeded, or None.
    """
    module = Interpretable(parse(source, 'exec').node)
    #print "got module", module
    if isinstance(frame, py.std.types.FrameType):
        frame = py.code.Frame(frame)
    try:
        module.run(frame)
    except Failure:
        e = sys.exc_info()[1]
        return getfailure(e)
    except passthroughex:
        raise
    except:
        # Unexpected error during re-interpretation: report it but fall
        # through -- the outcome of the original assertion still stands.
        import traceback
        traceback.print_exc()
    if should_fail:
        return ("(assertion failed, but when it was re-run for "
                "printing intermediate values, it did not fail. Suggestions: "
                "compute assert expression before the assert or use --nomagic)")
    else:
        return None
+
def getmsg(excinfo):
    """Return the re-interpreted assertion message for `excinfo`.

    `excinfo` may be an exc_info() tuple or an ExceptionInfo; the statement
    of its innermost traceback entry is re-run via interpret().
    """
    if isinstance(excinfo, tuple):
        excinfo = py.code.ExceptionInfo(excinfo)
    entry = excinfo.traceback[-1]
    source = str(entry.statement).strip()
    msg = interpret(source, entry.frame, should_fail=True)
    if not isinstance(msg, str):
        raise TypeError("interpret returned non-string %r" % (msg,))
    return msg
+
def getfailure(e):
    """Build the final 'ExcName: explanation' text for Failure `e`."""
    explanation = e.node.nice_explanation()
    if str(e.value):
        # Append the exception value to the first explanation line.
        first, sep, rest = explanation.partition('\n')
        explanation = first + " << %s" % (e.value,) + sep + rest
    text = "%s: %s" % (e.exc.__name__, explanation)
    # Drop the redundant 16-char 'AssertionError: ' prefix for plain asserts.
    if text.startswith('AssertionError: assert '):
        text = text[16:]
    return text
+
def run(s, frame=None):
    """Run the statements in `s` inside `frame` (default: the caller's
    frame), printing an explanation if a Failure occurs."""
    if frame is None:
        frame = py.code.Frame(sys._getframe(1))
    module = Interpretable(parse(s, 'exec').node)
    try:
        module.run(frame)
    except Failure:
        report_failure(sys.exc_info()[1])
+
+
if __name__ == '__main__':
    # example:
    # Demo of the check()/run() explanation output.  Most of the checks
    # below are deliberately false (and some runs deliberately raise) so
    # the printed explanations can be inspected.
    def f():
        return 5
    def g():
        return 3
    def h(x):
        return 'never'
    check("f() * g() == 5")
    check("not f()")
    check("not (f() and g() or 0)")
    check("f() == g()")
    i = 4
    check("i == f()")
    check("len(f()) == 0")
    check("isinstance(2+3+4, float)")

    run("x = i")
    check("x == 5")

    run("assert not f(), 'oops'")
    run("a, b, c = 1, 2")
    run("a, b, c = f()")

    check("max([f(),g()]) == 4")
    check("'hello'[g()] == 'h'")
    run("'guk%d' % h(f())")
Added: pypy/branch/py12/py/_code/assertion.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_code/assertion.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,77 @@
+import sys
+import py
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+
+
+def _format_explanation(explanation):
+ # uck! See CallFunc for where \n{ and \n} escape sequences are used
+ raw_lines = (explanation or '').split('\n')
+ # escape newlines not followed by { and }
+ lines = [raw_lines[0]]
+ for l in raw_lines[1:]:
+ if l.startswith('{') or l.startswith('}'):
+ lines.append(l)
+ else:
+ lines[-1] += '\\n' + l
+
+ result = lines[:1]
+ stack = [0]
+ stackcnt = [0]
+ for line in lines[1:]:
+ if line.startswith('{'):
+ if stackcnt[-1]:
+ s = 'and '
+ else:
+ s = 'where '
+ stack.append(len(result))
+ stackcnt[-1] += 1
+ stackcnt.append(0)
+ result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
+ else:
+ assert line.startswith('}')
+ stack.pop()
+ stackcnt.pop()
+ result[stack[-1]] += line[1:]
+ assert len(stack) == 1
+ return '\n'.join(result)
+
+
class AssertionError(BuiltinAssertionError):
    """AssertionError replacement which, when raised without arguments,
    reinterprets the failing assert statement to build a helpful message
    (stored on self.msg)."""

    def __init__(self, *args):
        BuiltinAssertionError.__init__(self, *args)
        if args:
            # an explicit message was given: use its str() as-is
            try:
                self.msg = str(args[0])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                # NOTE(review): "%0xd" renders id() as hex plus a literal
                # 'd' -- looks like a typo for "0x%x"; confirm upstream
                self.msg = "<[broken __repr__] %s at %0xd>" %(
                    args[0].__class__, id(args[0]))
        else:
            # no message: reinterpret the assert statement of the frame
            # that raised us (one level up)
            f = py.code.Frame(sys._getframe(1))
            try:
                source = f.statement
                source = str(source.deindent()).strip()
            except py.error.ENOENT:
                source = None
                # this can also occur during reinterpretation, when the
                # co_filename is set to "<run>".
            if source:
                self.msg = reinterpret(source, f, should_fail=True)
                if not self.args:
                    self.args = (self.msg,)
            else:
                self.msg = None
+
# Select the reinterpretation implementation for this interpreter:
# - Python 3: the compiler-package based "old" interpreter is unavailable
# - _assertionnew needs the _ast module (Python >= 2.6; Jython ships it)
if sys.version_info > (3, 0):
    AssertionError.__module__ = "builtins"
    reinterpret_old = "old reinterpretation not available for py3"
else:
    from py._code._assertionold import interpret as reinterpret_old
if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
    from py._code._assertionnew import interpret as reinterpret
else:
    reinterpret = reinterpret_old
+
Added: pypy/branch/py12/py/_code/code.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_code/code.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,748 @@
+import py
+import sys, os.path
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
class Code(object):
    """ wrapper around Python code objects """
    def __init__(self, rawcode):
        rawcode = py.code.getrawcode(rawcode)
        self.raw = rawcode
        try:
            self.filename = rawcode.co_filename
            # stored 0-based (co_firstlineno is 1-based)
            self.firstlineno = rawcode.co_firstlineno - 1
            self.name = rawcode.co_name
        except AttributeError:
            raise TypeError("not a code object: %r" %(rawcode,))

    def __eq__(self, other):
        return self.raw == other.raw

    def __ne__(self, other):
        return not self == other

    def new(self, rec=False, **kwargs):
        """ return new code object with modified attributes.
            If rec(ursive) is true then also rebuild code
            objects contained in co_consts.
        """
        if sys.platform.startswith("java"):
            # XXX jython does not support the below co_filename hack
            return self.raw
        names = [x for x in dir(self.raw) if x[:3] == 'co_']
        for name in kwargs:
            if name not in names:
                raise TypeError("unknown code attribute: %r" %(name, ))
        if rec and hasattr(self.raw, 'co_consts'):  # jython
            # rebuild nested code constants first, then rebuild self
            newconstlist = []
            co = self.raw
            cotype = type(co)
            for c in co.co_consts:
                if isinstance(c, cotype):
                    c = self.__class__(c).new(rec=True, **kwargs)
                newconstlist.append(c)
            return self.new(rec=False, co_consts=tuple(newconstlist), **kwargs)
        for name in names:
            if name not in kwargs:
                kwargs[name] = getattr(self.raw, name)
        # positional argument order required by the code-object constructor
        arglist = [
            kwargs['co_argcount'],
            kwargs['co_nlocals'],
            kwargs.get('co_stacksize', 0), # jython
            kwargs.get('co_flags', 0), # jython
            kwargs.get('co_code', ''), # jython
            kwargs.get('co_consts', ()), # jython
            kwargs.get('co_names', []), #
            kwargs['co_varnames'],
            kwargs['co_filename'],
            kwargs['co_name'],
            kwargs['co_firstlineno'],
            kwargs.get('co_lnotab', ''), #jython
            kwargs.get('co_freevars', None), #jython
            kwargs.get('co_cellvars', None), # jython
        ]
        if sys.version_info >= (3,0):
            # py3 code objects take co_kwonlyargcount as 2nd argument
            arglist.insert(1, kwargs['co_kwonlyargcount'])
            return self.raw.__class__(*arglist)
        else:
            return py.std.new.code(*arglist)

    def path(self):
        """ return a path object pointing to source code"""
        fn = self.raw.co_filename
        try:
            # co_filename may be a MyStr carrying the generated source
            return fn.__path__
        except AttributeError:
            p = py.path.local(self.raw.co_filename)
            if not p.check():
                # XXX maybe try harder like the weird logic
                # in the standard lib [linecache.updatecache] does?
                p = self.raw.co_filename
            return p

    path = property(path, None, None, "path of this code object")

    def fullsource(self):
        """ return a py.code.Source object for the full source file of the code
        """
        from py._code import source
        full, _ = source.findsource(self.raw)
        return full
    fullsource = property(fullsource, None, None,
                          "full source containing this code object")

    def source(self):
        """ return a py.code.Source object for the code object's source only
        """
        # return source only for that part of code
        return py.code.Source(self.raw)

    def getargs(self):
        """ return a tuple with the argument names for the code object
        """
        # handfull shortcut for getting args
        raw = self.raw
        return raw.co_varnames[:raw.co_argcount]
+
class Frame(object):
    """Wrapper around a Python frame holding f_locals and f_globals
    in which expressions can be evaluated."""

    def __init__(self, frame):
        self.code = py.code.Code(frame.f_code)
        # 0-based line number (f_lineno is 1-based)
        self.lineno = frame.f_lineno - 1
        self.f_globals = frame.f_globals
        self.f_locals = frame.f_locals
        self.raw = frame

    def statement(self):
        if self.code.fullsource is None:
            return py.code.Source("")
        return self.code.fullsource.getstatement(self.lineno)
    statement = property(statement, None, None,
                         "statement this frame is at")

    def eval(self, code, **vars):
        """ evaluate 'code' in the frame

            'vars' are optional additional local variables

            returns the result of the evaluation
        """
        # copy so extra vars never leak back into the real frame locals
        f_locals = self.f_locals.copy()
        f_locals.update(vars)
        return eval(code, self.f_globals, f_locals)

    def exec_(self, code, **vars):
        """ exec 'code' in the frame

            'vars' are optional additional local variables
        """
        f_locals = self.f_locals.copy()
        f_locals.update(vars)
        py.builtin.exec_(code, self.f_globals, f_locals )

    def repr(self, object):
        """ return a 'safe' (non-recursive, one-line) string repr for 'object'
        """
        return py.io.saferepr(object)

    def is_true(self, object):
        return object

    def getargs(self):
        """ return a list of tuples (name, value) for all arguments
        """
        retval = []
        for arg in self.code.getargs():
            try:
                retval.append((arg, self.f_locals[arg]))
            except KeyError:
                pass # this can occur when using Psyco
        return retval
+
class TracebackEntry(object):
    """ a single entry in a traceback """

    # cached result of reinterpret(), filled lazily
    exprinfo = None

    def __init__(self, rawentry):
        self._rawentry = rawentry
        self.frame = py.code.Frame(rawentry.tb_frame)
        # Ugh. 2.4 and 2.5 differs here when encountering
        # multi-line statements. Not sure about the solution, but
        # should be portable
        self.lineno = rawentry.tb_lineno - 1
        self.relline = self.lineno - self.frame.code.firstlineno

    def __repr__(self):
        return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)

    def statement(self):
        """ return a py.code.Source object for the current statement """
        source = self.frame.code.fullsource
        return source.getstatement(self.lineno)
    statement = property(statement, None, None,
                         "statement of this traceback entry.")

    def path(self):
        return self.frame.code.path
    path = property(path, None, None, "path to the full source code")

    def getlocals(self):
        return self.frame.f_locals
    locals = property(getlocals, None, None, "locals of underlaying frame")

    def reinterpret(self):
        """Reinterpret the failing statement and return detailed information
           about what operations are performed."""
        if self.exprinfo is None:
            source = str(self.statement).strip()
            x = py.code._reinterpret(source, self.frame, should_fail=True)
            if not isinstance(x, str):
                raise TypeError("interpret returned non-string %r" % (x,))
            self.exprinfo = x
        return self.exprinfo

    def getfirstlinesource(self):
        return self.frame.code.firstlineno

    def getsource(self):
        """ return failing source code. """
        source = self.frame.code.fullsource
        if source is None:
            return None
        start = self.getfirstlinesource()
        end = self.lineno
        try:
            # extend to the full statement containing self.lineno
            _, end = source.getstatementrange(end)
        except IndexError:
            end = self.lineno + 1
        # heuristic to stop displaying source on e.g.
        #   if something:  # assume this causes a NameError
        #      # _this_ lines and the one
        #             below we don't want from entry.getsource()
        for i in range(self.lineno, end):
            if source[i].rstrip().endswith(':'):
                end = i + 1
                break
        return source[start:end]
    source = property(getsource)

    def ishidden(self):
        """ return True if the current frame has a var __tracebackhide__
            resolving to True

            mostly for internal use
        """
        try:
            return self.frame.eval("__tracebackhide__")
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            # any evaluation error (e.g. NameError) means "not hidden"
            return False

    def __str__(self):
        try:
            fn = str(self.path)
        except py.error.Error:
            fn = '???'
        name = self.frame.code.name
        try:
            line = str(self.statement).lstrip()
        except KeyboardInterrupt:
            raise
        except:
            line = "???"
        return "  File %r:%d in %s\n  %s\n" %(fn, self.lineno+1, name, line)

    def name(self):
        return self.frame.code.raw.co_name
    name = property(name, None, None, "co_name of underlaying code")
+
class Traceback(list):
    """ Traceback objects encapsulate and offer higher level
        access to Traceback entries.
    """
    Entry = TracebackEntry
    def __init__(self, tb):
        """ initialize from given python traceback object. """
        if hasattr(tb, 'tb_next'):
            # raw traceback object: walk the tb_next chain
            def f(cur):
                while cur is not None:
                    yield self.Entry(cur)
                    cur = cur.tb_next
            list.__init__(self, f(tb))
        else:
            # already an iterable of entries
            list.__init__(self, tb)

    def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
        """ return a Traceback instance wrapping part of this Traceback

            by providing any combination of path, lineno and firstlineno, the
            first frame to start the to-be-returned traceback is determined

            this allows cutting the first part of a Traceback instance e.g.
            for formatting reasons (removing some uninteresting bits that deal
            with handling of the exception/traceback)
        """
        for x in self:
            code = x.frame.code
            codepath = code.path
            if ((path is None or codepath == path) and
                (excludepath is None or not hasattr(codepath, 'relto') or
                 not codepath.relto(excludepath)) and
                (lineno is None or x.lineno == lineno) and
                (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
                return Traceback(x._rawentry)
        return self

    def __getitem__(self, key):
        val = super(Traceback, self).__getitem__(key)
        # slicing returns a Traceback, not a plain list
        if isinstance(key, type(slice(0))):
            val = self.__class__(val)
        return val

    def filter(self, fn=lambda x: not x.ishidden()):
        """ return a Traceback instance with certain items removed

            fn is a function that gets a single argument, a TracebackItem
            instance, and should return True when the item should be added
            to the Traceback, False when not

            by default this removes all the TracebackItems which are hidden
            (see ishidden() above)
        """
        return Traceback(filter(fn, self))

    def getcrashentry(self):
        """ return last non-hidden traceback entry that lead
            to the exception of a traceback.
        """
        tb = self.filter()
        if not tb:
            # everything hidden: fall back to the unfiltered traceback
            tb = self
        return tb[-1]

    def recursionindex(self):
        """ return the index of the frame/TracebackItem where recursion
            originates if appropriate, None if no recursion occurred
        """
        cache = {}
        for i, entry in enumerate(self):
            key = entry.frame.code.path, entry.lineno
            #print "checking for recursion at", key
            l = cache.setdefault(key, [])
            if l:
                # same code location seen before: recursion iff the locals
                # also compare equal (evaluated inside the frame, see co_equal)
                f = entry.frame
                loc = f.f_locals
                for otherloc in l:
                    if f.is_true(f.eval(co_equal,
                        __recursioncache_locals_1=loc,
                        __recursioncache_locals_2=otherloc)):
                        return i
            l.append(entry.frame.f_locals)
        return None
+
# code object used by Traceback.recursionindex() to compare two captured
# sets of frame locals (evaluated via Frame.eval with both as extra vars)
co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
                   '?', 'eval')
+
class ExceptionInfo(object):
    """ wraps sys.exc_info() objects and offers
        help for navigating the traceback.
    """
    # prefix stripped by exconly(tryshort=True)
    _striptext = ''
    def __init__(self, tup=None, exprinfo=None):
        # NB. all attributes are private!  Subclasses or other
        #     ExceptionInfo-like classes may have different attributes.
        if tup is None:
            tup = sys.exc_info()
            if exprinfo is None and isinstance(tup[1], py.code._AssertionError):
                # reinterpreted assertions carry their message in .msg
                exprinfo = getattr(tup[1], 'msg', None)
                if exprinfo is None:
                    exprinfo = str(tup[1])
            if exprinfo and exprinfo.startswith('assert '):
                self._striptext = 'AssertionError: '
        self._excinfo = tup
        self.type, self.value, tb = self._excinfo
        self.typename = self.type.__name__
        self.traceback = py.code.Traceback(tb)

    def __repr__(self):
        return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))

    def exconly(self, tryshort=False):
        """ return the exception as a string

            when 'tryshort' resolves to True, and the exception is a
            py.code._AssertionError, only the actual exception part of
            the exception representation is returned (so 'AssertionError: ' is
            removed from the beginning)
        """
        lines = py.std.traceback.format_exception_only(self.type, self.value)
        text = ''.join(lines)
        text = text.rstrip()
        if tryshort:
            if text.startswith(self._striptext):
                text = text[len(self._striptext):]
        return text

    def errisinstance(self, exc):
        """ return True if the exception is an instance of exc """
        return isinstance(self.value, exc)

    def _getreprcrash(self):
        # (path, lineno, message) of the last non-hidden entry
        exconly = self.exconly(tryshort=True)
        entry = self.traceback.getcrashentry()
        path, lineno = entry.path, entry.lineno
        reprcrash = ReprFileLocation(path, lineno+1, exconly)
        return reprcrash

    def getrepr(self, showlocals=False, style="long",
            abspath=False, tbfilter=True, funcargs=False):
        """ return str()able representation of this exception info.
            showlocals: show locals per traceback entry
            style: long|short|no traceback style
            tbfilter: hide entries (where __tracebackhide__ is true)
        """
        fmt = FormattedExcinfo(showlocals=showlocals, style=style,
            abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
        return fmt.repr_excinfo(self)

    def __str__(self):
        entry = self.traceback[-1]
        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
        return str(loc)

    def __unicode__(self):
        # python 2 only -- 'unicode' builtin does not exist on py3
        entry = self.traceback[-1]
        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
        return unicode(loc)
+
+
class FormattedExcinfo(object):
    """ presenting information about failing Functions and Generators. """
    # for traceback entries
    flow_marker = ">"
    fail_marker = "E"

    def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
        self.showlocals = showlocals
        self.style = style
        self.tbfilter = tbfilter
        self.funcargs = funcargs
        self.abspath = abspath

    def _getindent(self, source):
        # figure out indent for given source (based on its last statement);
        # falls back to 0 when the source cannot be inspected
        try:
            s = str(source.getstatement(len(source)-1))
        except KeyboardInterrupt:
            raise
        except:
            try:
                s = str(source[-1])
            except KeyboardInterrupt:
                raise
            except:
                return 0
        return 4 + (len(s) - len(s.lstrip()))

    def _getentrysource(self, entry):
        source = entry.getsource()
        if source is not None:
            source = source.deindent()
        return source

    def _saferepr(self, obj):
        return py.io.saferepr(obj)

    def repr_args(self, entry):
        # returns None (implicitly) unless funcargs display is enabled
        if self.funcargs:
            args = []
            for argname, argvalue in entry.frame.getargs():
                args.append((argname, self._saferepr(argvalue)))
            return ReprFuncArgs(args)

    def get_source(self, source, line_index=-1, excinfo=None):
        """ return formatted and marked up source lines. """
        lines = []
        if source is None:
            source = py.code.Source("???")
            line_index = 0
        if line_index < 0:
            line_index += len(source)
        for i in range(len(source)):
            if i == line_index:
                # mark the failing line with the flow marker (">")
                prefix = self.flow_marker + "   "
            else:
                prefix = "    "
            line = prefix + source[i]
            lines.append(line)
        if excinfo is not None:
            indent = self._getindent(source)
            lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
        return lines

    def get_exconly(self, excinfo, indent=4, markall=False):
        lines = []
        indent = " " * indent
        # get the real exception information out
        exlines = excinfo.exconly(tryshort=True).split('\n')
        failindent = self.fail_marker + indent[1:]
        for line in exlines:
            lines.append(failindent + line)
            if not markall:
                # only the first line carries the fail marker ("E")
                failindent = indent
        return lines

    def repr_locals(self, locals):
        if self.showlocals:
            lines = []
            keys = list(locals)
            keys.sort()
            for name in keys:
                value = locals[name]
                if name == '__builtins__':
                    lines.append("__builtins__ = <builtins>")
                else:
                    # This formatting could all be handled by the
                    # _repr() function, which is only reprlib.Repr in
                    # disguise, so is very configurable.
                    str_repr = self._saferepr(value)
                    #if len(str_repr) < 70 or not isinstance(value,
                    #                            (list, tuple, dict)):
                    lines.append("%-10s = %s" %(name, str_repr))
                    #else:
                    #    self._line("%-10s =\\" % (name,))
                    #    # XXX
                    #    py.std.pprint.pprint(value, stream=self.excinfowriter)
            return ReprLocals(lines)

    def repr_traceback_entry(self, entry, excinfo=None):
        # excinfo is not None if this is the last tb entry
        source = self._getentrysource(entry)
        if source is None:
            source = py.code.Source("???")
            line_index = 0
        else:
            # entry.getfirstlinesource() can be -1, should be 0 on jython
            line_index = entry.lineno - max(entry.getfirstlinesource(), 0)

        lines = []
        if self.style == "long":
            reprargs = self.repr_args(entry)
            lines.extend(self.get_source(source, line_index, excinfo))
            message = excinfo and excinfo.typename or ""
            path = self._makepath(entry.path)
            filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
            localsrepr = self.repr_locals(entry.locals)
            return ReprEntry(lines, reprargs, localsrepr, filelocrepr)
        else:
            if self.style == "short":
                line = source[line_index].lstrip()
                basename = os.path.basename(entry.frame.code.filename)
                lines.append('  File "%s", line %d, in %s' % (
                    basename, entry.lineno+1, entry.name))
                lines.append("    " + line)
            if excinfo:
                lines.extend(self.get_exconly(excinfo, indent=4))
            return ReprEntry(lines, None, None, None)

    def _makepath(self, path):
        # prefer a relative path when it is shorter than the absolute one
        if not self.abspath:
            np = py.path.local().bestrelpath(path)
            if len(np) < len(str(path)):
                path = np
        return path

    def repr_traceback(self, excinfo):
        traceback = excinfo.traceback
        if self.tbfilter:
            traceback = traceback.filter()
        recursionindex = None
        if excinfo.errisinstance(RuntimeError):
            # RuntimeError is what a max-recursion overflow raises
            recursionindex = traceback.recursionindex()
        last = traceback[-1]
        entries = []
        extraline = None
        for index, entry in enumerate(traceback):
            einfo = (last == entry) and excinfo or None
            reprentry = self.repr_traceback_entry(entry, einfo)
            entries.append(reprentry)
            if index == recursionindex:
                extraline = "!!! Recursion detected (same locals & position)"
                break
        return ReprTraceback(entries, extraline, style=self.style)

    def repr_excinfo(self, excinfo):
        reprtraceback = self.repr_traceback(excinfo)
        reprcrash = excinfo._getreprcrash()
        return ReprExceptionInfo(reprtraceback, reprcrash)
+
class TerminalRepr:
    # base class for all Repr* objects: subclasses implement toterminal(tw)
    def __str__(self):
        s = self.__unicode__()
        if sys.version_info[0] < 3:
            # py2 __str__ must return bytes
            s = s.encode('utf-8')
        return s

    def __unicode__(self):
        # render into a list-backed TerminalWriter and join the chunks
        l = []
        tw = py.io.TerminalWriter(l.append)
        self.toterminal(tw)
        l = map(unicode_or_repr, l)
        return "".join(l).strip()

    def __repr__(self):
        return "<%s instance at %0x>" %(self.__class__, id(self))
+
def unicode_or_repr(obj):
    """ best-effort conversion of *obj* to text: py.builtin._totext, or a
    saferepr-based placeholder when that conversion itself raises. """
    try:
        return py.builtin._totext(obj)
    except KeyboardInterrupt:
        raise
    except Exception:
        return "<print-error: %r>" % py.io.saferepr(obj)
+
class ReprExceptionInfo(TerminalRepr):
    """Renderable representation of an exception: a formatted traceback,
    the crash location, and optional named extra sections."""

    def __init__(self, reprtraceback, reprcrash):
        self.reprtraceback = reprtraceback
        self.reprcrash = reprcrash
        self.sections = []

    def addsection(self, name, content, sep="-"):
        """Queue an extra section, rendered after the traceback."""
        self.sections.append((name, content, sep))

    def toterminal(self, tw):
        """Write the traceback, then every queued section, to *tw*."""
        self.reprtraceback.toterminal(tw)
        for secname, seccontent, secsep in self.sections:
            tw.sep(secsep, secname)
            tw.line(seccontent)
+
class ReprTraceback(TerminalRepr):
    # separator drawn between two consecutive "long"-style entries
    entrysep = "_ "

    def __init__(self, reprentries, extraline, style):
        self.reprentries = reprentries
        # e.g. the "!!! Recursion detected" note, or None
        self.extraline = extraline
        self.style = style

    def toterminal(self, tw):
        sepok = False
        for entry in self.reprentries:
            if self.style == "long":
                # separator only *between* entries, blank line before each
                if sepok:
                    tw.sep(self.entrysep)
                tw.line("")
            sepok = True
            entry.toterminal(tw)
        if self.extraline:
            tw.line(self.extraline)
+
class ReprEntry(TerminalRepr):
    localssep = "_ "

    def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr):
        self.lines = lines                  # marked-up source lines
        self.reprfuncargs = reprfuncargs    # ReprFuncArgs or None
        self.reprlocals = reprlocals        # ReprLocals or None
        self.reprfileloc = filelocrepr      # ReprFileLocation or None

    def toterminal(self, tw):
        if self.reprfuncargs:
            self.reprfuncargs.toterminal(tw)
        for line in self.lines:
            # failure-explanation lines ("E   ...") are shown in red
            red = line.startswith("E   ")
            tw.line(line, bold=True, red=red)
        if self.reprlocals:
            #tw.sep(self.localssep, "Locals")
            tw.line("")
            self.reprlocals.toterminal(tw)
        if self.reprfileloc:
            tw.line("")
            self.reprfileloc.toterminal(tw)

    def __str__(self):
        return "%s\n%s\n%s" % ("\n".join(self.lines),
                               self.reprlocals,
                               self.reprfileloc)
+
class ReprFileLocation(TerminalRepr):
    def __init__(self, path, lineno, message):
        self.path = str(path)
        self.lineno = lineno
        self.message = message

    def toterminal(self, tw):
        # filename and lineno output for each entry,
        # using an output format that most editors understand
        msg = self.message
        i = msg.find("\n")
        if i != -1:
            # only the first message line fits the "file:line: msg" format
            msg = msg[:i]
        tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
+
class ReprLocals(TerminalRepr):
    """Pre-formatted ``name = value`` lines describing a frame's locals."""

    def __init__(self, lines):
        self.lines = lines

    def toterminal(self, tw):
        """Emit every stored line verbatim."""
        for entry in self.lines:
            tw.line(entry)
+
class ReprFuncArgs(TerminalRepr):
    def __init__(self, args):
        # args: list of (name, saferepr-string) pairs
        self.args = args

    def toterminal(self, tw):
        if self.args:
            linesofar = ""
            for name, value in self.args:
                ns = "%s = %s" %(name, value)
                # wrap at terminal width: flush the current line when the
                # next "name = value" chunk (plus ", ") would overflow
                if len(ns) + len(linesofar) + 2 > tw.fullwidth:
                    if linesofar:
                        tw.line(linesofar)
                    linesofar = ns
                else:
                    if linesofar:
                        linesofar += ", " + ns
                    else:
                        linesofar = ns
            if linesofar:
                tw.line(linesofar)
            tw.line("")
+
+
+
# original builtins saved by patch_builtins(), keyed by name; each value
# is a stack so nested patch/unpatch pairs restore correctly
oldbuiltins = {}

def patch_builtins(assertion=True, compile=True):
    """ put compile and AssertionError builtins to Python's builtins. """
    if assertion:
        from py._code import assertion
        l = oldbuiltins.setdefault('AssertionError', [])
        l.append(py.builtin.builtins.AssertionError)
        py.builtin.builtins.AssertionError = assertion.AssertionError
    if compile:
        l = oldbuiltins.setdefault('compile', [])
        l.append(py.builtin.builtins.compile)
        py.builtin.builtins.compile = py.code.compile
+
def unpatch_builtins(assertion=True, compile=True):
    """ remove compile and AssertionError builtins from Python builtins. """
    # pops the most recently saved originals; raises KeyError/IndexError
    # if patch_builtins() was not called first
    if assertion:
        py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
    if compile:
        py.builtin.builtins.compile = oldbuiltins['compile'].pop()
+
def getrawcode(obj):
    """Return the underlying code object for *obj*.

    Successively unwraps bound methods (im_func), py2 functions
    (func_code), frames (f_code) and py3 functions (__code__);
    objects without any of these attributes are returned unchanged.
    """
    for attr in ('im_func', 'func_code', 'f_code', '__code__'):
        obj = getattr(obj, attr, obj)
    return obj
+
Added: pypy/branch/py12/py/_code/oldmagic.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_code/oldmagic.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,62 @@
+""" deprecated module for turning on/off some features. """
+
+import py
+
+from py.builtin import builtins as cpy_builtin
+
def invoke(assertion=False, compile=False):
    """ (deprecated) invoke magic, currently you can specify:

        assertion  patches the builtin AssertionError to try to give
                   more meaningful AssertionErrors, which by means
                   of deploying a mini-interpreter constructs
                   a useful error message.
    """
    # thin deprecated wrapper; warn, then delegate
    py.log._apiwarn("1.1",
        "py.magic.invoke() is deprecated, use py.code.patch_builtins()",
        stacklevel=2,
    )
    py.code.patch_builtins(assertion=assertion, compile=compile)
+
def revoke(assertion=False, compile=False):
    """ (deprecated) revoke previously invoked magic (see invoke())."""
    # thin deprecated wrapper; warn, then delegate
    py.log._apiwarn("1.1",
        "py.magic.revoke() is deprecated, use py.code.unpatch_builtins()",
        stacklevel=2,
    )
    py.code.unpatch_builtins(assertion=assertion, compile=compile)
+
# per-(namespace, name) stacks of original values saved by patch()
patched = {}

def patch(namespace, name, value):
    """ (deprecated) rebind the 'name' on the 'namespace' to the 'value',
        and remember the original value. Multiple
        invocations to the same namespace/name pair will
        remember a list of old values.
    """
    py.log._apiwarn("1.1",
        "py.magic.patch() is deprecated, in tests use monkeypatch funcarg.",
        stacklevel=2,
    )
    nref = (namespace, name)
    orig = getattr(namespace, name)
    patched.setdefault(nref, []).append(orig)
    setattr(namespace, name, value)
    return orig
+
def revert(namespace, name):
    """ (deprecated) revert to the original value the last patch modified.
        Raise ValueError if no such original value exists.
    """
    py.log._apiwarn("1.1",
        "py.magic.revert() is deprecated, in tests use monkeypatch funcarg.",
        stacklevel=2,
    )
    nref = (namespace, name)
    if nref not in patched or not patched[nref]:
        raise ValueError("No original value stored for %s.%s" % nref)
    # return the value that was in effect before reverting
    current = getattr(namespace, name)
    orig = patched[nref].pop()
    setattr(namespace, name, orig)
    return current
+
Added: pypy/branch/py12/py/_code/oldmagic2.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_code/oldmagic2.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,6 @@
+
import py

# deprecated alias module: importing it warns and re-exports
# py.code._AssertionError under the old py.magic.AssertionError name
py.log._apiwarn("1.1", "py.magic.AssertionError is deprecated, use py.code._AssertionError", stacklevel=2)

from py.code import _AssertionError as AssertionError
Added: pypy/branch/py12/py/_code/source.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_code/source.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,347 @@
+from __future__ import generators
+import sys
+import inspect, tokenize
+import py
+cpy_compile = compile
+
try:
    import _ast
    from _ast import PyCF_ONLY_AST as _AST_FLAG
except ImportError:
    # Python < 2.6 (or implementations without _ast): no AST compilation
    # flag available; Source.compile then always returns a code object
    _AST_FLAG = 0
    _ast = None
+
+
class Source(object):
    """ an immutable object holding a source code fragment,
        possibly deindenting it.
    """
    def __init__(self, *parts, **kwargs):
        self.lines = lines = []
        de = kwargs.get('deindent', True)
        rstrip = kwargs.get('rstrip', True)
        for part in parts:
            if not part:
                partlines = []
            # NOTE(review): deliberately not 'elif' -- a falsy part that is
            # neither Source nor string still reaches getsource() below;
            # confirm whether that is intended
            if isinstance(part, Source):
                partlines = part.lines
            elif isinstance(part, py.builtin._basestring):
                partlines = part.split('\n')
                if rstrip:
                    # drop trailing blank lines
                    while partlines:
                        if partlines[-1].strip():
                            break
                        partlines.pop()
            else:
                # arbitrary object (function, class, ...): inspect-based
                partlines = getsource(part, deindent=de).lines
            if de:
                partlines = deindent(partlines)
            lines.extend(partlines)

    def __eq__(self, other):
        try:
            return self.lines == other.lines
        except AttributeError:
            if isinstance(other, str):
                return str(self) == other
            return False

    def __getitem__(self, key):
        if isinstance(key, int):
            return self.lines[key]
        else:
            if key.step not in (None, 1):
                raise IndexError("cannot slice a Source with a step")
            return self.__getslice__(key.start, key.stop)

    def __len__(self):
        return len(self.lines)

    def __getslice__(self, start, end):
        newsource = Source()
        newsource.lines = self.lines[start:end]
        return newsource

    def strip(self):
        """ return new source object with trailing
            and leading blank lines removed.
        """
        start, end = 0, len(self)
        while start < end and not self.lines[start].strip():
            start += 1
        while end > start and not self.lines[end-1].strip():
            end -= 1
        source = Source()
        source.lines[:] = self.lines[start:end]
        return source

    def putaround(self, before='', after='', indent=' ' * 4):
        """ return a copy of the source object with
            'before' and 'after' wrapped around it.
        """
        before = Source(before)
        after = Source(after)
        newsource = Source()
        lines = [ (indent + line) for line in self.lines]
        newsource.lines = before.lines + lines +  after.lines
        return newsource

    def indent(self, indent=' ' * 4):
        """ return a copy of the source object with
            all lines indented by the given indent-string.
        """
        newsource = Source()
        newsource.lines = [(indent+line) for line in self.lines]
        return newsource

    def getstatement(self, lineno):
        """ return Source statement which contains the
            given linenumber (counted from 0).
        """
        start, end = self.getstatementrange(lineno)
        return self[start:end]

    def getstatementrange(self, lineno):
        """ return (start, end) tuple which spans the minimal
            statement region which contains the given lineno.
        """
        # XXX there must be a better than these heuristic ways ...
        # XXX there may even be better heuristics :-)
        if not (0 <= lineno < len(self)):
            raise IndexError("lineno out of range")

        # 1. find the start of the statement
        from codeop import compile_command
        for start in range(lineno, -1, -1):
            trylines = self.lines[start:lineno+1]
            # quick hack to indent the source and get it as a string in one go
            trylines.insert(0, 'def xxx():')
            trysource = '\n '.join(trylines)
            #              ^ space here
            try:
                compile_command(trysource)
            except (SyntaxError, OverflowError, ValueError):
                pass
            else:
                break   # got a valid or incomplete statement

        # 2. find the end of the statement
        for end in range(lineno+1, len(self)+1):
            trysource = self[start:end]
            if trysource.isparseable():
                break

        return start, end

    def getblockend(self, lineno):
        # XXX
        lines = [x + '\n' for x in self.lines[lineno:]]
        blocklines = inspect.getblock(lines)
        #print blocklines
        return lineno + len(blocklines) - 1

    def deindent(self, offset=None):
        """ return a new source object deindented by offset.
            If offset is None then guess an indentation offset from
            the first non-blank line.  Subsequent lines which have a
            lower indentation offset will be copied verbatim as
            they are assumed to be part of multilines.
        """
        # XXX maybe use the tokenizer to properly handle multiline
        #     strings etc.pp?
        newsource = Source()
        newsource.lines[:] = deindent(self.lines, offset)
        return newsource

    def isparseable(self, deindent=True):
        """ return True if source is parseable, heuristically
            deindenting it by default.
        """
        try:
            import parser
        except ImportError:
            # 'parser' is unavailable e.g. on jython: fall back to compile()
            syntax_checker = lambda x: compile(x, 'asd', 'exec')
        else:
            syntax_checker = parser.suite

        if deindent:
            source = str(self.deindent())
        else:
            source = str(self)
        try:
            #compile(source+'\n', "x", "exec")
            syntax_checker(source+'\n')
        except SyntaxError:
            return False
        else:
            return True

    def __str__(self):
        return "\n".join(self.lines)

    def compile(self, filename=None, mode='exec',
                flag=generators.compiler_flag,
                dont_inherit=0, _genframe=None):
        """ return compiled code object. if filename is None
            invent an artificial filename which displays
            the source/line position of the caller frame.
        """
        if not filename or py.path.local(filename).check(file=0):
            if _genframe is None:
                _genframe = sys._getframe(1) # the caller
            fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
            if not filename:
                filename = '<codegen %s:%d>' % (fn, lineno)
            else:
                filename = '<codegen %r %s:%d>' % (filename, fn, lineno)
        source = "\n".join(self.lines) + '\n'
        try:
            co = cpy_compile(source, filename, mode, flag)
        except SyntaxError:
            ex = sys.exc_info()[1]
            # re-represent syntax errors from parsing python strings
            msglines = self.lines[:ex.lineno]
            if ex.offset:
                msglines.append(" "*ex.offset + '^')
            msglines.append("syntax error probably generated here: %s" % filename)
            newex = SyntaxError('\n'.join(msglines))
            newex.offset = ex.offset
            newex.lineno = ex.lineno
            newex.text = ex.text
            raise newex
        else:
            if flag & _AST_FLAG:
                # caller asked for the AST, not a code object
                return co
            # attach the source to the (fake) filename so that
            # findsource() can later recover it
            co_filename = MyStr(filename)
            co_filename.__source__ = self
            return py.code.Code(co).new(rec=1, co_filename=co_filename)
            #return newcode_withfilename(co, co_filename)
+ #return newcode_withfilename(co, co_filename)
+
+#
+# public API shortcut functions
+#
+
def compile_(source, filename=None, mode='exec', flags=
            generators.compiler_flag, dont_inherit=0):
    """ compile the given source to a raw code object,
        which points back to the source code through
        "co_filename.__source__". All code objects
        contained in the code object will recursively
        also have this special subclass-of-string
        filename.
    """
    if _ast is not None and isinstance(source, _ast.AST):
        # XXX should Source support having AST?
        return cpy_compile(source, filename, mode, flags, dont_inherit)
    # NOTE: must stay exactly one call level above Source.compile so
    # the invented <codegen ...> filename points at *our* caller
    _genframe = sys._getframe(1) # the caller
    s = Source(source)
    co = s.compile(filename, mode, flags, _genframe=_genframe)
    return co
+
+
def getfslineno(obj):
    """ return (fspath, lineno) for *obj*: the source file as a
    py.path.local (or None) and a 0-based line number (or None). """
    try:
        code = py.code.Code(obj)
    except TypeError:
        # not code-like: fall back to inspect-based file lookup
        fn = (py.std.inspect.getsourcefile(obj) or
              py.std.inspect.getfile(obj))
        fspath = py.path.local(fn) if fn else None
        lineno = None
        if fspath:
            try:
                _, lineno = findsource(obj)
            except IOError:
                pass
        return fspath, lineno
    return code.path, code.firstlineno
+
+#
+# helper functions
+#
class MyStr(str):
    """ str subclass whose instances can carry extra attributes
    (used to attach a Source object to a code filename). """
+
def findsource(obj):
    """ return (Source, 0-based lineno) for *obj*, or (None, None)
    when the source cannot be located. """
    obj = py.code.getrawcode(obj)
    try:
        fullsource = obj.co_filename.__source__
    except AttributeError:
        pass
    else:
        # the filename was a MyStr carrying the complete Source
        return fullsource, obj.co_firstlineno - 1
    try:
        sourcelines, lineno = py.std.inspect.findsource(obj)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        return None, None
    src = Source()
    src.lines = [line.rstrip() for line in sourcelines]
    return src, lineno
+
+
def getsource(obj, **kwargs):
    """ return a Source object for *obj*'s code.

    Prefers the full source attached to the code's filename by
    Source.compile (co_filename.__source__) and falls back to
    inspect.getsource() otherwise.  **kwargs are passed on to the
    Source constructor (e.g. deindent=False).
    """
    obj = py.code.getrawcode(obj)
    try:
        fullsource = obj.co_filename.__source__
    except AttributeError:
        try:
            strsrc = inspect.getsource(obj)
        except IndentationError:
            # some buggy 2.x inspect versions choke here; degrade gracefully
            strsrc = "\"Buggy python version consider upgrading, cannot get source\""
        assert isinstance(strsrc, str)
        return Source(strsrc, **kwargs)
    else:
        lineno = obj.co_firstlineno - 1
        end = fullsource.getblockend(lineno)
        # bugfix: the keyword is 'deindent' -- the previous 'deident'
        # was a typo and the intended flag was silently ignored
        return Source(fullsource[lineno:end+1], deindent=True)
+
+
def deindent(lines, offset=None):
    """ return a copy of *lines* with *offset* columns of leading
    indentation removed; guess the offset from the first non-blank
    line when it is not given.  Continuation lines of multiline
    tokens (e.g. triple-quoted strings) are kept verbatim. """
    if offset is None:
        offset = 0
        for line in lines:
            expanded = line.expandtabs()
            stripped = expanded.lstrip()
            if stripped:
                offset = len(expanded) - len(stripped)
                break
    if offset == 0:
        return list(lines)
    result = []

    def _feed():
        # readline-style source for tokenize; yields '' forever at EOF
        for line in lines:
            yield line + '\n'
        while True:
            yield ''

    gen = _feed()
    readline = lambda: next(gen)

    try:
        for tok in tokenize.generate_tokens(readline):
            sline = tok[2][0]
            eline = tok[3][0]
            if sline > len(lines):
                break  # past the end of the real input
            if sline > len(result):
                candidate = lines[sline - 1].expandtabs()
                if candidate.lstrip() and candidate[:offset].isspace():
                    candidate = candidate[offset:]  # deindent
                result.append(candidate)

            # keep continuation lines of multiline tokens
            # (i.e. multiline strings) verbatim
            for i in range(sline, eline):
                result.append(lines[i])
    except (IndentationError, tokenize.TokenError):
        pass
    # add any lines we didn't see, e.g. if an exception was raised
    result.extend(lines[len(result):])
    return result
Added: pypy/branch/py12/py/_compat/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_compat/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,2 @@
+""" compatibility modules (taken from 2.4.4) """
+
Added: pypy/branch/py12/py/_compat/dep_doctest.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_compat/dep_doctest.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,5 @@
import py

# deprecation shim: warn (attributing the warning to the caller outside
# the apipkg import machinery) and alias the stdlib module
py.log._apiwarn("1.1", "py.compat.doctest deprecated, use standard library version.",
stacklevel="apipkg")
doctest = py.std.doctest
Added: pypy/branch/py12/py/_compat/dep_optparse.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_compat/dep_optparse.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,4 @@
import py
# deprecation shim: warn, then alias the stdlib module
py.log._apiwarn("1.1", "py.compat.optparse deprecated, use standard library version.", stacklevel="apipkg")

optparse = py.std.optparse
Added: pypy/branch/py12/py/_compat/dep_subprocess.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_compat/dep_subprocess.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,5 @@
+
import py
# deprecation shim: warn, then alias the stdlib module
py.log._apiwarn("1.1", "py.compat.subprocess deprecated, use standard library version.",
stacklevel="apipkg")
subprocess = py.std.subprocess
Added: pypy/branch/py12/py/_compat/dep_textwrap.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_compat/dep_textwrap.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,5 @@
import py

# deprecation shim: warn, then alias the stdlib module
py.log._apiwarn("1.1", "py.compat.textwrap deprecated, use standard library version.",
    stacklevel="apipkg")
textwrap = py.std.textwrap
Added: pypy/branch/py12/py/_error.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_error.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,83 @@
+"""
+create errno-specific classes for IO or os calls.
+
+"""
+import sys, os, errno
+
class Error(EnvironmentError):
    # NB: deliberately no class docstring -- __doc__ is interpolated
    # into __repr__/__str__ and the errno subclasses generated by
    # ErrorMaker set it to the strerror text.
    def __repr__(self):
        cls = self.__class__
        joined = " ".join(map(str, self.args))
        return "%s.%s %r: %s " % (cls.__module__, cls.__name__,
                                  cls.__doc__, joined)

    def __str__(self):
        joined = " ".join(map(str, self.args))
        return "[%s]: %s" % (self.__class__.__doc__, joined)
+
# map selected Windows OS error codes to their closest POSIX errno
# equivalents (used by ErrorMaker.checked_call on Windows)
_winerrnomap = {
    2: errno.ENOENT,
    3: errno.ENOENT,
    17: errno.EEXIST,
    22: errno.ENOTDIR,
    267: errno.ENOTDIR,
    5: errno.EACCES,  # anything better?
}
+
class ErrorMaker(object):
    """ lazily provides Exception classes for each possible POSIX errno
        (as defined per the 'errno' module). All such instances
        subclass EnvironmentError.
    """
    Error = Error
    # class-level cache shared by all instances: one generated class per errno
    _errno2class = {}

    def __getattr__(self, name):
        # resolve e.g. error.ENOENT on first access and cache the class
        # as an instance attribute so __getattr__ is not hit again
        eno = getattr(errno, name)
        cls = self._geterrnoclass(eno)
        setattr(self, name, cls)
        return cls

    def _geterrnoclass(self, eno):
        # return (creating on demand) the Error subclass for numeric errno
        try:
            return self._errno2class[eno]
        except KeyError:
            clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
            # the generated class's __doc__ is the strerror text; Error's
            # __str__/__repr__ interpolate it into their output
            errorcls = type(Error)(clsname, (Error,),
                    {'__module__':'py.error',
                     '__doc__': os.strerror(eno)})
            self._errno2class[eno] = errorcls
            return errorcls

    def checked_call(self, func, *args):
        """ call a function and raise an errno-exception if applicable. """
        __tracebackhide__ = True
        try:
            return func(*args)
        except self.Error:
            raise
        except EnvironmentError:
            cls, value, tb = sys.exc_info()
            if not hasattr(value, 'errno'):
                raise
            __tracebackhide__ = False
            errno = value.errno  # NB: shadows the 'errno' module from here on
            try:
                # feature probe: WindowsError only exists on Windows;
                # elsewhere the isinstance check itself raises NameError
                if not isinstance(value, WindowsError):
                    raise NameError
            except NameError:
                # we are not on Windows, or we got a proper OSError
                cls = self._geterrnoclass(errno)
            else:
                try:
                    cls = self._geterrnoclass(_winerrnomap[errno])
                except KeyError:
                    raise value
            raise cls("%s%r" % (func.__name__, args))
            __tracebackhide__ = True  # NOTE(review): unreachable after raise

error = ErrorMaker()
Added: pypy/branch/py12/py/_io/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_io/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1 @@
+""" input/output helping """
Added: pypy/branch/py12/py/_io/capture.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_io/capture.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,348 @@
+import os
+import sys
+import py
+import tempfile
+
# prefer the unicode-capable io.StringIO; fall back to the classic
# py2 StringIO module on older interpreters
try:
    from io import StringIO
except ImportError:
    from StringIO import StringIO

if sys.version_info < (3,0):
    class TextIO(StringIO):
        # py2: coerce byte strings to unicode before writing so mixed
        # str/unicode output does not blow up
        def write(self, data):
            if not isinstance(data, unicode):
                data = unicode(data, getattr(self, '_encoding', 'UTF-8'))
            StringIO.write(self, data)
else:
    TextIO = StringIO

try:
    from io import BytesIO
except ImportError:
    # py2 < 2.6: emulate BytesIO on top of StringIO, rejecting unicode
    class BytesIO(StringIO):
        def write(self, data):
            if isinstance(data, unicode):
                raise TypeError("not a byte value: %r" %(data,))
            StringIO.write(self, data)
+
class FDCapture:
    """ Capture IO to/from a given os-level filedescriptor. """

    def __init__(self, targetfd, tmpfile=None):
        """ save targetfd descriptor, and open a new
            temporary file there. If no tmpfile is
            specified a tempfile.Tempfile() will be opened
            in text mode.
        """
        self.targetfd = targetfd
        if tmpfile is None:
            f = tempfile.TemporaryFile('wb+')
            # re-open the descriptor as a text-mode duplicate, then
            # drop the original binary handle
            tmpfile = dupfile(f, encoding="UTF-8")
            f.close()
        self.tmpfile = tmpfile
        # keep a duplicate of the original fd so done() can restore it
        self._savefd = os.dup(targetfd)
        os.dup2(self.tmpfile.fileno(), targetfd)
        self._patched = []

    def setasfile(self, name, module=sys):
        """ patch <module>.<name> to self.tmpfile
        """
        key = (module, name)
        self._patched.append((key, getattr(module, name)))
        setattr(module, name, self.tmpfile)

    def unsetfiles(self):
        """ unpatch all patched items
        """
        while self._patched:
            (module, name), value = self._patched.pop()
            setattr(module, name, value)

    def done(self):
        """ unpatch and clean up, returns the self.tmpfile (file object)
        """
        os.dup2(self._savefd, self.targetfd)
        self.unsetfiles()
        os.close(self._savefd)
        # rewind so the caller can read everything that was captured
        self.tmpfile.seek(0)
        return self.tmpfile

    def writeorg(self, data):
        """ write a string to the original file descriptor
        """
        # NOTE(review): dup2 points the temp file's descriptor at the
        # saved fd so this write bypasses the capture; closing tempfp
        # then closes only that duplicate -- confirm _savefd survives
        tempfp = tempfile.TemporaryFile()
        try:
            os.dup2(self._savefd, tempfp.fileno())
            tempfp.write(data)
        finally:
            tempfp.close()
+
+
def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
    """ return a new open file object that's a duplicate of f

        mode is duplicated if not given, 'buffering' controls
        buffer size (defaulting to no buffering) and 'raising'
        defines whether an exception is raised when an incompatible
        file object is passed in (if raising is False, the file
        object itself will be returned)
    """
    try:
        fd = f.fileno()
    except AttributeError:
        # not a real file -- hand it back unless the caller insists
        if raising:
            raise
        return f
    newfd = os.dup(fd)
    mode = mode or f.mode  # equivalent to the old 'mode and mode or f.mode'
    if sys.version_info >= (3, 0):
        if encoding is not None:
            mode = mode.replace("b", "")
            buffering = True
        return os.fdopen(newfd, mode, buffering, encoding, closefd=False)
    dup = os.fdopen(newfd, mode, buffering)
    if encoding is not None:
        return EncodedFile(dup, encoding)
    return dup
+
class EncodedFile(object):
    # py2-only wrapper that encodes unicode writes onto a byte stream.
    # NOTE(review): references the py2 'unicode' builtin, so write()
    # would raise NameError on py3 -- it is only instantiated on the
    # py2 branch of dupfile() above.
    def __init__(self, _stream, encoding):
        self._stream = _stream
        self.encoding = encoding

    def write(self, obj):
        if isinstance(obj, unicode):
            obj = obj.encode(self.encoding)
        elif isinstance(obj, str):
            pass
        else:
            # coerce arbitrary objects to their string form
            obj = str(obj)
        self._stream.write(obj)

    def writelines(self, linelist):
        data = ''.join(linelist)
        self.write(data)

    def __getattr__(self, name):
        # delegate everything else to the wrapped stream
        return getattr(self._stream, name)
+
class Capture(object):
    """ base class for StdCapture/StdCaptureFD; subclasses provide
    _initialize(), done() and readouterr() and store their constructor
    kwargs in self._kwargs for suspend()/resume(). """

    def call(cls, func, *args, **kwargs):
        """ return a (res, out, err) tuple where
            out and err represent the output/error output
            during function execution.
            call the given function with args/kwargs
            and capture output/error during its execution.
        """
        so = cls()
        try:
            res = func(*args, **kwargs)
        finally:
            # always stop capturing, even if func raised
            out, err = so.reset()
        return res, out, err
    call = classmethod(call)

    def reset(self):
        """ reset sys.stdout/stderr and return captured output as strings. """
        if hasattr(self, '_suspended'):
            # capturing was suspended: suspend() stashed the tempfiles
            # in _kwargs
            outfile = self._kwargs['out']
            errfile = self._kwargs['err']
            del self._kwargs
        else:
            outfile, errfile = self.done()
        out, err = "", ""
        if outfile:
            out = outfile.read()
            outfile.close()
        if errfile and errfile != outfile:
            # with mixed capturing out and err share one file: read once
            err = errfile.read()
            errfile.close()
        return out, err

    def suspend(self):
        """ return current snapshot captures, memorize tempfiles. """
        assert not hasattr(self, '_suspended')
        self._suspended = True
        outerr = self.readouterr()
        outfile, errfile = self.done()
        self._kwargs['out'] = outfile
        self._kwargs['err'] = errfile
        return outerr

    def resume(self):
        """ resume capturing with original temp files. """
        assert self._suspended
        self._initialize(**self._kwargs)
        del self._suspended
+
+
class StdCaptureFD(Capture):
    """ This class allows to capture writes to FD1 and FD2
        and may connect a NULL file to FD0 (and prevent
        reads from sys.stdin)
    """
    def __init__(self, out=True, err=True,
                 mixed=False, in_=True, patchsys=True):
        # remember kwargs so suspend()/resume() can re-initialize
        self._kwargs = locals().copy()
        del self._kwargs['self']
        self._initialize(**self._kwargs)

    def _initialize(self, out=True, err=True,
                    mixed=False, in_=True, patchsys=True):
        if in_:
            # connect fd 0 to the null device and guard sys.stdin
            self._oldin = (sys.stdin, os.dup(0))
            sys.stdin = DontReadFromInput()
            fd = os.open(devnullpath, os.O_RDONLY)
            os.dup2(fd, 0)
            os.close(fd)
        if out:
            tmpfile = None
            # 'out' may also be a file-like object to capture into
            if hasattr(out, 'write'):
                tmpfile = out
            self.out = py.io.FDCapture(1, tmpfile=tmpfile)
            if patchsys:
                self.out.setasfile('stdout')
        if err:
            if mixed and out:
                # mixed capturing: err shares out's tempfile
                tmpfile = self.out.tmpfile
            elif hasattr(err, 'write'):
                tmpfile = err
            else:
                tmpfile = None
            self.err = py.io.FDCapture(2, tmpfile=tmpfile)
            if patchsys:
                self.err.setasfile('stderr')

    def done(self):
        """ return (outfile, errfile) and stop capturing. """
        if hasattr(self, 'out'):
            outfile = self.out.done()
        else:
            outfile = None
        if hasattr(self, 'err'):
            errfile = self.err.done()
        else:
            errfile = None
        if hasattr(self, '_oldin'):
            # restore fd 0 and sys.stdin
            oldsys, oldfd = self._oldin
            os.dup2(oldfd, 0)
            os.close(oldfd)
            sys.stdin = oldsys
        return outfile, errfile

    def readouterr(self):
        """ return snapshot value of stdout/stderr capturings. """
        l = []
        for name in ('out', 'err'):
            res = ""
            if hasattr(self, name):
                f = getattr(self, name).tmpfile
                f.seek(0)
                res = f.read()
                # empty the file so the next snapshot starts fresh
                f.truncate(0)
                f.seek(0)
            l.append(res)
        return l
+
class StdCapture(Capture):
    """ This class allows to capture writes to sys.stdout|stderr "in-memory"
        and will raise errors on tries to read from sys.stdin. It only
        modifies sys.stdout|stderr|stdin attributes and does not
        touch underlying File Descriptors (use StdCaptureFD for that).
    """
    def __init__(self, out=True, err=True, in_=True, mixed=False):
        # remember kwargs so suspend()/resume() can re-initialize
        self._kwargs = locals().copy()
        del self._kwargs['self']
        self._initialize(**self._kwargs)

    def _initialize(self, out, err, in_, mixed):
        self._out = out
        self._err = err
        self._in = in_
        if out:
            self._oldout = sys.stdout
            # 'out' may also be a file-like object to capture into
            if not hasattr(out, 'write'):
                out = TextIO()
            sys.stdout = self.out = out
        if err:
            self._olderr = sys.stderr
            if out and mixed:
                # mixed capturing: err shares the out buffer
                err = self.out
            elif not hasattr(err, 'write'):
                err = TextIO()
            sys.stderr = self.err = err
        if in_:
            self._oldin = sys.stdin
            sys.stdin = self.newin = DontReadFromInput()

    def done(self):
        """ return (outfile, errfile) and stop capturing. """
        o,e = sys.stdout, sys.stderr
        if self._out:
            try:
                sys.stdout = self._oldout
            except AttributeError:
                # _oldout was already deleted by a previous done()
                raise IOError("stdout capturing already reset")
            del self._oldout
            outfile = self.out
            outfile.seek(0)
        else:
            outfile = None
        if self._err:
            try:
                sys.stderr = self._olderr
            except AttributeError:
                raise IOError("stderr capturing already reset")
            del self._olderr
            errfile = self.err
            errfile.seek(0)
        else:
            errfile = None
        if self._in:
            sys.stdin = self._oldin
        return outfile, errfile

    def readouterr(self):
        """ return snapshot value of stdout/stderr capturings. """
        out = err = ""
        if self._out:
            out = sys.stdout.getvalue()
            # empty the buffer so the next snapshot starts fresh
            sys.stdout.truncate(0)
            sys.stdout.seek(0)
        if self._err:
            err = sys.stderr.getvalue()
            sys.stderr.truncate(0)
            sys.stderr.seek(0)
        return out, err
+
class DontReadFromInput:
    """Temporary stub class. Ideally when stdin is accessed, the
    capturing should be turned off, with possibly all data captured
    so far sent to the screen. This should be configurable, though,
    because in automated test runs it is better to crash than
    hang indefinitely.
    """
    def read(self, *args):
        raise IOError("reading from stdin while output is captured")

    # every read-flavoured accessor fails the same way
    def readline(self, *args):
        return self.read(*args)

    def readlines(self, *args):
        return self.read(*args)

    def __iter__(self):
        return self.read()

    def fileno(self):
        raise ValueError("redirected Stdin is pseudofile, has no fileno()")

    def isatty(self):
        return False

    def close(self):
        pass
+
# os.devnull appeared in python 2.4; fall back to the platform's
# conventional null-device name on older interpreters
try:
    devnullpath = os.devnull
except AttributeError:
    if os.name == 'nt':
        devnullpath = 'NUL'
    else:
        devnullpath = '/dev/null'
+
+
Added: pypy/branch/py12/py/_io/saferepr.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_io/saferepr.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,56 @@
+import py
+import sys, os.path
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+sysex = (KeyboardInterrupt, MemoryError, SystemExit)
+
class SafeRepr(reprlib.Repr):
    """ subclass of repr.Repr that limits the resulting size of repr()
        and includes information on exceptions raised during the call.
    """
    def repr(self, x):
        return self._callhelper(reprlib.Repr.repr, self, x)

    def repr_instance(self, x, level):
        return self._callhelper(builtin_repr, x)

    def _callhelper(self, call, x, *args):
        # try the vanilla repr first
        try:
            s = call(x, *args)
        except sysex:
            raise
        except:
            cls, e, tb = sys.exc_info()
            exc_name = getattr(cls, '__name__', 'unknown')
            try:
                exc_info = str(e)
            except sysex:
                raise
            except:
                exc_info = 'unknown'
            # describe the failing repr instead of propagating it
            return '<[%s("%s") raised in repr()] %s object at 0x%x>' % (
                exc_name, exc_info, x.__class__.__name__, id(x))
        # truncate over-long results, marking the cut with '...'
        if len(s) > self.maxsize:
            i = max(0, (self.maxsize-3)//2)
            j = max(0, self.maxsize-3-i)
            s = s[:i] + '...' + s[len(s)-j:]
        return s
+
def saferepr(obj, maxsize=240):
    """ return a size-limited safe repr-string for the given object.
        Failing __repr__ functions of user instances will be represented
        with a short exception info and 'saferepr' generally takes
        care to never raise exceptions itself. This function is a wrapper
        around the Repr/reprlib functionality of the standard 2.6 lib.
    """
    # review exception handling
    limited = SafeRepr()
    limited.maxstring = maxsize
    limited.maxsize = maxsize
    limited.maxother = 160
    return limited.repr(obj)
Added: pypy/branch/py12/py/_io/terminalwriter.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_io/terminalwriter.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,273 @@
+"""
+
+Helper functions for writing to terminals and files.
+
+"""
+
+
+import sys, os
+import py
+
def _getdimensions():
    """ return (height, width) of the controlling terminal via the
    TIOCGWINSZ ioctl (posix only; replaced on win32 below). """
    import termios, fcntl, struct
    raw = fcntl.ioctl(0, termios.TIOCGWINSZ, "\000" * 8)
    height, width = struct.unpack("hhhh", raw)[:2]
    return height, width
+
if sys.platform == 'win32':
    # ctypes access to the Windows console
    # console attribute bit flags for SetConsoleTextAttribute
    STD_OUTPUT_HANDLE = -11
    STD_ERROR_HANDLE = -12
    FOREGROUND_BLUE = 0x0001 # text color contains blue.
    FOREGROUND_GREEN = 0x0002 # text color contains green.
    FOREGROUND_RED = 0x0004 # text color contains red.
    FOREGROUND_WHITE = 0x0007
    FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
    BACKGROUND_BLUE = 0x0010 # background color contains blue.
    BACKGROUND_GREEN = 0x0020 # background color contains green.
    BACKGROUND_RED = 0x0040 # background color contains red.
    BACKGROUND_WHITE = 0x0070
    BACKGROUND_INTENSITY = 0x0080 # background color is intensified.

    def GetStdHandle(kind):
        import ctypes
        return ctypes.windll.kernel32.GetStdHandle(kind)

    def SetConsoleTextAttribute(handle, attr):
        import ctypes
        ctypes.windll.kernel32.SetConsoleTextAttribute(
            handle, attr)

    def _getdimensions():
        # win32 replacement for the termios-based probe above
        import ctypes
        from ctypes import wintypes

        SHORT = ctypes.c_short
        class COORD(ctypes.Structure):
            _fields_ = [('X', SHORT),
                        ('Y', SHORT)]
        class SMALL_RECT(ctypes.Structure):
            _fields_ = [('Left', SHORT),
                        ('Top', SHORT),
                        ('Right', SHORT),
                        ('Bottom', SHORT)]
        class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
            _fields_ = [('dwSize', COORD),
                        ('dwCursorPosition', COORD),
                        ('wAttributes', wintypes.WORD),
                        ('srWindow', SMALL_RECT),
                        ('dwMaximumWindowSize', COORD)]
        STD_OUTPUT_HANDLE = -11
        handle = GetStdHandle(STD_OUTPUT_HANDLE)
        info = CONSOLE_SCREEN_BUFFER_INFO()
        ctypes.windll.kernel32.GetConsoleScreenBufferInfo(
            handle, ctypes.byref(info))
        # Substract one from the width, otherwise the cursor wraps
        # and the ending \n causes an empty line to display.
        return info.dwSize.Y, info.dwSize.X - 1
+
def get_terminal_width():
    """ best-effort terminal width: probe the tty, falling back to
    $COLUMNS (default 80) when no terminal is available. """
    try:
        _, width = _getdimensions()
    except (SystemExit, KeyboardInterrupt):
        raise
    except:
        # FALLBACK: no tty or the platform call failed
        return int(os.environ.get('COLUMNS', 80))
    # XXX the windows getdimensions may be bogus, let's sanify a bit
    if width < 40:
        width = 80
    return width

terminal_width = get_terminal_width()
+
# XXX unify with _escaped func below
def ansi_print(text, esc, file=None, newline=True, flush=False):
    """ write *text* to *file* (default sys.stderr) decorated with the
    given ANSI escape code(s) when the target is a tty; on win32 the
    codes are translated to console attributes instead. """
    if file is None:
        file = sys.stderr
    text = text.rstrip()
    if esc and not isinstance(esc, tuple):
        esc = (esc,)
    if esc and sys.platform != "win32" and file.isatty():
        text = (''.join(['\x1b[%sm' % cod for cod in esc])  +
                text +
                '\x1b[0m')  # ANSI color code "reset"
    if newline:
        text += '\n'

    if esc and sys.platform == "win32" and file.isatty():
        if 1 in esc:
            # code 1 means bold -> intensity flag, not a color
            bold = True
            esc = tuple([x for x in esc if x != 1])
        else:
            bold = False
        # map ANSI color codes to win32 console attributes
        esctable = {()   : FOREGROUND_WHITE,                 # normal
                    (31,): FOREGROUND_RED,                   # red
                    (32,): FOREGROUND_GREEN,                 # green
                    (33,): FOREGROUND_GREEN|FOREGROUND_RED,  # yellow
                    (34,): FOREGROUND_BLUE,                  # blue
                    (35,): FOREGROUND_BLUE|FOREGROUND_RED,   # purple
                    (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
                    (37,): FOREGROUND_WHITE,                 # white
                    (39,): FOREGROUND_WHITE,                 # reset
                    }
        attr = esctable.get(esc, FOREGROUND_WHITE)
        if bold:
            attr |= FOREGROUND_INTENSITY
        STD_OUTPUT_HANDLE = -11
        STD_ERROR_HANDLE = -12
        if file is sys.stderr:
            handle = GetStdHandle(STD_ERROR_HANDLE)
        else:
            handle = GetStdHandle(STD_OUTPUT_HANDLE)
        SetConsoleTextAttribute(handle, attr)
        file.write(text)
        SetConsoleTextAttribute(handle, FOREGROUND_WHITE)
    else:
        file.write(text)

    if flush:
        file.flush()
+
def should_do_markup(file):
    """ decide whether ANSI markup should be emitted for *file*:
    it must be a tty, TERM must not be 'dumb', and jython-on-windows
    is excluded. """
    if not hasattr(file, 'isatty') or not file.isatty():
        return False
    if os.environ.get('TERM') == 'dumb':
        return False
    return not (sys.platform.startswith('java') and os._name == 'nt')
+
class TerminalWriter(object):
    # ANSI SGR codes by symbolic name (lowercase = foreground,
    # capitalized = background, plus text attributes)
    _esctable = dict(black=30, red=31, green=32, yellow=33,
                     blue=34, purple=35, cyan=36, white=37,
                     Black=40, Red=41, Green=42, Yellow=43,
                     Blue=44, Purple=45, Cyan=46, White=47,
                     bold=1, light=2, blink=5, invert=7)

    # XXX deprecate stringio argument
    def __init__(self, file=None, stringio=False, encoding=None):
        # *file* may be None (use stdout), a write() callable, or an
        # open file-like object
        if file is None:
            if stringio:
                self.stringio = file = py.io.TextIO()
            else:
                file = py.std.sys.stdout
                if hasattr(file, 'encoding'):
                    encoding = file.encoding
        elif hasattr(file, '__call__'):
            file = WriteFile(file, encoding=encoding)
        self.encoding = encoding
        self._file = file
        self.fullwidth = get_terminal_width()
        self.hasmarkup = should_do_markup(file)

    def _escaped(self, text, esc):
        # wrap *text* in the given SGR codes, ending with reset
        if esc and self.hasmarkup:
            text = (''.join(['\x1b[%sm' % cod for cod in esc])  +
                    text +'\x1b[0m')
        return text

    def markup(self, text, **kw):
        """ return *text* wrapped in the escapes named by true-valued
        keyword args (e.g. red=True, bold=True). """
        esc = []
        for name in kw:
            if name not in self._esctable:
                raise ValueError("unknown markup: %r" %(name,))
            if kw[name]:
                esc.append(self._esctable[name])
        return self._escaped(text, tuple(esc))

    def sep(self, sepchar, title=None, fullwidth=None, **kw):
        """ write a separator line of *sepchar*, optionally with a
        centered *title*, padded to the terminal width. """
        if fullwidth is None:
            fullwidth = self.fullwidth
        # the goal is to have the line be as long as possible
        # under the condition that len(line) <= fullwidth
        if title is not None:
            # we want 2 + 2*len(fill) + len(title) <= fullwidth
            # i.e.    2 + 2*len(sepchar)*N + len(title) <= fullwidth
            #         2*len(sepchar)*N <= fullwidth - len(title) - 2
            #         N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
            N = (fullwidth - len(title) - 2) // (2*len(sepchar))
            fill = sepchar * N
            line = "%s %s %s" % (fill, title, fill)
        else:
            # we want len(sepchar)*N <= fullwidth
            # i.e. N <= fullwidth // len(sepchar)
            line = sepchar * (fullwidth // len(sepchar))
        # in some situations there is room for an extra sepchar at the right,
        # in particular if we consider that with a sepchar like "_ " the
        # trailing space is not important at the end of the line
        if len(line) + len(sepchar.rstrip()) <= fullwidth:
            line += sepchar.rstrip()

        self.line(line, **kw)

    def write(self, s, **kw):
        """ write *s*, applying markup keywords when enabled. """
        if s:
            if not isinstance(self._file, WriteFile):
                # WriteFile handles its own encoding
                s = self._getbytestring(s)
            if self.hasmarkup and kw:
                s = self.markup(s, **kw)
            self._file.write(s)
            self._file.flush()

    def _getbytestring(self, s):
        # XXX review this and the whole logic
        if self.encoding and sys.version_info[0] < 3 and isinstance(s, unicode):
            return s.encode(self.encoding)
        elif not isinstance(s, str):
            try:
                return str(s)
            except UnicodeEncodeError:
                return "<print-error '%s' object>" % type(s).__name__
        return s

    def line(self, s='', **kw):
        """ write *s* followed by a newline. """
        self.write(s, **kw)
        self.write('\n')
+
class Win32ConsoleWriter(TerminalWriter):
    # win32 variant: markup is rendered through console attributes
    # instead of ANSI escape sequences
    def write(self, s, **kw):
        if s:
            s = self._getbytestring(s)
            if self.hasmarkup:
                handle = GetStdHandle(STD_OUTPUT_HANDLE)

            if self.hasmarkup and kw:
                attr = 0
                if kw.pop('bold', False):
                    attr |= FOREGROUND_INTENSITY

                if kw.pop('red', False):
                    attr |= FOREGROUND_RED
                elif kw.pop('blue', False):
                    attr |= FOREGROUND_BLUE
                elif kw.pop('green', False):
                    attr |= FOREGROUND_GREEN
                else:
                    attr |= FOREGROUND_WHITE

                SetConsoleTextAttribute(handle, attr)
            self._file.write(s)
            self._file.flush()
            if self.hasmarkup:
                # restore default attributes after colored output
                SetConsoleTextAttribute(handle, FOREGROUND_WHITE)

    def line(self, s="", **kw):
        self.write(s+"\n", **kw)

if sys.platform == 'win32':
    # transparently substitute the console-attribute implementation
    TerminalWriter = Win32ConsoleWriter
+
class WriteFile(object):
    """ minimal file-like adapter around a plain write() callable,
    optionally encoding text before passing it on. """
    def __init__(self, writemethod, encoding=None):
        self.encoding = encoding
        self._writemethod = writemethod

    def write(self, data):
        payload = data.encode(self.encoding) if self.encoding else data
        self._writemethod(payload)

    def flush(self):
        # nothing is buffered here
        return
+
+
Added: pypy/branch/py12/py/_log/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_log/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,2 @@
+""" logging API ('producers' and 'consumers' connected via keywords) """
+
Added: pypy/branch/py12/py/_log/log.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_log/log.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,186 @@
+"""
+basic logging functionality based on a producer/consumer scheme.
+
+XXX implement this API: (maybe put it into slogger.py?)
+
+ log = Logger(
+ info=py.log.STDOUT,
+ debug=py.log.STDOUT,
+ command=None)
+ log.info("hello", "world")
+ log.command("hello", "world")
+
+ log = Logger(info=Logger(something=...),
+ debug=py.log.STDOUT,
+ command=None)
+"""
+import py, sys
+
class Message(object):
    """ a log message: a tuple of keywords plus positional args. """
    def __init__(self, keywords, args):
        self.keywords = keywords
        self.args = args

    def content(self):
        """ the space-joined string form of the args. """
        return " ".join([str(arg) for arg in self.args])

    def prefix(self):
        """ '[kw1:kw2] '-style prefix derived from the keywords. """
        return "[%s] " % (":".join(self.keywords))

    def __str__(self):
        return self.prefix() + self.content()
+
+
class Producer(object):
    """ (deprecated) Log producer API which sends messages to be logged
        to a 'consumer' object, which then prints them to stdout,
        stderr, files, etc. Used extensively by PyPy-1.1.
    """

    Message = Message  # to allow later customization
    keywords2consumer = {}

    def __init__(self, keywords, keywordmapper=None, **kw):
        if hasattr(keywords, 'split'):
            # accept a whitespace-separated string as well as a tuple
            keywords = tuple(keywords.split())
        self._keywords = keywords
        if keywordmapper is None:
            keywordmapper = default_keywordmapper
        self._keywordmapper = keywordmapper

    def __repr__(self):
        return "<py.log.Producer %s>" % ":".join(self._keywords)

    def __getattr__(self, name):
        # producer.sub yields a child producer with 'sub' appended to the
        # keywords; refuse names containing '_' so internal attribute
        # probes fail cleanly
        if '_' in name:
            raise AttributeError(name)
        producer = self.__class__(self._keywords + (name,))
        setattr(self, name, producer)  # cache for the next access
        return producer

    def __call__(self, *args):
        """ write a message to the appropriate consumer(s) """
        func = self._keywordmapper.getconsumer(self._keywords)
        if func is not None:
            func(self.Message(self._keywords, args))
+
class KeywordMapper:
    # maps keyword tuples to consumer callables
    def __init__(self):
        self.keywords2consumer = {}

    def getstate(self):
        """ snapshot the keyword->consumer registry (see setstate). """
        return self.keywords2consumer.copy()
    def setstate(self, state):
        """ replace the registry with a previously taken snapshot. """
        self.keywords2consumer.clear()
        self.keywords2consumer.update(state)

    def getconsumer(self, keywords):
        """ return a consumer matching the given keywords.

            tries to find the most suitable consumer by walking, starting from
            the back, the list of keywords, the first consumer matching a
            keyword is returned (falling back to py.log.default)
        """
        # longest registered prefix wins: (k1..kn), then (k1..kn-1), ...
        for i in range(len(keywords), 0, -1):
            try:
                return self.keywords2consumer[keywords[:i]]
            except KeyError:
                continue
        return self.keywords2consumer.get('default', default_consumer)

    def setconsumer(self, keywords, consumer):
        """ set a consumer for a set of keywords. """
        # normalize to tuples
        if isinstance(keywords, str):
            keywords = tuple(filter(None, keywords.split()))
        elif hasattr(keywords, '_keywords'):
            keywords = keywords._keywords
        elif not isinstance(keywords, tuple):
            raise TypeError("key %r is not a string or tuple" % (keywords,))
        if consumer is not None and not py.builtin.callable(consumer):
            # allow passing an open file instead of a callable
            if not hasattr(consumer, 'write'):
                raise TypeError(
                    "%r should be None, callable or file-like" % (consumer,))
            consumer = File(consumer)
        self.keywords2consumer[keywords] = consumer
+
def default_consumer(msg):
    """ the default consumer, writes the message to sys.stderr """
    sys.stderr.write(str(msg)+"\n")

default_keywordmapper = KeywordMapper()

def setconsumer(keywords, consumer):
    """ register *consumer* for *keywords* on the default mapper. """
    default_keywordmapper.setconsumer(keywords, consumer)

def setstate(state):
    """ restore the default mapper's registry from a snapshot. """
    default_keywordmapper.setstate(state)
def getstate():
    """ snapshot the default mapper's registry. """
    return default_keywordmapper.getstate()
+
+#
+# Consumers
+#
+
class File(object):
    """ log consumer wrapping a file(-like) object """
    def __init__(self, f):
        assert hasattr(f, 'write')
        #assert isinstance(f, file) or not hasattr(f, 'open')
        self._file = f

    def __call__(self, msg):
        """ write a message to the log """
        out = self._file
        out.write("%s\n" % (msg,))
        if hasattr(out, 'flush'):
            out.flush()
+
class Path(object):
    """ log consumer that opens and writes to a Path """
    def __init__(self, filename, append=False,
                 delayed_create=False, buffering=False):
        self._append = append
        self._filename = str(filename)
        self._buffering = buffering
        if not delayed_create:
            self._openfile()

    def _openfile(self):
        if self._append:
            mode = 'a'
        else:
            mode = 'w'
        self._file = open(self._filename, mode)

    def __call__(self, msg):
        """ write a message to the log """
        if not hasattr(self, "_file"):
            # delayed_create: open lazily on first message
            self._openfile()
        self._file.write("%s\n" % (msg,))
        if not self._buffering:
            self._file.flush()
+
def STDOUT(msg):
    """ consumer that writes to sys.stdout """
    sys.stdout.write("%s\n" % (msg,))

def STDERR(msg):
    """ consumer that writes to sys.stderr """
    sys.stderr.write("%s\n" % (msg,))
+
class Syslog:
    """ consumer that writes to the syslog daemon """

    def __init__(self, priority = None):
        # default priority is LOG_INFO, attached below from the
        # platform's syslog module
        if priority is None:
            priority = self.LOG_INFO
        self.priority = priority

    def __call__(self, msg):
        """ write a message to the log """
        py.std.syslog.syslog(self.priority, str(msg))

# mirror the LOG_* priority constants onto Syslog; silently skip
# priorities this platform's syslog module does not provide
for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
    _prio = "LOG_" + _prio
    try:
        setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
    except AttributeError:
        pass
Added: pypy/branch/py12/py/_log/warning.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_log/warning.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,76 @@
+import py, sys
+
class DeprecationWarning(DeprecationWarning):
    """ warning carrying an explicit message, path and (0-based) line number.
    NOTE: deliberately shadows -- and subclasses -- the builtin warning of
    the same name, so 'except DeprecationWarning' still catches it. """

    def __init__(self, msg, path, lineno):
        self.msg = msg
        self.path = path
        self.lineno = lineno

    def __str__(self):
        return self.msg

    def __repr__(self):
        # report a human-friendly 1-based line number
        location = "%s:%d" % (self.path, self.lineno + 1)
        return "%s: %s" % (location, self.msg)
+
def _apiwarn(startversion, msg, stacklevel=2, function=None):
    """ issue a deprecation warning, appending "(since version X)" to 'msg'.
    stacklevel may be a string: the stack is then walked outward and the
    warning attributed to the first frame whose filename does NOT contain
    that substring (i.e. skipping over the library's own frames). """
    # below is mostly COPIED from python2.4/warnings.py's def warn()
    # Get context information
    if isinstance(stacklevel, str):
        frame = sys._getframe(1)
        level = 1
        # 'found' records whether we ever entered a matching-filename frame
        found = frame.f_code.co_filename.find(stacklevel) != -1
        while frame:
            co = frame.f_code
            if co.co_filename.find(stacklevel) == -1:
                if found:
                    stacklevel = level
                    break
            else:
                found = True
            level += 1
            frame = frame.f_back
        else:
            # walked off the top of the stack without locating a boundary
            stacklevel = 1
    msg = "%s (since version %s)" %(msg, startversion)
    warn(msg, stacklevel=stacklevel+1, function=function)
+
def warn(msg, stacklevel=1, function=None):
    """ emit 'msg' as a py-flavoured DeprecationWarning attributed either to
    'function' (if given) or to the caller at 'stacklevel'. """
    if function is not None:
        # attribute the warning to the function's definition site
        filename = py.std.inspect.getfile(function)
        lineno = py.code.getrawcode(function).co_firstlineno
    else:
        try:
            caller = sys._getframe(stacklevel)
        except ValueError:
            # stack not deep enough; fall back to interpreter-level context
            globals = sys.__dict__
            lineno = 1
        else:
            globals = caller.f_globals
            lineno = caller.f_lineno
        if '__name__' in globals:
            module = globals['__name__']
        else:
            module = "<string>"
        filename = globals.get('__file__')
        if filename:
            # map compiled-file names back to their source file
            fnl = filename.lower()
            if fnl.endswith(".pyc") or fnl.endswith(".pyo"):
                filename = filename[:-1]
            elif fnl.endswith("$py.class"):
                filename = filename.replace('$py.class', '.py')
        else:
            if module == "__main__":
                try:
                    filename = sys.argv[0]
                except AttributeError:
                    # embedded interpreters don't have sys.argv, see bug #839151
                    filename = '__main__'
            if not filename:
                filename = module
    path = py.path.local(filename)
    warning = DeprecationWarning(msg, path, lineno)
    py.std.warnings.warn_explicit(warning, category=Warning,
        filename=str(warning.path),
        lineno=warning.lineno,
        registry=py.std.warnings.__dict__.setdefault(
            "__warningsregistry__", {})
    )
+
Added: pypy/branch/py12/py/_path/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_path/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1 @@
+""" unified file system api """
Added: pypy/branch/py12/py/_path/cacheutil.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_path/cacheutil.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,114 @@
+"""
+This module contains multithread-safe cache implementations.
+
+All Caches have
+
+ getorbuild(key, builder)
+ delentry(key)
+
+methods and allow configuration when instantiating the cache class.
+"""
+from time import time as gettime
+
class BasicCache(object):
    """ bounded cache keyed by arbitrary hashable keys; entries must expose
    .value and .weight, and a subclass must provide _build(key, builder).
    Tolerates concurrent eviction (see _prunelowestweight). """
    def __init__(self, maxentries=128):
        self.maxentries = maxentries
        # pruning shrinks the dict so that roughly 1/8 of maxentries are free
        self.prunenum = int(maxentries - maxentries/8)
        self._dict = {}

    def clear(self):
        self._dict.clear()

    def _getentry(self, key):
        return self._dict[key]

    def _putentry(self, key, entry):
        # make room (if needed) before inserting the new entry
        self._prunelowestweight()
        self._dict[key] = entry

    def delentry(self, key, raising=False):
        try:
            del self._dict[key]
        except KeyError:
            if raising:
                raise

    def getorbuild(self, key, builder):
        """ return the cached value for 'key', building it through the
        zero-argument callable 'builder' on a miss. """
        try:
            entry = self._getentry(key)
        except KeyError:
            entry = self._build(key, builder)
            self._putentry(key, entry)
        return entry.value

    def _prunelowestweight(self):
        """ prune out entries with lowest weight. """
        numentries = len(self._dict)
        if numentries >= self.maxentries:
            # evict according to entry's weight
            items = [(entry.weight, key)
                     for key, entry in self._dict.items()]
            items.sort()
            index = numentries - self.prunenum
            if index > 0:
                for weight, key in items[:index]:
                    # in MT situations the element might be gone
                    self.delentry(key, raising=False)
+
class BuildcostAccessCache(BasicCache):
    """ A BuildTime/Access-counting cache implementation.
    the weight of a value is computed as the product of

        num-accesses-of-a-value * time-to-build-the-value

    The values with the least such weights are evicted
    if the cache maxentries threshold is superseded.
    For implementation flexibility more than one object
    might be evicted at a time.
    """
    # time function to use for measuring build-times

    def _build(self, key, builder):
        # the build duration becomes the entry's per-access weight increment
        start = gettime()
        val = builder()
        end = gettime()
        return WeightedCountingEntry(val, end-start)
+
+
class WeightedCountingEntry(object):
    """ cache entry whose weight increases by a fixed increment on every
    read of .value (weight ~ build-cost * access-count). """

    def __init__(self, value, oneweight):
        self._value = value
        self._oneweight = oneweight
        self.weight = oneweight

    @property
    def value(self):
        # each access makes the entry heavier, i.e. less likely to be pruned
        self.weight = self.weight + self._oneweight
        return self._value
+
class AgingCache(BasicCache):
    """ This cache prunes out cache entries that are too old.
    """
    def __init__(self, maxentries=128, maxseconds=10.0):
        super(AgingCache, self).__init__(maxentries)
        # entries expire 'maxseconds' after being built
        self.maxseconds = maxseconds

    def _getentry(self, key):
        entry = self._dict[key]
        if entry.isexpired():
            # drop the stale entry and report a miss so it gets rebuilt
            self.delentry(key)
            raise KeyError(key)
        return entry

    def _build(self, key, builder):
        val = builder()
        entry = AgingEntry(val, gettime() + self.maxseconds)
        return entry
+
class AgingEntry(object):
    """ cache entry invalid after a deadline; the deadline doubles as the
    pruning weight (older entries sort first). """

    def __init__(self, value, expirationtime):
        self.value = value
        self.weight = expirationtime

    def isexpired(self):
        return gettime() >= self.weight
Added: pypy/branch/py12/py/_path/common.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_path/common.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,333 @@
+"""
+"""
+import os, sys
+import py
+
class Checkers:
    """ bundle of named predicates over self.path; driven by the keyword
    arguments of PathBase.check() via _evaluate(). """
    # checkers whose outcome depends on the path actually existing;
    # consulted when evaluation raises ENOENT/ENOTDIR
    _depend_on_existence = 'exists', 'link', 'dir', 'file'

    def __init__(self, path):
        self.path = path

    def dir(self):
        # provided by platform-specific subclasses (e.g. LocalPath.Checkers)
        raise NotImplementedError

    def file(self):
        raise NotImplementedError

    def dotfile(self):
        return self.path.basename.startswith('.')

    def ext(self, arg):
        # accept both "py" and ".py"
        if not arg.startswith('.'):
            arg = '.' + arg
        return self.path.ext == arg

    def exists(self):
        raise NotImplementedError

    def basename(self, arg):
        return self.path.basename == arg

    def basestarts(self, arg):
        return self.path.basename.startswith(arg)

    def relto(self, arg):
        return self.path.relto(arg)

    def fnmatch(self, arg):
        return FNMatcher(arg)(self.path)

    def endswith(self, arg):
        return str(self.path).endswith(arg)

    def _evaluate(self, kw):
        """ return True if every keyword-named check holds; a 'not' prefix
        on an otherwise-unknown name inverts the corresponding checker. """
        for name, value in kw.items():
            invert = False
            meth = None
            try:
                meth = getattr(self, name)
            except AttributeError:
                if name[:3] == 'not':
                    invert = True
                    try:
                        meth = getattr(self, name[3:])
                    except AttributeError:
                        pass
            if meth is None:
                raise TypeError(
                    "no %r checker available for %r" % (name, self.path))
            try:
                # arg-taking checkers compare against the given value,
                # zero-arg checkers are compared truth-wise (xor with invert)
                if py.code.getrawcode(meth).co_argcount > 1:
                    if (not meth(value)) ^ invert:
                        return False
                else:
                    if bool(value) ^ bool(meth()) ^ invert:
                        return False
            except (py.error.ENOENT, py.error.ENOTDIR):
                # path does not exist: existence-dependent keywords decide
                for name in self._depend_on_existence:
                    if name in kw:
                        if kw.get(name):
                            return False
                    name = 'not' + name
                    if name in kw:
                        if not kw.get(name):
                            return False
        return True
+
+class NeverRaised(Exception):
+ pass
+
+class PathBase(object):
+ """ shared implementation for filesystem path objects."""
+ Checkers = Checkers
+
+ def __div__(self, other):
+ return self.join(str(other))
+ __truediv__ = __div__ # py3k
+
+ def basename(self):
+ """ basename part of path. """
+ return self._getbyspec('basename')[0]
+ basename = property(basename, None, None, basename.__doc__)
+
+ def purebasename(self):
+ """ pure base name of the path."""
+ return self._getbyspec('purebasename')[0]
+ purebasename = property(purebasename, None, None, purebasename.__doc__)
+
+ def ext(self):
+ """ extension of the path (including the '.')."""
+ return self._getbyspec('ext')[0]
+ ext = property(ext, None, None, ext.__doc__)
+
+ def dirpath(self, *args, **kwargs):
+ """ return the directory Path of the current Path joined
+ with any given path arguments.
+ """
+ return self.new(basename='').join(*args, **kwargs)
+
+ def read(self, mode='r'):
+ """ read and return a bytestring from reading the path. """
+ if sys.version_info < (2,3):
+ for x in 'u', 'U':
+ if x in mode:
+ mode = mode.replace(x, '')
+ f = self.open(mode)
+ try:
+ return f.read()
+ finally:
+ f.close()
+
+ def readlines(self, cr=1):
+ """ read and return a list of lines from the path. if cr is False, the
+newline will be removed from the end of each line. """
+ if not cr:
+ content = self.read('rU')
+ return content.split('\n')
+ else:
+ f = self.open('rU')
+ try:
+ return f.readlines()
+ finally:
+ f.close()
+
+ def load(self):
+ """ (deprecated) return object unpickled from self.read() """
+ f = self.open('rb')
+ try:
+ return py.error.checked_call(py.std.pickle.load, f)
+ finally:
+ f.close()
+
+ def move(self, target):
+ """ move this path to target. """
+ if target.relto(self):
+ raise py.error.EINVAL(target,
+ "cannot move path into a subdirectory of itself")
+ try:
+ self.rename(target)
+ except py.error.EXDEV: # invalid cross-device link
+ self.copy(target)
+ self.remove()
+
+ def __repr__(self):
+ """ return a string representation of this path. """
+ return repr(str(self))
+
+ def check(self, **kw):
+ """ check a path for existence, or query its properties
+
+ without arguments, this returns True if the path exists (on the
+ filesystem), False if not
+
+ with (keyword only) arguments, the object compares the value
+ of the argument with the value of a property with the same name
+ (if it has one, else it raises a TypeError)
+
+ when for example the keyword argument 'ext' is '.py', this will
+ return True if self.ext == '.py', False otherwise
+ """
+ if not kw:
+ kw = {'exists' : 1}
+ return self.Checkers(self)._evaluate(kw)
+
+ def relto(self, relpath):
+ """ return a string which is the relative part of the path
+ to the given 'relpath'.
+ """
+ if not isinstance(relpath, (str, PathBase)):
+ raise TypeError("%r: not a string or path object" %(relpath,))
+ strrelpath = str(relpath)
+ if strrelpath and strrelpath[-1] != self.sep:
+ strrelpath += self.sep
+ #assert strrelpath[-1] == self.sep
+ #assert strrelpath[-2] != self.sep
+ strself = str(self)
+ if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
+ if os.path.normcase(strself).startswith(
+ os.path.normcase(strrelpath)):
+ return strself[len(strrelpath):]
+ elif strself.startswith(strrelpath):
+ return strself[len(strrelpath):]
+ return ""
+
    def bestrelpath(self, dest):
        """ return a string which is a relative path from self
        to dest such that self.join(bestrelpath) == dest and
        if no such path can be determined return dest.
        """
        try:
            # climb from self up to the common ancestor, then descend to dest
            base = self.common(dest)
            if not base: # can be the case on windows
                return str(dest)
            self2base = self.relto(base)
            reldest = dest.relto(base)
            if self2base:
                n = self2base.count(self.sep) + 1
            else:
                n = 0
            l = ['..'] * n
            if reldest:
                l.append(reldest)
            target = dest.sep.join(l)
            return target
        except AttributeError:
            # dest is not path-like enough; give up and return it verbatim
            return str(dest)
+
+
+ def parts(self, reverse=False):
+ """ return a root-first list of all ancestor directories
+ plus the path itself.
+ """
+ current = self
+ l = [self]
+ while 1:
+ last = current
+ current = current.dirpath()
+ if last == current:
+ break
+ l.insert(0, current)
+ if reverse:
+ l.reverse()
+ return l
+
+ def common(self, other):
+ """ return the common part shared with the other path
+ or None if there is no common part.
+ """
+ last = None
+ for x, y in zip(self.parts(), other.parts()):
+ if x != y:
+ return last
+ last = x
+ return last
+
+ def __add__(self, other):
+ """ return new path object with 'other' added to the basename"""
+ return self.new(basename=self.basename+str(other))
+
+ def __cmp__(self, other):
+ """ return sort value (-1, 0, +1). """
+ try:
+ return cmp(self.strpath, other.strpath)
+ except AttributeError:
+ return cmp(str(self), str(other)) # self.path, other.path)
+
+ def __lt__(self, other):
+ try:
+ return self.strpath < other.strpath
+ except AttributeError:
+ return str(self) < str(other)
+
+ def visit(self, fil=None, rec=None, ignore=NeverRaised):
+ """ yields all paths below the current one
+
+ fil is a filter (glob pattern or callable), if not matching the
+ path will not be yielded, defaulting to None (everything is
+ returned)
+
+ rec is a filter (glob pattern or callable) that controls whether
+ a node is descended, defaulting to None
+
+ ignore is an Exception class that is ignoredwhen calling dirlist()
+ on any of the paths (by default, all exceptions are reported)
+ """
+ if isinstance(fil, str):
+ fil = FNMatcher(fil)
+ if rec:
+ if isinstance(rec, str):
+ rec = fnmatch(fil)
+ elif not hasattr(rec, '__call__'):
+ rec = None
+ try:
+ entries = self.listdir()
+ except ignore:
+ return
+ dirs = [p for p in entries
+ if p.check(dir=1) and (rec is None or rec(p))]
+ for subdir in dirs:
+ for p in subdir.visit(fil=fil, rec=rec, ignore=ignore):
+ yield p
+ for p in entries:
+ if fil is None or fil(p):
+ yield p
+
+ def _sortlist(self, res, sort):
+ if sort:
+ if hasattr(sort, '__call__'):
+ res.sort(sort)
+ else:
+ res.sort()
+
+ def samefile(self, other):
+ """ return True if other refers to the same stat object as self. """
+ return self.strpath == str(other)
+
class FNMatcher:
    """ callable that matches path objects against one glob pattern """

    def __init__(self, pattern):
        self.pattern = pattern

    def __call__(self, path):
        """return true if the basename/fullname matches the glob-'pattern'.

        * matches everything
        ? matches any single character
        [seq] matches any character in seq
        [!seq] matches any char not in seq

        if the pattern contains a path-separator then the full path
        is used for pattern matching and a '*' is prepended to the
        pattern.

        if the pattern doesn't contain a path-separator the pattern
        is only matched against the basename.
        """
        from fnmatch import fnmatch
        pattern = self.pattern
        if path.sep in pattern:
            # match against the full path, loosely anchored on the left
            return fnmatch(str(path), '*' + path.sep + pattern)
        return fnmatch(path.basename, pattern)
+
Added: pypy/branch/py12/py/_path/gateway/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_path/gateway/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1 @@
+#
Added: pypy/branch/py12/py/_path/gateway/channeltest.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_path/gateway/channeltest.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,65 @@
+import threading
+
+
class PathServer:
    """ serve path operations over a channel; paths are referred to by
    small integer ids allocated in p2c(). Runs its dispatch loop in a
    background thread until the channel reports EOF. """

    def __init__(self, channel):
        self.channel = channel
        self.C2P = {}        # id -> server-side path object
        self.next_id = 0
        threading.Thread(target=self.serve).start()

    def p2c(self, path):
        """ register 'path' under a fresh id and return that id. """
        id = self.next_id
        self.next_id += 1
        self.C2P[id] = path
        return id

    def command_LIST(self, id, *args):
        path = self.C2P[id]
        # register every child so the client can refer back to it
        answer = [(self.p2c(p), p.basename) for p in path.listdir(*args)]
        self.channel.send(answer)

    def command_DEL(self, id):
        del self.C2P[id]

    def command_GET(self, id, spec):
        path = self.C2P[id]
        self.channel.send(path._getbyspec(spec))

    def command_READ(self, id):
        path = self.C2P[id]
        self.channel.send(path.read())

    def command_JOIN(self, id, resultid, *args):
        # 'resultid' is allocated by the client (see RemotePath.join)
        path = self.C2P[id]
        assert resultid not in self.C2P
        self.C2P[resultid] = path.join(*args)

    def command_DIRPATH(self, id, resultid):
        path = self.C2P[id]
        assert resultid not in self.C2P
        self.C2P[resultid] = path.dirpath()

    def serve(self):
        """ dispatch incoming (command, args...) tuples until EOF. """
        try:
            while 1:
                msg = self.channel.receive()
                meth = getattr(self, 'command_' + msg[0])
                meth(*msg[1:])
        except EOFError:
            pass
+
if __name__ == '__main__':
    # demo: start a PathServer in this process and have a child process
    # count the entries of /tmp through it
    import py
    import execnet  # bugfix: execnet was used below but never imported
    gw = execnet.PopenGateway()
    channel = gw._channelfactory.new()
    srv = PathServer(channel)
    c = gw.remote_exec("""
        import remotepath
        p = remotepath.RemotePath(channel.receive(), channel.receive())
        channel.send(len(p.listdir()))
    """)
    c.send(channel)
    c.send(srv.p2c(py.path.local('/tmp')))
    print(c.receive())
Added: pypy/branch/py12/py/_path/gateway/channeltest2.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_path/gateway/channeltest2.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,21 @@
# demo script: ship channeltest.py to a child process, start a PathServer
# there and wrap the resulting remote /tmp in a RemotePath proxy
import py
import execnet  # bugfix: execnet was used below but never imported
from remotepath import RemotePath


SRC = open('channeltest.py', 'r').read()

SRC += '''
import py
srv = PathServer(channel.receive())
channel.send(srv.p2c(py.path.local("/tmp")))
'''


#gw = execnet.SshGateway('codespeak.net')
gw = execnet.PopenGateway()
gw.remote_init_threads(5)
c = gw.remote_exec(SRC, stdout=py.std.sys.stdout, stderr=py.std.sys.stderr)
subchannel = gw._channelfactory.new()
c.send(subchannel)

p = RemotePath(subchannel, c.receive())
Added: pypy/branch/py12/py/_path/gateway/remotepath.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_path/gateway/remotepath.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,47 @@
+import py, itertools
+from py._path import common
+
+COUNTER = itertools.count()
+
class RemotePath(common.PathBase):
    """ client-side proxy for a path served by a PathServer on the other
    end of 'channel' (see channeltest.py). """
    sep = '/'

    def __init__(self, channel, id, basename=None):
        self._channel = channel
        self._id = id
        self._basename = basename
        self._specs = {}     # cache for _getbyspec results

    def __del__(self):
        # drop the server-side reference held for our id
        self._channel.send(('DEL', self._id))

    def __repr__(self):
        return 'RemotePath(%s)' % self.basename

    def listdir(self, *args):
        self._channel.send(('LIST', self._id) + args)
        return [RemotePath(self._channel, id, basename)
                for (id, basename) in self._channel.receive()]

    def dirpath(self):
        # result ids are allocated client-side; the bitwise-NOT makes them
        # negative so they cannot clash with the server's non-negative ids
        # (NOTE(review): COUNTER.next() is python2-only syntax)
        id = ~COUNTER.next()
        self._channel.send(('DIRPATH', self._id, id))
        return RemotePath(self._channel, id)

    def join(self, *args):
        id = ~COUNTER.next()
        self._channel.send(('JOIN', self._id, id) + args)
        return RemotePath(self._channel, id)

    def _getbyspec(self, spec):
        """ resolve comma-separated path parts, asking the server only for
        parts not cached locally yet. """
        parts = spec.split(',')
        ask = [x for x in parts if x not in self._specs]
        if ask:
            self._channel.send(('GET', self._id, ",".join(ask)))
            for part, value in zip(ask, self._channel.receive()):
                self._specs[part] = value
        return [self._specs[x] for x in parts]

    def read(self):
        self._channel.send(('READ', self._id))
        return self._channel.receive()
Added: pypy/branch/py12/py/_path/local.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_path/local.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,809 @@
+"""
+local path implementation.
+"""
+import sys, os, stat, re, atexit
+import py
+from py._path import common
+
+iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
+
class Stat(object):
    """ wrapper around an os.stat() result; the 'st_'-prefixed fields are
    reachable without the prefix (e.g. .mode for st_mode). """

    def __init__(self, path, osstatresult):
        self.path = path
        self._osstatresult = osstatresult

    def __getattr__(self, name):
        # delegate unknown attributes to the underlying stat result
        return getattr(self._osstatresult, "st_" + name)

    @property
    def owner(self):
        """owner of path"""
        if iswin32:
            raise NotImplementedError("XXX win32")
        import pwd
        return py.error.checked_call(pwd.getpwuid, self.uid)[0]

    @property
    def group(self):
        """ return group name of file. """
        if iswin32:
            raise NotImplementedError("XXX win32")
        import grp
        return py.error.checked_call(grp.getgrgid, self.gid)[0]
+
class PosixPath(common.PathBase):
    """ posix-specific path operations (ownership, hard/symbolic links). """

    def chown(self, user, group, rec=0):
        """ change ownership to the given user and group.
        user and group may be specified by a number or
        by a name. if rec is True change ownership
        recursively.
        """
        uid = getuserid(user)
        gid = getgroupid(group)
        if rec:
            # only chown non-symlinks; symlinked entries are skipped entirely
            for x in self.visit(rec=lambda x: x.check(link=0)):
                if x.check(link=0):
                    py.error.checked_call(os.chown, str(x), uid, gid)
        py.error.checked_call(os.chown, str(self), uid, gid)

    def readlink(self):
        """ return value of a symbolic link. """
        return py.error.checked_call(os.readlink, self.strpath)

    def mklinkto(self, oldname):
        """ posix style hard link to another name. """
        py.error.checked_call(os.link, str(oldname), str(self))

    def mksymlinkto(self, value, absolute=1):
        """ create a symbolic link with the given value (pointing to another name). """
        if absolute:
            py.error.checked_call(os.symlink, str(value), self.strpath)
        else:
            # build a relative target: climb from self to the common base,
            # then descend to the link source
            base = self.common(value)
            # with posix local paths '/' is always a common base
            relsource = self.__class__(value).relto(base)
            reldest = self.relto(base)
            n = reldest.count(self.sep)
            target = self.sep.join(('..', )*n + (relsource, ))
            py.error.checked_call(os.symlink, target, self.strpath)

    def samefile(self, other):
        """ return True if other refers to the same stat object as self. """
        return py.error.checked_call(os.path.samefile, str(self), str(other))
+
def getuserid(user):
    """ return the numeric uid for 'user' (a name or a number). """
    import pwd
    if isinstance(user, int):
        return user
    return pwd.getpwnam(user)[2]

def getgroupid(group):
    """ return the numeric gid for 'group' (a name or a number). """
    import grp
    if isinstance(group, int):
        return group
    return grp.getgrnam(group)[2]
+
+FSBase = not iswin32 and PosixPath or common.PathBase
+
+class LocalPath(FSBase):
+ """ object oriented interface to os.path and other local filesystem
+ related information.
+ """
+ sep = os.sep
+ class Checkers(common.Checkers):
+ def _stat(self):
+ try:
+ return self._statcache
+ except AttributeError:
+ try:
+ self._statcache = self.path.stat()
+ except py.error.ELOOP:
+ self._statcache = self.path.lstat()
+ return self._statcache
+
+ def dir(self):
+ return stat.S_ISDIR(self._stat().mode)
+
+ def file(self):
+ return stat.S_ISREG(self._stat().mode)
+
+ def exists(self):
+ return self._stat()
+
+ def link(self):
+ st = self.path.lstat()
+ return stat.S_ISLNK(st.mode)
+
+ def __new__(cls, path=None):
+ """ Initialize and return a local Path instance.
+
+ Path can be relative to the current directory.
+ If it is None then the current working directory is taken.
+ Note that Path instances always carry an absolute path.
+ Note also that passing in a local path object will simply return
+ the exact same path object. Use new() to get a new copy.
+ """
+ if isinstance(path, common.PathBase):
+ if path.__class__ == cls:
+ return path
+ path = path.strpath
+ # initialize the path
+ self = object.__new__(cls)
+ if not path:
+ self.strpath = os.getcwd()
+ elif isinstance(path, py.builtin._basestring):
+ self.strpath = os.path.abspath(os.path.normpath(str(path)))
+ else:
+ raise ValueError("can only pass None, Path instances "
+ "or non-empty strings to LocalPath")
+ assert isinstance(self.strpath, str)
+ return self
+
+ def __hash__(self):
+ return hash(self.strpath)
+
+ def __eq__(self, other):
+ s1 = str(self)
+ s2 = str(other)
+ if iswin32:
+ s1 = s1.lower()
+ s2 = s2.lower()
+ return s1 == s2
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __lt__(self, other):
+ return str(self) < str(other)
+
+ def remove(self, rec=1):
+ """ remove a file or directory (or a directory tree if rec=1). """
+ if self.check(dir=1, link=0):
+ if rec:
+ # force remove of readonly files on windows
+ if iswin32:
+ self.chmod(448, rec=1) # octcal 0700
+ py.error.checked_call(py.std.shutil.rmtree, self.strpath)
+ else:
+ py.error.checked_call(os.rmdir, self.strpath)
+ else:
+ if iswin32:
+ self.chmod(448) # octcal 0700
+ py.error.checked_call(os.remove, self.strpath)
+
+ def computehash(self, hashtype="md5", chunksize=524288):
+ """ return hexdigest of hashvalue for this file. """
+ try:
+ try:
+ import hashlib as mod
+ except ImportError:
+ if hashtype == "sha1":
+ hashtype = "sha"
+ mod = __import__(hashtype)
+ hash = getattr(mod, hashtype)()
+ except (AttributeError, ImportError):
+ raise ValueError("Don't know how to compute %r hash" %(hashtype,))
+ f = self.open('rb')
+ try:
+ while 1:
+ buf = f.read(chunksize)
+ if not buf:
+ return hash.hexdigest()
+ hash.update(buf)
+ finally:
+ f.close()
+
+ def new(self, **kw):
+ """ create a modified version of this path.
+ the following keyword arguments modify various path parts:
+
+ a:/some/path/to/a/file.ext
+ || drive
+ |-------------| dirname
+ |------| basename
+ |--| purebasename
+ |--| ext
+ """
+ obj = object.__new__(self.__class__)
+ drive, dirname, basename, purebasename,ext = self._getbyspec(
+ "drive,dirname,basename,purebasename,ext")
+ if 'basename' in kw:
+ if 'purebasename' in kw or 'ext' in kw:
+ raise ValueError("invalid specification %r" % kw)
+ else:
+ pb = kw.setdefault('purebasename', purebasename)
+ try:
+ ext = kw['ext']
+ except KeyError:
+ pass
+ else:
+ if ext and not ext.startswith('.'):
+ ext = '.' + ext
+ kw['basename'] = pb + ext
+
+ kw.setdefault('drive', drive)
+ kw.setdefault('dirname', dirname)
+ kw.setdefault('sep', self.sep)
+ obj.strpath = os.path.normpath(
+ "%(drive)s%(dirname)s%(sep)s%(basename)s" % kw)
+ return obj
+
+ def _getbyspec(self, spec):
+ """ return a sequence of specified path parts. 'spec' is
+ a comma separated string containing path part names.
+ according to the following convention:
+ a:/some/path/to/a/file.ext
+ || drive
+ |-------------| dirname
+ |------| basename
+ |--| purebasename
+ |--| ext
+ """
+ res = []
+ parts = self.strpath.split(self.sep)
+
+ args = filter(None, spec.split(',') )
+ append = res.append
+ for name in args:
+ if name == 'drive':
+ append(parts[0])
+ elif name == 'dirname':
+ append(self.sep.join(['']+parts[1:-1]))
+ else:
+ basename = parts[-1]
+ if name == 'basename':
+ append(basename)
+ else:
+ i = basename.rfind('.')
+ if i == -1:
+ purebasename, ext = basename, ''
+ else:
+ purebasename, ext = basename[:i], basename[i:]
+ if name == 'purebasename':
+ append(purebasename)
+ elif name == 'ext':
+ append(ext)
+ else:
+ raise ValueError("invalid part specification %r" % name)
+ return res
+
+ def join(self, *args, **kwargs):
+ """ return a new path by appending all 'args' as path
+ components. if abs=1 is used restart from root if any
+ of the args is an absolute path.
+ """
+ if not args:
+ return self
+ strpath = self.strpath
+ sep = self.sep
+ strargs = [str(x) for x in args]
+ if kwargs.get('abs', 0):
+ for i in range(len(strargs)-1, -1, -1):
+ if os.path.isabs(strargs[i]):
+ strpath = strargs[i]
+ strargs = strargs[i+1:]
+ break
+ for arg in strargs:
+ arg = arg.strip(sep)
+ if iswin32:
+ # allow unix style paths even on windows.
+ arg = arg.strip('/')
+ arg = arg.replace('/', sep)
+ if arg:
+ if not strpath.endswith(sep):
+ strpath += sep
+ strpath += arg
+ obj = self.new()
+ obj.strpath = os.path.normpath(strpath)
+ return obj
+
+ def open(self, mode='r'):
+ """ return an opened file with the given mode. """
+ return py.error.checked_call(open, self.strpath, mode)
+
+ def listdir(self, fil=None, sort=None):
+ """ list directory contents, possibly filter by the given fil func
+ and possibly sorted.
+ """
+ if isinstance(fil, str):
+ fil = common.FNMatcher(fil)
+ res = []
+ for name in py.error.checked_call(os.listdir, self.strpath):
+ childurl = self.join(name)
+ if fil is None or fil(childurl):
+ res.append(childurl)
+ self._sortlist(res, sort)
+ return res
+
+ def size(self):
+ """ return size of the underlying file object """
+ return self.stat().size
+
+ def mtime(self):
+ """ return last modification time of the path. """
+ return self.stat().mtime
+
+ def copy(self, target, archive=False):
+ """ copy path to target."""
+ assert not archive, "XXX archive-mode not supported"
+ if self.check(file=1):
+ if target.check(dir=1):
+ target = target.join(self.basename)
+ assert self!=target
+ copychunked(self, target)
+ else:
+ def rec(p):
+ return p.check(link=0)
+ for x in self.visit(rec=rec):
+ relpath = x.relto(self)
+ newx = target.join(relpath)
+ newx.dirpath().ensure(dir=1)
+ if x.check(link=1):
+ newx.mksymlinkto(x.readlink())
+ elif x.check(file=1):
+ copychunked(x, newx)
+ elif x.check(dir=1):
+ newx.ensure(dir=1)
+
+ def rename(self, target):
+ """ rename this path to target. """
+ return py.error.checked_call(os.rename, str(self), str(target))
+
+ def dump(self, obj, bin=1):
+ """ pickle object into path location"""
+ f = self.open('wb')
+ try:
+ py.error.checked_call(py.std.pickle.dump, obj, f, bin)
+ finally:
+ f.close()
+
+ def mkdir(self, *args):
+ """ create & return the directory joined with args. """
+ p = self.join(*args)
+ py.error.checked_call(os.mkdir, str(p))
+ return p
+
+ def write(self, data, mode='w'):
+ """ write data into path. """
+ if 'b' in mode:
+ if not py.builtin._isbytes(data):
+ raise ValueError("can only process bytes")
+ else:
+ if not py.builtin._istext(data):
+ if not py.builtin._isbytes(data):
+ data = str(data)
+ else:
+ data = py.builtin._totext(data, sys.getdefaultencoding())
+ f = self.open(mode)
+ try:
+ f.write(data)
+ finally:
+ f.close()
+
+ def _ensuredirs(self):
+ parent = self.dirpath()
+ if parent == self:
+ return self
+ if parent.check(dir=0):
+ parent._ensuredirs()
+ if self.check(dir=0):
+ try:
+ self.mkdir()
+ except py.error.EEXIST:
+ # race condition: file/dir created by another thread/process.
+ # complain if it is not a dir
+ if self.check(dir=0):
+ raise
+ return self
+
+ def ensure(self, *args, **kwargs):
+ """ ensure that an args-joined path exists (by default as
+ a file). if you specify a keyword argument 'dir=True'
+ then the path is forced to be a directory path.
+ """
+ p = self.join(*args)
+ if kwargs.get('dir', 0):
+ return p._ensuredirs()
+ else:
+ p.dirpath()._ensuredirs()
+ if not p.check(file=1):
+ p.open('w').close()
+ return p
+
+ def stat(self):
+ """ Return an os.stat() tuple. """
+ return Stat(self, py.error.checked_call(os.stat, self.strpath))
+
+ def lstat(self):
+ """ Return an os.lstat() tuple. """
+ return Stat(self, py.error.checked_call(os.lstat, self.strpath))
+
+ def setmtime(self, mtime=None):
+ """ set modification time for the given path. if 'mtime' is None
+ (the default) then the file's mtime is set to current time.
+
+ Note that the resolution for 'mtime' is platform dependent.
+ """
+ if mtime is None:
+ return py.error.checked_call(os.utime, self.strpath, mtime)
+ try:
+ return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
+ except py.error.EINVAL:
+ return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
+
+ def chdir(self):
+ """ change directory to self and return old current directory """
+ old = self.__class__()
+ py.error.checked_call(os.chdir, self.strpath)
+ return old
+
+ def realpath(self):
+ """ return a new path which contains no symbolic links."""
+ return self.__class__(os.path.realpath(self.strpath))
+
+ def atime(self):
+ """ return last access time of the path. """
+ return self.stat().atime
+
+ def __repr__(self):
+ return 'local(%r)' % self.strpath
+
+ def __str__(self):
+ """ return string representation of the Path. """
+ return self.strpath
+
+ def pypkgpath(self, pkgname=None):
+ """ return the path's package path by looking for the given
+ pkgname. If pkgname is None then look for the last
+ directory upwards which still contains an __init__.py
+ and whose basename is python-importable.
+ Return None if a pkgpath can not be determined.
+ """
+ pkgpath = None
+ for parent in self.parts(reverse=True):
+ if pkgname is None:
+ if parent.check(file=1):
+ continue
+ if not isimportable(parent.basename):
+ break
+ if parent.join('__init__.py').check():
+ pkgpath = parent
+ continue
+ return pkgpath
+ else:
+ if parent.basename == pkgname:
+ return parent
+ return pkgpath
+
+ def _prependsyspath(self, path):
+ s = str(path)
+ if s != sys.path[0]:
+ #print "prepending to sys.path", s
+ sys.path.insert(0, s)
+
+ def chmod(self, mode, rec=0):
+ """ change permissions to the given mode. If mode is an
+ integer it directly encodes the os-specific modes.
+ if rec is True perform recursively.
+ """
+ if not isinstance(mode, int):
+ raise TypeError("mode %r must be an integer" % (mode,))
+ if rec:
+ for x in self.visit(rec=rec):
+ py.error.checked_call(os.chmod, str(x), mode)
+ py.error.checked_call(os.chmod, str(self), mode)
+
    def pyimport(self, modname=None, ensuresyspath=True):
        """ return path as an imported python module.
        if modname is None, look for the containing package
        and construct an according module name.
        The module will be put/looked up in sys.modules.
        """
        if not self.check():
            raise py.error.ENOENT(self)
        pkgpath = None
        if modname is None:
            # derive the dotted module name from the package layout on disk
            pkgpath = self.pypkgpath()
            if pkgpath is not None:
                if ensuresyspath:
                    self._prependsyspath(pkgpath.dirpath())
                # import the package first so the dotted import below works
                pkg = __import__(pkgpath.basename, None, None, [])
                names = self.new(ext='').relto(pkgpath.dirpath())
                names = names.split(self.sep)
                modname = ".".join(names)
            else:
                # no package scope, still make it possible
                if ensuresyspath:
                    self._prependsyspath(self.dirpath())
                modname = self.purebasename
            mod = __import__(modname, None, None, ['__doc__'])
            modfile = mod.__file__
            # normalize the imported module's file back to its .py source
            if modfile[-4:] in ('.pyc', '.pyo'):
                modfile = modfile[:-1]
            elif modfile.endswith('$py.class'):
                # jython compiled-class naming scheme
                modfile = modfile[:-9] + '.py'
            if not self.samefile(modfile):
                # the import system resolved the name to a different file
                raise EnvironmentError("mismatch:\n"
                    "imported module %r\n"
                    "does not stem from %r\n"
                    "maybe __init__.py files are missing?" % (mod, str(self)))
            return mod
        else:
            try:
                # reuse an already-registered module of that name
                return sys.modules[modname]
            except KeyError:
                # we have a custom modname, do a pseudo-import
                mod = py.std.types.ModuleType(modname)
                mod.__file__ = str(self)
                sys.modules[modname] = mod
                try:
                    py.builtin.execfile(str(self), mod.__dict__)
                except:
                    # keep sys.modules clean when execution fails
                    del sys.modules[modname]
                    raise
                return mod
+
    def sysexec(self, *argv, **popen_opts):
        """ return stdout text from executing a system child process,
        where the 'self' path points to executable.
        The process is directly invoked and not through a system shell.
        """
        from subprocess import Popen, PIPE
        argv = map(str, argv)
        # capture both streams so they can be attached to a failure below
        popen_opts['stdout'] = popen_opts['stderr'] = PIPE
        proc = Popen([str(self)] + list(argv), **popen_opts)
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        # decode bytes output (python3) to text using the default encoding
        if py.builtin._isbytes(stdout):
            stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
        if ret != 0:
            if py.builtin._isbytes(stderr):
                stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
            raise py.process.cmdexec.Error(ret, ret, str(self),
                                           stdout, stderr,)
        return stdout
+
    def sysfind(cls, name, checker=None):
        """ return a path object found by looking at the systems
        underlying PATH specification. If the checker is not None
        it will be invoked to filter matching paths. If a binary
        cannot be found, None is returned
        Note: This is probably not working on plain win32 systems
        but may work on cygwin.
        """
        if os.path.isabs(name):
            # absolute names bypass the PATH search entirely
            p = py.path.local(name)
            if p.check(file=1):
                return p
        else:
            if iswin32:
                # 'Path' with a capital P is how the variable shows up here
                paths = py.std.os.environ['Path'].split(';')
                if '' not in paths and '.' not in paths:
                    paths.append('.')
                try:
                    systemroot = os.environ['SYSTEMROOT']
                except KeyError:
                    pass
                else:
                    # expand literal %SystemRoot% references in PATH entries
                    paths = [re.sub('%SystemRoot%', systemroot, path)
                             for path in paths]
                # windows resolves executables by trying these extensions
                tryadd = '', '.exe', '.com', '.bat' # XXX add more?
            else:
                paths = py.std.os.environ['PATH'].split(':')
                tryadd = ('',)

            for x in paths:
                for addext in tryadd:
                    p = py.path.local(x).join(name, abs=True) + addext
                    try:
                        if p.check(file=1):
                            if checker:
                                if not checker(p):
                                    continue
                            return p
                    except py.error.EACCES:
                        # unreadable PATH entries are silently skipped
                        pass
        return None
    sysfind = classmethod(sysfind)
+
+ def _gethomedir(cls):
+ try:
+ x = os.environ['HOME']
+ except KeyError:
+ x = os.environ['HOMEPATH']
+ return cls(x)
+ _gethomedir = classmethod(_gethomedir)
+
+ #"""
+ #special class constructors for local filesystem paths
+ #"""
+ def get_temproot(cls):
+ """ return the system's temporary directory
+ (where tempfiles are usually created in)
+ """
+ return py.path.local(py.std.tempfile.gettempdir())
+ get_temproot = classmethod(get_temproot)
+
+ def mkdtemp(cls):
+ """ return a Path object pointing to a fresh new temporary directory
+ (which we created ourself).
+ """
+ import tempfile
+ tries = 10
+ for i in range(tries):
+ dname = tempfile.mktemp()
+ dpath = cls(tempfile.mktemp())
+ try:
+ dpath.mkdir()
+ except (py.error.EEXIST, py.error.EPERM, py.error.EACCES):
+ continue
+ return dpath
+ raise py.error.ENOENT(dpath, "could not create tempdir, %d tries" % tries)
+ mkdtemp = classmethod(mkdtemp)
+
+ def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
+ lock_timeout = 172800): # two days
+ """ return unique directory with a number greater than the current
+ maximum one. The number is assumed to start directly after prefix.
+ if keep is true directories with a number less than (maxnum-keep)
+ will be removed.
+ """
+ if rootdir is None:
+ rootdir = cls.get_temproot()
+
+ def parse_num(path):
+ """ parse the number out of a path (if it matches the prefix) """
+ bn = path.basename
+ if bn.startswith(prefix):
+ try:
+ return int(bn[len(prefix):])
+ except ValueError:
+ pass
+
+ # compute the maximum number currently in use with the
+ # prefix
+ lastmax = None
+ while True:
+ maxnum = -1
+ for path in rootdir.listdir():
+ num = parse_num(path)
+ if num is not None:
+ maxnum = max(maxnum, num)
+
+ # make the new directory
+ try:
+ udir = rootdir.mkdir(prefix + str(maxnum+1))
+ except py.error.EEXIST:
+ # race condition: another thread/process created the dir
+ # in the meantime. Try counting again
+ if lastmax == maxnum:
+ raise
+ lastmax = maxnum
+ continue
+ break
+
+ # put a .lock file in the new directory that will be removed at
+ # process exit
+ if lock_timeout:
+ lockfile = udir.join('.lock')
+ mypid = os.getpid()
+ if hasattr(lockfile, 'mksymlinkto'):
+ lockfile.mksymlinkto(str(mypid))
+ else:
+ lockfile.write(str(mypid))
+ def try_remove_lockfile():
+ # in a fork() situation, only the last process should
+ # remove the .lock, otherwise the other processes run the
+ # risk of seeing their temporary dir disappear. For now
+ # we remove the .lock in the parent only (i.e. we assume
+ # that the children finish before the parent).
+ if os.getpid() != mypid:
+ return
+ try:
+ lockfile.remove()
+ except py.error.Error:
+ pass
+ atexit.register(try_remove_lockfile)
+
+ # prune old directories
+ if keep:
+ for path in rootdir.listdir():
+ num = parse_num(path)
+ if num is not None and num <= (maxnum - keep):
+ lf = path.join('.lock')
+ try:
+ t1 = lf.lstat().mtime
+ t2 = lockfile.lstat().mtime
+ if not lock_timeout or abs(t2-t1) < lock_timeout:
+ continue # skip directories still locked
+ except py.error.Error:
+ pass # assume that it means that there is no 'lf'
+ try:
+ path.remove(rec=1)
+ except KeyboardInterrupt:
+ raise
+ except: # this might be py.error.Error, WindowsError ...
+ pass
+
+ # make link...
+ try:
+ username = os.environ['USER'] #linux, et al
+ except KeyError:
+ try:
+ username = os.environ['USERNAME'] #windows
+ except KeyError:
+ username = 'current'
+
+ src = str(udir)
+ dest = src[:src.rfind('-')] + '-' + username
+ try:
+ os.unlink(dest)
+ except OSError:
+ pass
+ try:
+ os.symlink(src, dest)
+ except (OSError, AttributeError): # AttributeError on win32
+ pass
+
+ return udir
+ make_numbered_dir = classmethod(make_numbered_dir)
+
def copychunked(src, dest):
    """Copy the contents of *src* to *dest* in half-megabyte chunks.

    Both arguments must provide an ``open(mode)`` method returning a file
    object; both files are opened in binary mode and closed in all cases.
    """
    chunksize = 524288 # half a meg of bytes
    infile = src.open('rb')
    try:
        outfile = dest.open('wb')
        try:
            chunk = infile.read(chunksize)
            while chunk:
                outfile.write(chunk)
                chunk = infile.read(chunksize)
        finally:
            outfile.close()
    finally:
        infile.close()
+
def autopath(globs=None):
    """ (deprecated) return the (local) path of the "current" file pointed to by globals or - if it is none - alternatively the callers frame globals.

    the path will always point to a .py file or to None.
    the path will have the following payload:
    pkgdir is the last parent directory path containing __init__.py
    """
    py.log._apiwarn("1.1", "py.magic.autopath deprecated, "
        "use py.path.local(__file__) and maybe pypkgpath/pyimport().")
    if globs is None:
        # fall back to the calling frame's global namespace
        globs = sys._getframe(1).f_globals
    try:
        __file__ = globs['__file__']
    except KeyError:
        if not sys.argv[0]:
            raise ValueError("cannot compute autopath in interactive mode")
        __file__ = os.path.abspath(sys.argv[0])

    ret = py.path.local(__file__)
    if ret.ext in ('.pyc', '.pyo'):
        # point at the .py source, not the compiled file
        ret = ret.new(ext='.py')
    # walk upwards while __init__.py files mark package directories
    current = pkgdir = ret.dirpath()
    while 1:
        if current.join('__init__.py').check():
            pkgdir = current
            current = current.dirpath()
            if pkgdir != current:
                continue
        elif str(current) not in sys.path:
            # first non-package ancestor: make it importable
            sys.path.insert(0, str(current))
        break
    ret.pkgdir = pkgdir
    return ret
+
+
def isimportable(name):
    """Return whether *name* looks like a python-importable identifier.

    Falsy for the empty string; False when the name starts with a
    non-letter/non-underscore or contains other illegal characters.
    """
    if not name:
        return
    first = name[0]
    if not (first.isalpha() or first == '_'):
        return False
    remainder = name.replace('_', '')
    return not remainder or remainder.isalnum()
Added: pypy/branch/py12/py/_path/svnurl.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_path/svnurl.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,378 @@
+"""
+module defining a subversion path object based on the external
+command 'svn'. This module aims to work with svn 1.3 and higher
+but might also interact well with earlier versions.
+"""
+
+import os, sys, time, re
+import py
+from py import path, process
+from py._path import common
+from py._path import svnwc as svncommon
+from py._path.cacheutil import BuildcostAccessCache, AgingCache
+
+DEBUG=False
+
class SvnCommandPath(svncommon.SvnPathBase):
    """ path implementation that offers access to (possibly remote) subversion
    repositories. """

    # caches for 'svn ls' output: revisioned listings are immutable and kept
    # by build cost; unrevisioned ones age out after a minute.
    _lsrevcache = BuildcostAccessCache(maxentries=128)
    _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0)

    def __new__(cls, path, rev=None, auth=None):
        # copy rev/auth over when constructed from another instance
        self = object.__new__(cls)
        if isinstance(path, cls):
            rev = path.rev
            auth = path.auth
            path = path.strpath
        svncommon.checkbadchars(path)
        path = path.rstrip('/')
        self.strpath = path
        self.rev = rev
        self.auth = auth
        return self

    def __repr__(self):
        if self.rev == -1:
            return 'svnurl(%r)' % self.strpath
        else:
            return 'svnurl(%r, %r)' % (self.strpath, self.rev)

    def _svnwithrev(self, cmd, *args):
        """ execute an svn command, append our own url and revision """
        if self.rev is None:
            return self._svnwrite(cmd, *args)
        else:
            args = ['-r', self.rev] + list(args)
            return self._svnwrite(cmd, *args)

    def _svnwrite(self, cmd, *args):
        """ execute an svn command, append our own url """
        l = ['svn %s' % cmd]
        args = ['"%s"' % self._escape(item) for item in args]
        l.extend(args)
        l.append('"%s"' % self._encodedurl())
        # fixing the locale because we can't otherwise parse
        string = " ".join(l)
        if DEBUG:
            print("execing %s" % string)
        out = self._svncmdexecauth(string)
        return out

    def _svncmdexecauth(self, cmd):
        """ execute an svn command 'as is' """
        cmd = svncommon.fixlocale() + cmd
        if self.auth is not None:
            cmd += ' ' + self.auth.makecmdoptions()
        return self._cmdexec(cmd)

    def _cmdexec(self, cmd):
        # run the command, translating "already exists" errors to EEXIST
        try:
            out = process.cmdexec(cmd)
        except py.process.cmdexec.Error:
            e = sys.exc_info()[1]
            if (e.err.find('File Exists') != -1 or
                e.err.find('File already exists') != -1):
                raise py.error.EEXIST(self)
            raise
        return out

    def _svnpopenauth(self, cmd):
        """ execute an svn command, return a pipe for reading stdin """
        cmd = svncommon.fixlocale() + cmd
        if self.auth is not None:
            cmd += ' ' + self.auth.makecmdoptions()
        return self._popen(cmd)

    def _popen(self, cmd):
        return os.popen(cmd)

    def _encodedurl(self):
        return self._escape(self.strpath)

    def _norev_delentry(self, path):
        # invalidate the no-revision listing cache for the given path
        auth = self.auth and self.auth.makecmdoptions() or None
        self._lsnorevcache.delentry((str(path), auth))

    def open(self, mode='r'):
        """ return an opened file with the given mode. """
        if mode not in ("r", "rU",):
            raise ValueError("mode %r not supported" % (mode,))
        assert self.check(file=1) # svn cat returns an empty file otherwise
        if self.rev is None:
            return self._svnpopenauth('svn cat "%s"' % (
                self._escape(self.strpath), ))
        else:
            return self._svnpopenauth('svn cat -r %s "%s"' % (
                self.rev, self._escape(self.strpath)))

    def dirpath(self, *args, **kwargs):
        """ return the directory path of the current path joined
        with any given path arguments.
        """
        # a url splits into at least proto, '', host, path -- fewer than 4
        # parts means there is no directory part to go up to
        l = self.strpath.split(self.sep)
        if len(l) < 4:
            raise py.error.EINVAL(self, "base is not valid")
        elif len(l) == 4:
            return self.join(*args, **kwargs)
        else:
            return self.new(basename='').join(*args, **kwargs)

    # modifying methods (cache must be invalidated)
    def mkdir(self, *args, **kwargs):
        """ create & return the directory joined with args.
        pass a 'msg' keyword argument to set the commit message.
        """
        commit_msg = kwargs.get('msg', "mkdir by py lib invocation")
        createpath = self.join(*args)
        createpath._svnwrite('mkdir', '-m', commit_msg)
        self._norev_delentry(createpath.dirpath())
        return createpath

    def copy(self, target, msg='copied by py lib invocation'):
        """ copy path to target with checkin message msg."""
        if getattr(target, 'rev', None) is not None:
            raise py.error.EINVAL(target, "revisions are immutable")
        self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg,
                             self._escape(self), self._escape(target)))
        self._norev_delentry(target.dirpath())

    def rename(self, target, msg="renamed by py lib invocation"):
        """ rename this path to target with checkin message msg. """
        if getattr(self, 'rev', None) is not None:
            raise py.error.EINVAL(self, "revisions are immutable")
        self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %(
                             msg, self._escape(self), self._escape(target)))
        # both the old and the new parent listing change
        self._norev_delentry(self.dirpath())
        self._norev_delentry(self)

    def remove(self, rec=1, msg='removed by py lib invocation'):
        """ remove a file or directory (or a directory tree if rec=1) with
        checkin message msg."""
        if self.rev is not None:
            raise py.error.EINVAL(self, "revisions are immutable")
        self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self)))
        self._norev_delentry(self.dirpath())

    def export(self, topath):
        """ export to a local path

        topath should not exist prior to calling this, returns a
        py.path.local instance
        """
        topath = py.path.local(topath)
        args = ['"%s"' % (self._escape(self),),
                '"%s"' % (self._escape(topath),)]
        if self.rev is not None:
            args = ['-r', str(self.rev)] + args
        self._svncmdexecauth('svn export %s' % (' '.join(args),))
        return topath

    def ensure(self, *args, **kwargs):
        """ ensure that an args-joined path exists (by default as
        a file). If you specify a keyword argument 'dir=True'
        then the path is forced to be a directory path.
        """
        if getattr(self, 'rev', None) is not None:
            raise py.error.EINVAL(self, "revisions are immutable")
        target = self.join(*args)
        dir = kwargs.get('dir', 0)
        # find the deepest ancestor of target that already exists
        for x in target.parts(reverse=True):
            if x.check():
                break
        else:
            raise py.error.ENOENT(target, "has not any valid base!")
        if x == target:
            if not x.check(dir=dir):
                raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x)
            return x
        tocreate = target.relto(x)
        basename = tocreate.split(self.sep, 1)[0]
        # build the missing subtree locally and 'svn import' it in one go
        tempdir = py.path.local.mkdtemp()
        try:
            tempdir.ensure(tocreate, dir=dir)
            cmd = 'svn import -m "%s" "%s" "%s"' % (
                  "ensure %s" % self._escape(tocreate),
                  self._escape(tempdir.join(basename)),
                  x.join(basename)._encodedurl())
            self._svncmdexecauth(cmd)
            self._norev_delentry(x)
        finally:
            tempdir.remove()
        return target

    # end of modifying methods
    def _propget(self, name):
        res = self._svnwithrev('propget', name)
        return res[:-1] # strip trailing newline

    def _proplist(self):
        res = self._svnwithrev('proplist')
        lines = res.split('\n')
        # the first line is the "Properties on ..." header
        lines = [x.strip() for x in lines[1:]]
        return svncommon.PropListDict(self, lines)

    def info(self):
        """ return an Info structure with svn-provided information. """
        # look ourselves up in the parent's directory listing
        parent = self.dirpath()
        nameinfo_seq = parent._listdir_nameinfo()
        bn = self.basename
        for name, info in nameinfo_seq:
            if name == bn:
                return info
        raise py.error.ENOENT(self)


    def _listdir_nameinfo(self):
        """ return sequence of name-info directory entries of self """
        def builder():
            try:
                res = self._svnwithrev('ls', '-v')
            except process.cmdexec.Error:
                # map well-known svn error texts to errno-style exceptions
                e = sys.exc_info()[1]
                if e.err.find('non-existent in that revision') != -1:
                    raise py.error.ENOENT(self, e.err)
                elif e.err.find('File not found') != -1:
                    raise py.error.ENOENT(self, e.err)
                elif e.err.find('not part of a repository')!=-1:
                    raise py.error.ENOENT(self, e.err)
                elif e.err.find('Unable to open')!=-1:
                    raise py.error.ENOENT(self, e.err)
                elif e.err.lower().find('method not allowed')!=-1:
                    raise py.error.EACCES(self, e.err)
                raise py.error.Error(e.err)
            lines = res.split('\n')
            nameinfo_seq = []
            for lsline in lines:
                if lsline:
                    info = InfoSvnCommand(lsline)
                    if info._name != '.':  # svn 1.5 produces '.' dirs,
                        nameinfo_seq.append((info._name, info))
            nameinfo_seq.sort()
            return nameinfo_seq
        auth = self.auth and self.auth.makecmdoptions() or None
        if self.rev is not None:
            return self._lsrevcache.getorbuild((self.strpath, self.rev, auth),
                                               builder)
        else:
            return self._lsnorevcache.getorbuild((self.strpath, auth),
                                                 builder)

    def listdir(self, fil=None, sort=None):
        """ list directory contents, possibly filter by the given fil func
        and possibly sorted.
        """
        if isinstance(fil, str):
            fil = common.FNMatcher(fil)
        nameinfo_seq = self._listdir_nameinfo()
        if len(nameinfo_seq) == 1:
            name, info = nameinfo_seq[0]
            # 'svn ls' on a file lists the file itself: not a directory
            if name == self.basename and info.kind == 'file':
                #if not self.check(dir=1):
                raise py.error.ENOTDIR(self)
        paths = [self.join(name) for (name, info) in nameinfo_seq]
        if fil:
            paths = [x for x in paths if fil(x)]
        self._sortlist(paths, sort)
        return paths


    def log(self, rev_start=None, rev_end=1, verbose=False):
        """ return a list of LogEntry instances for this path.
        rev_start is the starting revision (defaulting to the first one).
        rev_end is the last revision (defaulting to HEAD).
        if verbose is True, then the LogEntry instances also know which files changed.
        """
        assert self.check() #make it simpler for the pipe
        rev_start = rev_start is None and "HEAD" or rev_start
        rev_end = rev_end is None and "HEAD" or rev_end

        if rev_start == "HEAD" and rev_end == 1:
            rev_opt = ""
        else:
            rev_opt = "-r %s:%s" % (rev_start, rev_end)
        verbose_opt = verbose and "-v" or ""
        xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' %
                                     (rev_opt, verbose_opt, self.strpath))
        # parse the xml log output into LogEntry objects
        from xml.dom import minidom
        tree = minidom.parse(xmlpipe)
        result = []
        for logentry in filter(None, tree.firstChild.childNodes):
            if logentry.nodeType == logentry.ELEMENT_NODE:
                result.append(svncommon.LogEntry(logentry))
        return result
+
+#01234567890123456789012345678901234567890123467
+# 2256 hpk 165 Nov 24 17:55 __init__.py
+# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!!
+# 1312 johnny 1627 May 05 14:32 test_decorators.py
+#
class InfoSvnCommand:
    """Parsed representation of a single line of 'svn ls -v' output."""
    # the '0?' part in the middle is an indication of whether the resource is
    # locked, see 'svn help ls'
    lspattern = re.compile(
        r'^ *(?P<rev>\d+) +(?P<author>.+?) +(0? *(?P<size>\d+))? '
            '*(?P<date>\w+ +\d{2} +[\d:]+) +(?P<file>.*)$')
    def __init__(self, line):
        # this is a typical line from 'svn ls http://...'
        #_ 1127 jum 0 Jul 13 15:28 branch/
        match = self.lspattern.match(line)
        data = match.groupdict()
        self._name = data['file']
        if self._name[-1] == '/':
            # directories are listed with a trailing slash
            self._name = self._name[:-1]
            self.kind = 'dir'
        else:
            self.kind = 'file'
        #self.has_props = l.pop(0) == 'P'
        self.created_rev = int(data['rev'])
        self.last_author = data['author']
        # directories are listed without a size column
        self.size = data['size'] and int(data['size']) or 0
        self.mtime = parse_time_with_missing_year(data['date'])
        self.time = self.mtime * 1000000

    def __eq__(self, other):
        return self.__dict__ == other.__dict__
+
+
+#____________________________________________________
+#
+# helper functions
+#____________________________________________________
def parse_time_with_missing_year(timestr):
    """Parse the date column of one "svn ls -v" line into an epoch value.

    svn omits the year for recent entries and prints "HH:MM" instead,
    which makes *timestr* ambiguous; in that case the current year is
    assumed and rolled back by one if the result would lie in the future.
    """
    import calendar
    t_now = time.gmtime()

    fields = timestr.split()
    month = time.strptime(fields[0], '%b')[1]
    day = time.strptime(fields[1], '%d')[2]
    last = fields[2]  # either a year or hour:minute
    if ":" in last:
        hour, minute = time.strptime(last, '%H:%M')[3:5]
        year = t_now[0]
    else:
        year = time.strptime(last, '%Y')[0]
        hour = minute = 0

    candidate = (year, month, day, hour, minute, 0, 0, 0, 0)
    if candidate > t_now:
        # svn never lists future entries: it must be from last year
        candidate = (year - 1, month, day, hour, minute, 0, 0, 0, 0)
    return calendar.timegm(candidate)
+
class PathEntry:
    """A single changed-path record from a verbose svn log entry."""

    def __init__(self, ppart):
        self.strpath = ppart.firstChild.nodeValue.encode('UTF-8')
        self.action = ppart.getAttribute('action').encode('UTF-8')
        if self.action != 'A':
            return
        # additions may carry copy-from information
        copyfrom = ppart.getAttribute('copyfrom-path').encode('UTF-8')
        self.copyfrom_path = copyfrom
        if copyfrom:
            self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev'))
+
Added: pypy/branch/py12/py/_path/svnwc.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_path/svnwc.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,1231 @@
+"""
+svn-Command based Implementation of a Subversion WorkingCopy Path.
+
+ SvnWCCommandPath is the main class.
+
+"""
+
+import os, sys, time, re, calendar
+import py
+import subprocess
+from py._path import common
+
+#-----------------------------------------------------------
+# Caching latest repository revision and repo-paths
+# (getting them is slow with the current implementations)
+#
+# XXX make mt-safe
+#-----------------------------------------------------------
+
class cache:
    # module-wide memo dictionaries for expensive svn queries
    # (see the header note above: not yet made thread-safe)
    proplist = {}
    info = {}
    entries = {}
    prop = {}
+
class RepoEntry:
    """One cached (url, revision, timestamp) record of a repository."""

    def __init__(self, url, rev, timestamp):
        self.url = url
        self.rev = rev
        self.timestamp = timestamp

    def __str__(self):
        return "repo: %s;%s %s" % (self.url, self.rev, self.timestamp)
+
class RepoCache:
    """Remember discovered repository roots and their revisions.

    Within ``timeout`` seconds of the last update the cached revision of
    a matching root is returned directly; after that -1 signals that the
    revision must be re-determined.
    """
    timeout = 20 # seconds after which we forget that we know the last revision

    def __init__(self):
        self.repos = []

    def clear(self):
        self.repos = []

    def put(self, url, rev, timestamp=None):
        """Record *rev* for *url*; a rev of None is silently ignored."""
        if rev is None:
            return
        if timestamp is None:
            timestamp = time.time()
        for known in self.repos:
            if known.url == url:
                # refresh the existing record in place
                known.timestamp = timestamp
                known.rev = rev
                return
        self.repos.append(RepoEntry(url, rev, timestamp))

    def get(self, url):
        """Return (rooturl, rev) for *url*; rev is -1 when stale/unknown."""
        now = time.time()
        for known in self.repos:
            if url.startswith(known.url):
                if now < known.timestamp + self.timeout:
                    return known.url, known.rev
                return known.url, -1
        return url, -1
+
+repositories = RepoCache()
+
+
+# svn support code
+
# characters (besides alphanumerics) accepted in svn paths/urls by the
# _check_for_bad_chars() helper below
ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested
if sys.platform == "win32":
    ALLOWED_CHARS += ":"
# host parts may additionally contain 'user@' and port colons
ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:'
+
def _getsvnversion(ver=[]):
    """Return the installed svn client version as 'major.minor'.

    The mutable default argument serves deliberately as a cross-call
    cache so that 'svn -q --version' is executed at most once.
    """
    try:
        return ver[0]
    except IndexError:
        v = py.process.cmdexec("svn -q --version")
        # bugfix: the result of strip() was previously discarded, so a
        # trailing newline survived whenever the version string had fewer
        # than three dot-separated components.
        v = v.strip()
        v = '.'.join(v.split('.')[:2])
        ver.append(v)
        return v
+
+def _escape_helper(text):
+ text = str(text)
+ if py.std.sys.platform != 'win32':
+ text = str(text).replace('$', '\\$')
+ return text
+
def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS):
    """Return True when str(*text*) contains a character that is neither
    alphanumeric nor listed in *allowed_chars*."""
    for ch in str(text):
        if not (ch.isalnum() or ch in allowed_chars):
            return True
    return False
+
def checkbadchars(url):
    # (hpk) not quite sure about the exact purpose, guido w.?
    proto, uri = url.split("://", 1)
    if proto == "file":
        return
    host, uripath = uri.split('/', 1)
    # only check for bad chars in the non-protocol parts
    bad_host = _check_for_bad_chars(host, ALLOWED_CHARS_HOST)
    bad_path = _check_for_bad_chars(uripath, ALLOWED_CHARS)
    if bad_host or bad_path:
        raise ValueError("bad char in %r" % (url, ))
+
+
+#_______________________________________________________________
+
class SvnPathBase(common.PathBase):
    """ Base implementation for SvnPath implementations. """
    sep = '/'

    def _geturl(self):
        return self.strpath
    url = property(_geturl, None, None, "url of this svn-path.")

    def __str__(self):
        """ return a string representation (including rev-number) """
        return self.strpath

    def __hash__(self):
        return hash(self.strpath)

    def new(self, **kw):
        """ create a modified version of this path. A 'rev' argument
        indicates a new revision.
        the following keyword arguments modify various path parts:

          http://host.com/repo/path/file.ext
          |-----------------------|          dirname
                                    |------| basename
                                    |--|     purebasename
                                        |--| ext
        """
        obj = object.__new__(self.__class__)
        obj.rev = kw.get('rev', self.rev)
        obj.auth = kw.get('auth', self.auth)
        dirname, basename, purebasename, ext = self._getbyspec(
            "dirname,basename,purebasename,ext")
        if 'basename' in kw:
            # 'basename' excludes the finer-grained purebasename/ext specs
            if 'purebasename' in kw or 'ext' in kw:
                raise ValueError("invalid specification %r" % kw)
        else:
            pb = kw.setdefault('purebasename', purebasename)
            ext = kw.setdefault('ext', ext)
            if ext and not ext.startswith('.'):
                ext = '.' + ext
            kw['basename'] = pb + ext

        kw.setdefault('dirname', dirname)
        kw.setdefault('sep', self.sep)
        if kw['basename']:
            obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw
        else:
            obj.strpath = "%(dirname)s" % kw
        return obj

    def _getbyspec(self, spec):
        """ get specified parts of the path. 'arg' is a string
        with comma separated path parts. The parts are returned
        in exactly the order of the specification.

        you may specify the following parts:

          http://host.com/repo/path/file.ext
          |-----------------------|          dirname
                                    |------| basename
                                    |--|     purebasename
                                        |--| ext
        """
        res = []
        parts = self.strpath.split(self.sep)
        for name in spec.split(','):
            name = name.strip()
            if name == 'dirname':
                res.append(self.sep.join(parts[:-1]))
            elif name == 'basename':
                res.append(parts[-1])
            else:
                # purebasename/ext split on the last dot of the basename
                basename = parts[-1]
                i = basename.rfind('.')
                if i == -1:
                    purebasename, ext = basename, ''
                else:
                    purebasename, ext = basename[:i], basename[i:]
                if name == 'purebasename':
                    res.append(purebasename)
                elif name == 'ext':
                    res.append(ext)
                else:
                    raise NameError("Don't know part %r" % name)
        return res

    def __eq__(self, other):
        """ return true if path and rev attributes each match """
        # NOTE(review): both sides of the inner 'or' compare self.rev to
        # other.rev -- the duplication is harmless but presumably one side
        # was meant to compare something else; confirm against history.
        return (str(self) == str(other) and
                (self.rev == other.rev or self.rev == other.rev))

    def __ne__(self, other):
        return not self == other

    def join(self, *args):
        """ return a new Path (with the same revision) which is composed
        of the self Path followed by 'args' path components.
        """
        if not args:
            return self

        args = tuple([arg.strip(self.sep) for arg in args])
        parts = (self.strpath, ) + args
        newpath = self.__class__(self.sep.join(parts), self.rev, self.auth)
        return newpath

    def propget(self, name):
        """ return the content of the given property. """
        value = self._propget(name)
        return value

    def proplist(self):
        """ list all property names. """
        content = self._proplist()
        return content

    def size(self):
        """ Return the size of the file content of the Path. """
        return self.info().size

    def mtime(self):
        """ Return the last modification time of the file. """
        return self.info().mtime

    # shared help methods

    def _escape(self, cmd):
        return _escape_helper(cmd)


    #def _childmaxrev(self):
    #    """ return maximum revision number of childs (or self.rev if no childs) """
    #    rev = self.rev
    #    for name, info in self._listdir_nameinfo():
    #        rev = max(rev, info.created_rev)
    #    return rev

    #def _getlatestrevision(self):
    #    """ return latest repo-revision for this path. """
    #    url = self.strpath
    #    path = self.__class__(url, None)
    #
    #    # we need a long walk to find the root-repo and revision
    #    while 1:
    #        try:
    #            rev = max(rev, path._childmaxrev())
    #            previous = path
    #            path = path.dirpath()
    #        except (IOError, process.cmdexec.Error):
    #            break
    #    if rev is None:
    #        raise IOError, "could not determine newest repo revision for %s" % self
    #    return rev

    class Checkers(common.Checkers):
        # checker callbacks used by path.check(...)
        def dir(self):
            try:
                return self.path.info().kind == 'dir'
            except py.error.Error:
                # fall back: a listable path behaves like a directory
                return self._listdirworks()

        def _listdirworks(self):
            try:
                self.path.listdir()
            except py.error.ENOENT:
                return False
            else:
                return True

        def file(self):
            try:
                return self.path.info().kind == 'file'
            except py.error.ENOENT:
                return False

        def exists(self):
            try:
                return self.path.info()
            except py.error.ENOENT:
                return self._listdirworks()
+
def parse_apr_time(timestr):
    """Convert an apr-style '%Y-%m-%dT%H:%M:%S.micros' stamp to a
    local-time epoch value; the fractional part is ignored."""
    dot = timestr.rfind('.')
    if dot == -1:
        raise ValueError("could not parse %s" % timestr)
    parsed = time.strptime(timestr[:dot], "%Y-%m-%dT%H:%M:%S")
    return time.mktime(parsed)
+
class PropListDict(dict):
    """A dict over svn property names whose values are fetched lazily.

    All keys are pre-registered with None; the first lookup of a key asks
    the associated path for the property content and caches the result.
    """
    def __init__(self, path, keynames):
        dict.__init__(self, [(name, None) for name in keynames])
        self.path = path

    def __getitem__(self, key):
        cached = dict.__getitem__(self, key)
        if cached is None:
            cached = self.path.propget(key)
            dict.__setitem__(self, key, cached)
        return cached
+
def fixlocale():
    """Return a command prefix forcing the C locale (empty on windows)."""
    if sys.platform == 'win32':
        return ''
    return 'LC_ALL=C '
+
# some nasty chunk of code to solve path and url conversion and quoting issues
ILLEGAL_CHARS = '* | \ / : < > ? \t \n \x0b \x0c \r'.split(' ')
if os.sep in ILLEGAL_CHARS:
    ILLEGAL_CHARS.remove(os.sep)
ISWINDOWS = sys.platform == 'win32'
# matches an optional drive letter prefix followed by a colon-free path
_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I)
def _check_path(path):
    """Raise ValueError if *path* contains characters that cannot be
    handled during wc/url conversion: non-printable characters, shell
    metacharacters, or a colon except as a windows drive separator."""
    import string  # bugfix: string.printable was used without importing 'string'
    illegal = ILLEGAL_CHARS[:]
    sp = path.strpath
    if ISWINDOWS:
        illegal.remove(':')
        if not _reg_allow_disk.match(sp):
            raise ValueError('path may not contain a colon (:)')
    for char in sp:
        if char not in string.printable or char in illegal:
            raise ValueError('illegal character %r in path' % (char,))
+
def path_to_fspath(path, addat=True):
    """Return the filesystem string path of *path*.

    If addat is true the svn peg revision is appended as '@<rev>',
    or '@HEAD' when the revision is -1.
    """
    _check_path(path)
    sp = path.strpath
    if addat and path.rev != -1:
        sp = '%s@%s' % (sp, path.rev)
    elif addat:
        # bugfix: the literal read '%s at HEAD' -- an artifact of the
        # mailing-list archiver rewriting '@'; svn's peg-revision syntax
        # requires '@HEAD' (matching the '%s@%s' branch above).
        sp = '%s@HEAD' % (sp,)
    return sp
+
def url_from_path(path):
    """Return a quoted file:// url (with peg revision) for *path*."""
    fspath = path_to_fspath(path, False)
    quote = py.std.urllib.quote
    if ISWINDOWS:
        match = _reg_allow_disk.match(fspath)
        fspath = fspath.replace('\\', '/')
        if match.group(1):
            # keep the drive part unquoted, quote only the remainder
            fspath = '/%s%s' % (match.group(1).replace('\\', '/'),
                                quote(fspath[len(match.group(1)):]))
        else:
            fspath = quote(fspath)
    else:
        fspath = quote(fspath)
    if path.rev != -1:
        fspath = '%s@%s' % (fspath, path.rev)
    else:
        # bugfix: was '%s at HEAD' (mail-archiver artifact rewriting '@');
        # svn expects the '@HEAD' peg-revision form.
        fspath = '%s@HEAD' % (fspath,)
    return 'file://%s' % (fspath,)
+
class SvnAuth(object):
    """Container for subversion credentials and auth behaviour flags."""

    def __init__(self, username, password, cache_auth=True, interactive=True):
        self.username = username
        self.password = password
        self.cache_auth = cache_auth
        self.interactive = interactive

    def makecmdoptions(self):
        """Render the credentials as svn command line options."""
        username = self.username.replace('"', '\\"')
        password = self.password.replace('"', '\\"')
        options = []
        if username:
            options.append('--username="%s"' % (username,))
        if password:
            options.append('--password="%s"' % (password,))
        if not self.cache_auth:
            options.append('--no-auth-cache')
        if not self.interactive:
            options.append('--non-interactive')
        return ' '.join(options)

    def __str__(self):
        return "<SvnAuth username=%s ...>" % (self.username,)
+
+rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)')
+
+class SvnWCCommandPath(common.PathBase):
+ """ path implementation offering access/modification to svn working copies.
+ It has methods similar to the functions in os.path and similar to the
+ commands of the svn client.
+ """
+ sep = os.sep
+
+ def __new__(cls, wcpath=None, auth=None):
+ self = object.__new__(cls)
+ if isinstance(wcpath, cls):
+ if wcpath.__class__ == cls:
+ return wcpath
+ wcpath = wcpath.localpath
+ if _check_for_bad_chars(str(wcpath),
+ ALLOWED_CHARS):
+ raise ValueError("bad char in wcpath %s" % (wcpath, ))
+ self.localpath = py.path.local(wcpath)
+ self.auth = auth
+ return self
+
+ strpath = property(lambda x: str(x.localpath), None, None, "string path")
+ rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision")
+
+ def __eq__(self, other):
+ return self.localpath == getattr(other, 'localpath', None)
+
+ def _geturl(self):
+ if getattr(self, '_url', None) is None:
+ info = self.info()
+ self._url = info.url #SvnPath(info.url, info.rev)
+ assert isinstance(self._url, py.builtin._basestring)
+ return self._url
+
+ url = property(_geturl, None, None, "url of this WC item")
+
+ def _escape(self, cmd):
+ return _escape_helper(cmd)
+
+ def dump(self, obj):
+ """ pickle object into path location"""
+ return self.localpath.dump(obj)
+
+ def svnurl(self):
+ """ return current SvnPath for this WC-item. """
+ info = self.info()
+ return py.path.svnurl(info.url)
+
+ def __repr__(self):
+ return "svnwc(%r)" % (self.strpath) # , self._url)
+
+ def __str__(self):
+ return str(self.localpath)
+
+ def _makeauthoptions(self):
+ if self.auth is None:
+ return ''
+ return self.auth.makecmdoptions()
+
+ def _authsvn(self, cmd, args=None):
+ args = args and list(args) or []
+ args.append(self._makeauthoptions())
+ return self._svn(cmd, *args)
+
+ def _svn(self, cmd, *args):
+ l = ['svn %s' % cmd]
+ args = [self._escape(item) for item in args]
+ l.extend(args)
+ l.append('"%s"' % self._escape(self.strpath))
+ # try fixing the locale because we can't otherwise parse
+ string = fixlocale() + " ".join(l)
+ try:
+ try:
+ key = 'LC_MESSAGES'
+ hold = os.environ.get(key)
+ os.environ[key] = 'C'
+ out = py.process.cmdexec(string)
+ finally:
+ if hold:
+ os.environ[key] = hold
+ else:
+ del os.environ[key]
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ strerr = e.err.lower()
+ if strerr.find('file not found') != -1:
+ raise py.error.ENOENT(self)
+ if (strerr.find('file exists') != -1 or
+ strerr.find('file already exists') != -1 or
+ strerr.find("can't create directory") != -1):
+ raise py.error.EEXIST(self)
+ raise
+ return out
+
+ def switch(self, url):
+ """ switch to given URL. """
+ self._authsvn('switch', [url])
+
+ def checkout(self, url=None, rev=None):
+ """ checkout from url to local wcpath. """
+ args = []
+ if url is None:
+ url = self.url
+ if rev is None or rev == -1:
+ if (py.std.sys.platform != 'win32' and
+ _getsvnversion() == '1.3'):
+ url += "@HEAD"
+ else:
+ if _getsvnversion() == '1.3':
+ url += "@%d" % rev
+ else:
+ args.append('-r' + str(rev))
+ args.append(url)
+ self._authsvn('co', args)
+
+ def update(self, rev='HEAD', interactive=True):
+ """ update working copy item to given revision. (None -> HEAD). """
+ opts = ['-r', rev]
+ if not interactive:
+ opts.append("--non-interactive")
+ self._authsvn('up', opts)
+
+ def write(self, content, mode='w'):
+ """ write content into local filesystem wc. """
+ self.localpath.write(content, mode)
+
+ def dirpath(self, *args):
+ """ return the directory Path of the current Path. """
+ return self.__class__(self.localpath.dirpath(*args), auth=self.auth)
+
+ def _ensuredirs(self):
+ parent = self.dirpath()
+ if parent.check(dir=0):
+ parent._ensuredirs()
+ if self.check(dir=0):
+ self.mkdir()
+ return self
+
+ def ensure(self, *args, **kwargs):
+ """ ensure that an args-joined path exists (by default as
+ a file). if you specify a keyword argument 'directory=True'
+ then the path is forced to be a directory path.
+ """
+ p = self.join(*args)
+ if p.check():
+ if p.check(versioned=False):
+ p.add()
+ return p
+ if kwargs.get('dir', 0):
+ return p._ensuredirs()
+ parent = p.dirpath()
+ parent._ensuredirs()
+ p.write("")
+ p.add()
+ return p
+
+ def mkdir(self, *args):
+ """ create & return the directory joined with args. """
+ if args:
+ return self.join(*args).mkdir()
+ else:
+ self._svn('mkdir')
+ return self
+
+ def add(self):
+ """ add ourself to svn """
+ self._svn('add')
+
+ def remove(self, rec=1, force=1):
+ """ remove a file or a directory tree. 'rec'ursive is
+ ignored and considered always true (because of
+ underlying svn semantics.
+ """
+ assert rec, "svn cannot remove non-recursively"
+ if not self.check(versioned=True):
+ # not added to svn (anymore?), just remove
+ py.path.local(self).remove()
+ return
+ flags = []
+ if force:
+ flags.append('--force')
+ self._svn('remove', *flags)
+
+ def copy(self, target):
+ """ copy path to target."""
+ py.process.cmdexec("svn copy %s %s" %(str(self), str(target)))
+
+ def rename(self, target):
+ """ rename this path to target. """
+ py.process.cmdexec("svn move --force %s %s" %(str(self), str(target)))
+
+ def lock(self):
+ """ set a lock (exclusive) on the resource """
+ out = self._authsvn('lock').strip()
+ if not out:
+ # warning or error, raise exception
+ raise Exception(out[4:])
+
+ def unlock(self):
+ """ unset a previously set lock """
+ out = self._authsvn('unlock').strip()
+ if out.startswith('svn:'):
+ # warning or error, raise exception
+ raise Exception(out[4:])
+
+ def cleanup(self):
+ """ remove any locks from the resource """
+ # XXX should be fixed properly!!!
+ try:
+ self.unlock()
+ except:
+ pass
+
+ def status(self, updates=0, rec=0, externals=0):
+ """ return (collective) Status object for this file. """
+ # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1
+ # 2201 2192 jum test
+ # XXX
+ if externals:
+ raise ValueError("XXX cannot perform status() "
+ "on external items yet")
+ else:
+ #1.2 supports: externals = '--ignore-externals'
+ externals = ''
+ if rec:
+ rec= ''
+ else:
+ rec = '--non-recursive'
+
+ # XXX does not work on all subversion versions
+ #if not externals:
+ # externals = '--ignore-externals'
+
+ if updates:
+ updates = '-u'
+ else:
+ updates = ''
+
+ try:
+ cmd = 'status -v --xml --no-ignore %s %s %s' % (
+ updates, rec, externals)
+ out = self._authsvn(cmd)
+ except py.process.cmdexec.Error:
+ cmd = 'status -v --no-ignore %s %s %s' % (
+ updates, rec, externals)
+ out = self._authsvn(cmd)
+ rootstatus = WCStatus(self).fromstring(out, self)
+ else:
+ rootstatus = XMLWCStatus(self).fromstring(out, self)
+ return rootstatus
+
+ def diff(self, rev=None):
+ """ return a diff of the current path against revision rev (defaulting
+ to the last one).
+ """
+ args = []
+ if rev is not None:
+ args.append("-r %d" % rev)
+ out = self._authsvn('diff', args)
+ return out
+
+ def blame(self):
+ """ return a list of tuples of three elements:
+ (revision, commiter, line)
+ """
+ out = self._svn('blame')
+ result = []
+ blamelines = out.splitlines()
+ reallines = py.path.svnurl(self.url).readlines()
+ for i, (blameline, line) in enumerate(
+ zip(blamelines, reallines)):
+ m = rex_blame.match(blameline)
+ if not m:
+ raise ValueError("output line %r of svn blame does not match "
+ "expected format" % (line, ))
+ rev, name, _ = m.groups()
+ result.append((int(rev), name, line))
+ return result
+
+ _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL)
+ def commit(self, msg='', rec=1):
+ """ commit with support for non-recursive commits """
+ # XXX i guess escaping should be done better here?!?
+ cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),)
+ if not rec:
+ cmd += ' -N'
+ out = self._authsvn(cmd)
+ try:
+ del cache.info[self]
+ except KeyError:
+ pass
+ if out:
+ m = self._rex_commit.match(out)
+ return int(m.group(1))
+
+ def propset(self, name, value, *args):
+ """ set property name to value on this path. """
+ d = py.path.local.mkdtemp()
+ try:
+ p = d.join('value')
+ p.write(value)
+ self._svn('propset', name, '--file', str(p), *args)
+ finally:
+ d.remove()
+
+ def propget(self, name):
+ """ get property name on this path. """
+ res = self._svn('propget', name)
+ return res[:-1] # strip trailing newline
+
+ def propdel(self, name):
+ """ delete property name on this path. """
+ res = self._svn('propdel', name)
+ return res[:-1] # strip trailing newline
+
+ def proplist(self, rec=0):
+ """ return a mapping of property names to property values.
+If rec is True, then return a dictionary mapping sub-paths to such mappings.
+"""
+ if rec:
+ res = self._svn('proplist -R')
+ return make_recursive_propdict(self, res)
+ else:
+ res = self._svn('proplist')
+ lines = res.split('\n')
+ lines = [x.strip() for x in lines[1:]]
+ return PropListDict(self, lines)
+
+ def revert(self, rec=0):
+ """ revert the local changes of this path. if rec is True, do so
+recursively. """
+ if rec:
+ result = self._svn('revert -R')
+ else:
+ result = self._svn('revert')
+ return result
+
+ def new(self, **kw):
+ """ create a modified version of this path. A 'rev' argument
+ indicates a new revision.
+ the following keyword arguments modify various path parts:
+
+ http://host.com/repo/path/file.ext
+ |-----------------------| dirname
+ |------| basename
+ |--| purebasename
+ |--| ext
+ """
+ if kw:
+ localpath = self.localpath.new(**kw)
+ else:
+ localpath = self.localpath
+ return self.__class__(localpath, auth=self.auth)
+
+ def join(self, *args, **kwargs):
+ """ return a new Path (with the same revision) which is composed
+ of the self Path followed by 'args' path components.
+ """
+ if not args:
+ return self
+ localpath = self.localpath.join(*args, **kwargs)
+ return self.__class__(localpath, auth=self.auth)
+
+ def info(self, usecache=1):
+ """ return an Info structure with svn-provided information. """
+ info = usecache and cache.info.get(self)
+ if not info:
+ try:
+ output = self._svn('info')
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ if e.err.find('Path is not a working copy directory') != -1:
+ raise py.error.ENOENT(self, e.err)
+ elif e.err.find("is not under version control") != -1:
+ raise py.error.ENOENT(self, e.err)
+ raise
+ # XXX SVN 1.3 has output on stderr instead of stdout (while it does
+ # return 0!), so a bit nasty, but we assume no output is output
+ # to stderr...
+ if (output.strip() == '' or
+ output.lower().find('not a versioned resource') != -1):
+ raise py.error.ENOENT(self, output)
+ info = InfoSvnWCCommand(output)
+
+ # Can't reliably compare on Windows without access to win32api
+ if py.std.sys.platform != 'win32':
+ if info.path != self.localpath:
+ raise py.error.ENOENT(self, "not a versioned resource:" +
+ " %s != %s" % (info.path, self.localpath))
+ cache.info[self] = info
+ return info
+
+ def listdir(self, fil=None, sort=None):
+ """ return a sequence of Paths.
+
+ listdir will return either a tuple or a list of paths
+ depending on implementation choices.
+ """
+ if isinstance(fil, str):
+ fil = common.FNMatcher(fil)
+ # XXX unify argument naming with LocalPath.listdir
+ def notsvn(path):
+ return path.basename != '.svn'
+
+ paths = []
+ for localpath in self.localpath.listdir(notsvn):
+ p = self.__class__(localpath, auth=self.auth)
+ if notsvn(p) and (not fil or fil(p)):
+ paths.append(p)
+ self._sortlist(paths, sort)
+ return paths
+
+ def open(self, mode='r'):
+ """ return an opened file with the given mode. """
+ return open(self.strpath, mode)
+
+ def _getbyspec(self, spec):
+ return self.localpath._getbyspec(spec)
+
+ class Checkers(py.path.local.Checkers):
+ def __init__(self, path):
+ self.svnwcpath = path
+ self.path = path.localpath
+ def versioned(self):
+ try:
+ s = self.svnwcpath.info()
+ except (py.error.ENOENT, py.error.EEXIST):
+ return False
+ except py.process.cmdexec.Error:
+ e = sys.exc_info()[1]
+ if e.err.find('is not a working copy')!=-1:
+ return False
+ if e.err.lower().find('not a versioned resource') != -1:
+ return False
+ raise
+ else:
+ return True
+
+ def log(self, rev_start=None, rev_end=1, verbose=False):
+ """ return a list of LogEntry instances for this path.
+rev_start is the starting revision (defaulting to the first one).
+rev_end is the last revision (defaulting to HEAD).
+if verbose is True, then the LogEntry instances also know which files changed.
+"""
+ assert self.check() # make it simpler for the pipe
+ rev_start = rev_start is None and "HEAD" or rev_start
+ rev_end = rev_end is None and "HEAD" or rev_end
+ if rev_start == "HEAD" and rev_end == 1:
+ rev_opt = ""
+ else:
+ rev_opt = "-r %s:%s" % (rev_start, rev_end)
+ verbose_opt = verbose and "-v" or ""
+ locale_env = fixlocale()
+ # some blather on stderr
+ auth_opt = self._makeauthoptions()
+ #stdin, stdout, stderr = os.popen3(locale_env +
+ # 'svn log --xml %s %s %s "%s"' % (
+ # rev_opt, verbose_opt, auth_opt,
+ # self.strpath))
+ cmd = locale_env + 'svn log --xml %s %s %s "%s"' % (
+ rev_opt, verbose_opt, auth_opt, self.strpath)
+
+ popen = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ )
+ stdout, stderr = popen.communicate()
+ stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
+ minidom,ExpatError = importxml()
+ try:
+ tree = minidom.parseString(stdout)
+ except ExpatError:
+ raise ValueError('no such revision')
+ result = []
+ for logentry in filter(None, tree.firstChild.childNodes):
+ if logentry.nodeType == logentry.ELEMENT_NODE:
+ result.append(LogEntry(logentry))
+ return result
+
+ def size(self):
+ """ Return the size of the file content of the Path. """
+ return self.info().size
+
+ def mtime(self):
+ """ Return the last modification time of the file. """
+ return self.info().mtime
+
+ def __hash__(self):
+ return hash((self.strpath, self.__class__, self.auth))
+
+
+class WCStatus:
+ attrnames = ('modified','added', 'conflict', 'unchanged', 'external',
+ 'deleted', 'prop_modified', 'unknown', 'update_available',
+ 'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced'
+ )
+
+ def __init__(self, wcpath, rev=None, modrev=None, author=None):
+ self.wcpath = wcpath
+ self.rev = rev
+ self.modrev = modrev
+ self.author = author
+
+ for name in self.attrnames:
+ setattr(self, name, [])
+
+ def allpath(self, sort=True, **kw):
+ d = {}
+ for name in self.attrnames:
+ if name not in kw or kw[name]:
+ for path in getattr(self, name):
+ d[path] = 1
+ l = d.keys()
+ if sort:
+ l.sort()
+ return l
+
+ # XXX a bit scary to assume there's always 2 spaces between username and
+ # path, however with win32 allowing spaces in user names there doesn't
+ # seem to be a more solid approach :(
+ _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)')
+
+ def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
+ """ return a new WCStatus object from data 's'
+ """
+ rootstatus = WCStatus(rootwcpath, rev, modrev, author)
+ update_rev = None
+ for line in data.split('\n'):
+ if not line.strip():
+ continue
+ #print "processing %r" % line
+ flags, rest = line[:8], line[8:]
+ # first column
+ c0,c1,c2,c3,c4,c5,x6,c7 = flags
+ #if '*' in line:
+ # print "flags", repr(flags), "rest", repr(rest)
+
+ if c0 in '?XI':
+ fn = line.split(None, 1)[1]
+ if c0 == '?':
+ wcpath = rootwcpath.join(fn, abs=1)
+ rootstatus.unknown.append(wcpath)
+ elif c0 == 'X':
+ wcpath = rootwcpath.__class__(
+ rootwcpath.localpath.join(fn, abs=1),
+ auth=rootwcpath.auth)
+ rootstatus.external.append(wcpath)
+ elif c0 == 'I':
+ wcpath = rootwcpath.join(fn, abs=1)
+ rootstatus.ignored.append(wcpath)
+
+ continue
+
+ #elif c0 in '~!' or c4 == 'S':
+ # raise NotImplementedError("received flag %r" % c0)
+
+ m = WCStatus._rex_status.match(rest)
+ if not m:
+ if c7 == '*':
+ fn = rest.strip()
+ wcpath = rootwcpath.join(fn, abs=1)
+ rootstatus.update_available.append(wcpath)
+ continue
+ if line.lower().find('against revision:')!=-1:
+ update_rev = int(rest.split(':')[1].strip())
+ continue
+ if line.lower().find('status on external') > -1:
+ # XXX not sure what to do here... perhaps we want to
+ # store some state instead of just continuing, as right
+ # now it makes the top-level external get added twice
+ # (once as external, once as 'normal' unchanged item)
+ # because of the way SVN presents external items
+ continue
+ # keep trying
+ raise ValueError("could not parse line %r" % line)
+ else:
+ rev, modrev, author, fn = m.groups()
+ wcpath = rootwcpath.join(fn, abs=1)
+ #assert wcpath.check()
+ if c0 == 'M':
+ assert wcpath.check(file=1), "didn't expect a directory with changed content here"
+ rootstatus.modified.append(wcpath)
+ elif c0 == 'A' or c3 == '+' :
+ rootstatus.added.append(wcpath)
+ elif c0 == 'D':
+ rootstatus.deleted.append(wcpath)
+ elif c0 == 'C':
+ rootstatus.conflict.append(wcpath)
+ elif c0 == '~':
+ rootstatus.kindmismatch.append(wcpath)
+ elif c0 == '!':
+ rootstatus.incomplete.append(wcpath)
+ elif c0 == 'R':
+ rootstatus.replaced.append(wcpath)
+ elif not c0.strip():
+ rootstatus.unchanged.append(wcpath)
+ else:
+ raise NotImplementedError("received flag %r" % c0)
+
+ if c1 == 'M':
+ rootstatus.prop_modified.append(wcpath)
+ # XXX do we cover all client versions here?
+ if c2 == 'L' or c5 == 'K':
+ rootstatus.locked.append(wcpath)
+ if c7 == '*':
+ rootstatus.update_available.append(wcpath)
+
+ if wcpath == rootwcpath:
+ rootstatus.rev = rev
+ rootstatus.modrev = modrev
+ rootstatus.author = author
+ if update_rev:
+ rootstatus.update_rev = update_rev
+ continue
+ return rootstatus
+ fromstring = staticmethod(fromstring)
+
+class XMLWCStatus(WCStatus):
+ def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
+ """ parse 'data' (XML string as outputted by svn st) into a status obj
+ """
+ # XXX for externals, the path is shown twice: once
+ # with external information, and once with full info as if
+ # the item was a normal non-external... the current way of
+ # dealing with this issue is by ignoring it - this does make
+ # externals appear as external items as well as 'normal',
+ # unchanged ones in the status object so this is far from ideal
+ rootstatus = WCStatus(rootwcpath, rev, modrev, author)
+ update_rev = None
+ minidom, ExpatError = importxml()
+ try:
+ doc = minidom.parseString(data)
+ except ExpatError:
+ e = sys.exc_info()[1]
+ raise ValueError(str(e))
+ urevels = doc.getElementsByTagName('against')
+ if urevels:
+ rootstatus.update_rev = urevels[-1].getAttribute('revision')
+ for entryel in doc.getElementsByTagName('entry'):
+ path = entryel.getAttribute('path')
+ statusel = entryel.getElementsByTagName('wc-status')[0]
+ itemstatus = statusel.getAttribute('item')
+
+ if itemstatus == 'unversioned':
+ wcpath = rootwcpath.join(path, abs=1)
+ rootstatus.unknown.append(wcpath)
+ continue
+ elif itemstatus == 'external':
+ wcpath = rootwcpath.__class__(
+ rootwcpath.localpath.join(path, abs=1),
+ auth=rootwcpath.auth)
+ rootstatus.external.append(wcpath)
+ continue
+ elif itemstatus == 'ignored':
+ wcpath = rootwcpath.join(path, abs=1)
+ rootstatus.ignored.append(wcpath)
+ continue
+ elif itemstatus == 'incomplete':
+ wcpath = rootwcpath.join(path, abs=1)
+ rootstatus.incomplete.append(wcpath)
+ continue
+
+ rev = statusel.getAttribute('revision')
+ if itemstatus == 'added' or itemstatus == 'none':
+ rev = '0'
+ modrev = '?'
+ author = '?'
+ date = ''
+ else:
+ #print entryel.toxml()
+ commitel = entryel.getElementsByTagName('commit')[0]
+ if commitel:
+ modrev = commitel.getAttribute('revision')
+ author = ''
+ author_els = commitel.getElementsByTagName('author')
+ if author_els:
+ for c in author_els[0].childNodes:
+ author += c.nodeValue
+ date = ''
+ for c in commitel.getElementsByTagName('date')[0]\
+ .childNodes:
+ date += c.nodeValue
+
+ wcpath = rootwcpath.join(path, abs=1)
+
+ assert itemstatus != 'modified' or wcpath.check(file=1), (
+ 'did\'t expect a directory with changed content here')
+
+ itemattrname = {
+ 'normal': 'unchanged',
+ 'unversioned': 'unknown',
+ 'conflicted': 'conflict',
+ 'none': 'added',
+ }.get(itemstatus, itemstatus)
+
+ attr = getattr(rootstatus, itemattrname)
+ attr.append(wcpath)
+
+ propsstatus = statusel.getAttribute('props')
+ if propsstatus not in ('none', 'normal'):
+ rootstatus.prop_modified.append(wcpath)
+
+ if wcpath == rootwcpath:
+ rootstatus.rev = rev
+ rootstatus.modrev = modrev
+ rootstatus.author = author
+ rootstatus.date = date
+
+ # handle repos-status element (remote info)
+ rstatusels = entryel.getElementsByTagName('repos-status')
+ if rstatusels:
+ rstatusel = rstatusels[0]
+ ritemstatus = rstatusel.getAttribute('item')
+ if ritemstatus in ('added', 'modified'):
+ rootstatus.update_available.append(wcpath)
+
+ lockels = entryel.getElementsByTagName('lock')
+ if len(lockels):
+ rootstatus.locked.append(wcpath)
+
+ return rootstatus
+ fromstring = staticmethod(fromstring)
+
+class InfoSvnWCCommand:
+ def __init__(self, output):
+ # Path: test
+ # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test
+ # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
+ # Revision: 2151
+ # Node Kind: directory
+ # Schedule: normal
+ # Last Changed Author: hpk
+ # Last Changed Rev: 2100
+ # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
+ # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003)
+
+ d = {}
+ for line in output.split('\n'):
+ if not line.strip():
+ continue
+ key, value = line.split(':', 1)
+ key = key.lower().replace(' ', '')
+ value = value.strip()
+ d[key] = value
+ try:
+ self.url = d['url']
+ except KeyError:
+ raise ValueError("Not a versioned resource")
+ #raise ValueError, "Not a versioned resource %r" % path
+ self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
+ self.rev = int(d['revision'])
+ self.path = py.path.local(d['path'])
+ self.size = self.path.size()
+ if 'lastchangedrev' in d:
+ self.created_rev = int(d['lastchangedrev'])
+ if 'lastchangedauthor' in d:
+ self.last_author = d['lastchangedauthor']
+ if 'lastchangeddate' in d:
+ self.mtime = parse_wcinfotime(d['lastchangeddate'])
+ self.time = self.mtime * 1000000
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+def parse_wcinfotime(timestr):
+ """ Returns seconds since epoch, UTC. """
+ # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
+ m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr)
+ if not m:
+ raise ValueError("timestring %r does not match" % timestr)
+ timestr, timezone = m.groups()
+ # do not handle timezone specially, return value should be UTC
+ parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S")
+ return calendar.timegm(parsedtime)
+
+def make_recursive_propdict(wcroot,
+ output,
+ rex = re.compile("Properties on '(.*)':")):
+ """ Return a dictionary of path->PropListDict mappings. """
+ lines = [x for x in output.split('\n') if x]
+ pdict = {}
+ while lines:
+ line = lines.pop(0)
+ m = rex.match(line)
+ if not m:
+ raise ValueError("could not parse propget-line: %r" % line)
+ path = m.groups()[0]
+ wcpath = wcroot.join(path, abs=1)
+ propnames = []
+ while lines and lines[0].startswith(' '):
+ propname = lines.pop(0).strip()
+ propnames.append(propname)
+ assert propnames, "must have found properties!"
+ pdict[wcpath] = PropListDict(wcpath, propnames)
+ return pdict
+
+
+def importxml(cache=[]):
+ if cache:
+ return cache
+ from xml.dom import minidom
+ from xml.parsers.expat import ExpatError
+ cache.extend([minidom, ExpatError])
+ return cache
+
+class LogEntry:
+ def __init__(self, logentry):
+ self.rev = int(logentry.getAttribute('revision'))
+ for lpart in filter(None, logentry.childNodes):
+ if lpart.nodeType == lpart.ELEMENT_NODE:
+ if lpart.nodeName == 'author':
+ self.author = lpart.firstChild.nodeValue
+ elif lpart.nodeName == 'msg':
+ if lpart.firstChild:
+ self.msg = lpart.firstChild.nodeValue
+ else:
+ self.msg = ''
+ elif lpart.nodeName == 'date':
+ #2003-07-29T20:05:11.598637Z
+ timestr = lpart.firstChild.nodeValue
+ self.date = parse_apr_time(timestr)
+ elif lpart.nodeName == 'paths':
+ self.strpaths = []
+ for ppart in filter(None, lpart.childNodes):
+ if ppart.nodeType == ppart.ELEMENT_NODE:
+ self.strpaths.append(PathEntry(ppart))
+ def __repr__(self):
+ return '<Logentry rev=%d author=%s date=%s>' % (
+ self.rev, self.author, self.date)
+
+
Added: pypy/branch/py12/py/_plugin/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1 @@
+#
Added: pypy/branch/py12/py/_plugin/hookspec.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/hookspec.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,172 @@
+"""
+hook specifications for py.test plugins
+"""
+
+# -------------------------------------------------------------------------
+# Command line and configuration
+# -------------------------------------------------------------------------
+
+def pytest_addoption(parser):
+ """ called before commandline parsing. """
+
+def pytest_registerhooks(pluginmanager):
+ """ called after commandline parsing before pytest_configure. """
+
+def pytest_namespace():
+ """ return dict of name->object which will get stored at py.test. namespace"""
+
+def pytest_configure(config):
+ """ called after command line options have been parsed.
+ and all plugins and initial conftest files been loaded.
+ """
+
+def pytest_unconfigure(config):
+ """ called before test process is exited. """
+
+# -------------------------------------------------------------------------
+# collection hooks
+# -------------------------------------------------------------------------
+
+def pytest_ignore_collect_path(path, config):
+ """ return true value to prevent considering this path for collection.
+ This hook is consulted for all files and directories prior to considering
+ collection hooks.
+ """
+pytest_ignore_collect_path.firstresult = True
+
+def pytest_collect_directory(path, parent):
+ """ return Collection node or None for the given path. """
+pytest_collect_directory.firstresult = True
+
+def pytest_collect_file(path, parent):
+ """ return Collection node or None for the given path. """
+
+def pytest_collectstart(collector):
+ """ collector starts collecting. """
+
+def pytest_collectreport(report):
+ """ collector finished collecting. """
+
+def pytest_deselected(items):
+ """ called for test items deselected by keyword. """
+
+def pytest_make_collect_report(collector):
+ """ perform a collection and return a collection. """
+pytest_make_collect_report.firstresult = True
+
+# XXX rename to item_collected()? meaning in distribution context?
+def pytest_itemstart(item, node=None):
+ """ test item gets collected. """
+
+# -------------------------------------------------------------------------
+# Python test function related hooks
+# -------------------------------------------------------------------------
+
+def pytest_pycollect_makemodule(path, parent):
+ """ return a Module collector or None for the given path.
+ This hook will be called for each matching test module path.
+ The pytest_collect_file hook needs to be used if you want to
+ create test modules for files that do not match as a test module.
+ """
+pytest_pycollect_makemodule.firstresult = True
+
+def pytest_pycollect_makeitem(collector, name, obj):
+ """ return custom item/collector for a python object in a module, or None. """
+pytest_pycollect_makeitem.firstresult = True
+
+def pytest_pyfunc_call(pyfuncitem):
+ """ call underlying test function. """
+pytest_pyfunc_call.firstresult = True
+
+def pytest_generate_tests(metafunc):
+ """ generate (multiple) parametrized calls to a test function."""
+
+# -------------------------------------------------------------------------
+# generic runtest related hooks
+# -------------------------------------------------------------------------
+
+def pytest_runtest_protocol(item):
+ """ implement fixture, run and report about the given test item. """
+pytest_runtest_protocol.firstresult = True
+
+def pytest_runtest_setup(item):
+ """ called before pytest_runtest_call(). """
+
+def pytest_runtest_call(item):
+ """ execute test item. """
+
+def pytest_runtest_teardown(item):
+ """ called after pytest_runtest_call(). """
+
+def pytest_runtest_makereport(item, call):
+ """ make a test report for the given item and call outcome. """
+pytest_runtest_makereport.firstresult = True
+
+def pytest_runtest_logreport(report):
+ """ process item test report. """
+
+# special handling for final teardown - somewhat internal for now
+def pytest__teardown_final(session):
+ """ called before test session finishes. """
+pytest__teardown_final.firstresult = True
+
+def pytest__teardown_final_logerror(report):
+ """ called if runtest_teardown_final failed. """
+
+# -------------------------------------------------------------------------
+# test session related hooks
+# -------------------------------------------------------------------------
+
+def pytest_sessionstart(session):
+ """ before session.main() is called. """
+
+def pytest_sessionfinish(session, exitstatus):
+ """ whole test run finishes. """
+
+# -------------------------------------------------------------------------
+# hooks for influencing reporting (invoked from pytest_terminal)
+# -------------------------------------------------------------------------
+
+def pytest_report_header(config):
+ """ return a string to be displayed as header info for terminal reporting."""
+
+def pytest_report_teststatus(report):
+ """ return result-category, shortletter and verbose word for reporting."""
+pytest_report_teststatus.firstresult = True
+
+def pytest_terminal_summary(terminalreporter):
+ """ add additional section in terminal summary reporting. """
+
+def pytest_report_iteminfo(item):
+ """ return (fspath, lineno, name) for the item.
+ the information is used for result display and to sort tests
+ """
+pytest_report_iteminfo.firstresult = True
+
+# -------------------------------------------------------------------------
+# doctest hooks
+# -------------------------------------------------------------------------
+
+def pytest_doctest_prepare_content(content):
+ """ return processed content for a given doctest"""
+pytest_doctest_prepare_content.firstresult = True
+
+
+# -------------------------------------------------------------------------
+# error handling and internal debugging hooks
+# -------------------------------------------------------------------------
+
+def pytest_plugin_registered(plugin, manager):
+ """ a new py lib plugin got registered. """
+
+def pytest_plugin_unregistered(plugin):
+ """ a py lib plugin got unregistered. """
+
+def pytest_internalerror(excrepr):
+ """ called for internal errors. """
+
+def pytest_keyboard_interrupt(excinfo):
+ """ called for keyboard interrupt. """
+
+def pytest_trace(category, msg):
+ """ called for debug info. """
Added: pypy/branch/py12/py/_plugin/pytest__pytest.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest__pytest.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,100 @@
+import py
+
+from py._test.pluginmanager import HookRelay
+
+def pytest_funcarg___pytest(request):
+ return PytestArg(request)
+
+class PytestArg:
+ def __init__(self, request):
+ self.request = request
+
+ def gethookrecorder(self, hook):
+ hookrecorder = HookRecorder(hook._registry)
+ hookrecorder.start_recording(hook._hookspecs)
+ self.request.addfinalizer(hookrecorder.finish_recording)
+ return hookrecorder
+
+class ParsedCall:
+ def __init__(self, name, locals):
+ assert '_name' not in locals
+ self.__dict__.update(locals)
+ self.__dict__.pop('self')
+ self._name = name
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ del d['_name']
+ return "<ParsedCall %r(**%r)>" %(self._name, d)
+
+class HookRecorder:
+ def __init__(self, registry):
+ self._registry = registry
+ self.calls = []
+ self._recorders = {}
+
+ def start_recording(self, hookspecs):
+ if not isinstance(hookspecs, (list, tuple)):
+ hookspecs = [hookspecs]
+ for hookspec in hookspecs:
+ assert hookspec not in self._recorders
+ class RecordCalls:
+ _recorder = self
+ for name, method in vars(hookspec).items():
+ if name[0] != "_":
+ setattr(RecordCalls, name, self._makecallparser(method))
+ recorder = RecordCalls()
+ self._recorders[hookspec] = recorder
+ self._registry.register(recorder)
+ self.hook = HookRelay(hookspecs, registry=self._registry)
+
+ def finish_recording(self):
+ for recorder in self._recorders.values():
+ self._registry.unregister(recorder)
+ self._recorders.clear()
+
+ def _makecallparser(self, method):
+ name = method.__name__
+ args, varargs, varkw, default = py.std.inspect.getargspec(method)
+ if not args or args[0] != "self":
+ args.insert(0, 'self')
+ fspec = py.std.inspect.formatargspec(args, varargs, varkw, default)
+ # we use exec because we want to have early type
+ # errors on wrong input arguments, using
+ # *args/**kwargs delays this and gives errors
+ # elsewhere
+ exec (py.code.compile("""
+ def %(name)s%(fspec)s:
+ self._recorder.calls.append(
+ ParsedCall(%(name)r, locals()))
+ """ % locals()))
+ return locals()[name]
+
+ def getcalls(self, names):
+ if isinstance(names, str):
+ names = names.split()
+ for name in names:
+ for cls in self._recorders:
+ if name in vars(cls):
+ break
+ else:
+ raise ValueError("callname %r not found in %r" %(
+ name, self._recorders.keys()))
+ l = []
+ for call in self.calls:
+ if call._name in names:
+ l.append(call)
+ return l
+
+ def popcall(self, name):
+ for i, call in enumerate(self.calls):
+ if call._name == name:
+ del self.calls[i]
+ return call
+ raise ValueError("could not find call %r" %(name, ))
+
+ def getcall(self, name):
+ l = self.getcalls(name)
+ assert len(l) == 1, (name, l)
+ return l[0]
+
Added: pypy/branch/py12/py/_plugin/pytest_assertion.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_assertion.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,28 @@
+import py
+import sys
+
+def pytest_addoption(parser):
+ group = parser.getgroup("debugconfig")
+ group._addoption('--no-assert', action="store_true", default=False,
+ dest="noassert",
+ help="disable python assert expression reinterpretation."),
+
+def pytest_configure(config):
+ if not config.getvalue("noassert") and not config.getvalue("nomagic"):
+ warn_about_missing_assertion()
+ config._oldassertion = py.builtin.builtins.AssertionError
+ py.builtin.builtins.AssertionError = py.code._AssertionError
+
+def pytest_unconfigure(config):
+ if hasattr(config, '_oldassertion'):
+ py.builtin.builtins.AssertionError = config._oldassertion
+ del config._oldassertion
+
+def warn_about_missing_assertion():
+ try:
+ assert False
+ except AssertionError:
+ pass
+ else:
+ py.std.warnings.warn("Assertions are turned off!"
+ " (are you using python -O?)")
Added: pypy/branch/py12/py/_plugin/pytest_capture.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_capture.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,281 @@
+"""
+configurable per-test stdout/stderr capturing mechanisms.
+
+This plugin captures stdout/stderr output for each test separately.
+In case of test failures this captured output is shown grouped
+together with the test.
+
+The plugin also provides test function arguments that help to
+assert stdout/stderr output from within your tests, see the
+`funcarg example`_.
+
+
+Capturing of input/output streams during tests
+---------------------------------------------------
+
+By default ``sys.stdout`` and ``sys.stderr`` are substituted with
+temporary streams during the execution of tests and setup/teardown code.
+During the whole testing process it will re-use the same temporary
+streams allowing to play well with the logging module which easily
+takes ownership on these streams.
+
+Also, 'sys.stdin' is substituted with a file-like "null" object that
+does not return any values. This is to immediately error out
+on tests that wait on reading something from stdin.
+
+You can influence output capturing mechanisms from the command line::
+
+ py.test -s # disable all capturing
+ py.test --capture=sys # replace sys.stdout/stderr with in-mem files
+ py.test --capture=fd # point filedescriptors 1 and 2 to temp file
+
+If you set capturing values in a conftest file like this::
+
+ # conftest.py
+ option_capture = 'fd'
+
+then all tests in that directory will execute with "fd" style capturing.
+
+sys-level capturing
+------------------------------------------
+
+Capturing on 'sys' level means that ``sys.stdout`` and ``sys.stderr``
+will be replaced with in-memory files (``py.io.TextIO`` to be precise)
+that capture writes and decode non-unicode strings to a unicode object
+(using a default, usually, UTF-8, encoding).
+
+FD-level capturing and subprocesses
+------------------------------------------
+
+The ``fd`` based method means that writes going to system level files
+based on the standard file descriptors will be captured, for example
+writes such as ``os.write(1, 'hello')`` will be captured properly.
+Capturing on fd-level will include output generated from
+any subprocesses created during a test.
+
+.. _`funcarg example`:
+
+Example Usage of the capturing Function arguments
+---------------------------------------------------
+
+You can use the `capsys funcarg`_ and `capfd funcarg`_ to
+capture writes to stdout and stderr streams. Using the
+funcargs frees your test from having to care about setting/resetting
+the old streams and also interacts well with py.test's own
+per-test capturing. Here is an example test function:
+
+.. sourcecode:: python
+
+ def test_myoutput(capsys):
+ print ("hello")
+ sys.stderr.write("world\\n")
+ out, err = capsys.readouterr()
+ assert out == "hello\\n"
+ assert err == "world\\n"
+ print "next"
+ out, err = capsys.readouterr()
+ assert out == "next\\n"
+
+The ``readouterr()`` call snapshots the output so far -
+and capturing will be continued. After the test
+function finishes the original streams will
+be restored. If you want to capture on
+the filedescriptor level you can use the ``capfd`` function
+argument which offers the same interface.
+"""
+
+import py
+import os
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group._addoption('--capture', action="store", default=None,
+ metavar="method", type="choice", choices=['fd', 'sys', 'no'],
+ help="per-test capturing method: one of fd (default)|sys|no.")
+ group._addoption('-s', action="store_const", const="no", dest="capture",
+ help="shortcut for --capture=no.")
+
+def addouterr(rep, outerr):
+ repr = getattr(rep, 'longrepr', None)
+ if not hasattr(repr, 'addsection'):
+ return
+ for secname, content in zip(["out", "err"], outerr):
+ if content:
+ repr.addsection("Captured std%s" % secname, content.rstrip())
+
+def pytest_configure(config):
+ config.pluginmanager.register(CaptureManager(), 'capturemanager')
+
+class CaptureManager:
+ def __init__(self):
+ self._method2capture = {}
+
+ def _maketempfile(self):
+ f = py.std.tempfile.TemporaryFile()
+ newf = py.io.dupfile(f, encoding="UTF-8")
+ return newf
+
+ def _makestringio(self):
+ return py.io.TextIO()
+
+ def _startcapture(self, method):
+ if method == "fd":
+ return py.io.StdCaptureFD(
+ out=self._maketempfile(), err=self._maketempfile()
+ )
+ elif method == "sys":
+ return py.io.StdCapture(
+ out=self._makestringio(), err=self._makestringio()
+ )
+ else:
+ raise ValueError("unknown capturing method: %r" % method)
+
+ def _getmethod(self, config, fspath):
+ if config.option.capture:
+ method = config.option.capture
+ else:
+ try:
+ method = config._conftest.rget("option_capture", path=fspath)
+ except KeyError:
+ method = "fd"
+ if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
+ method = "sys"
+ return method
+
+ def resumecapture_item(self, item):
+ method = self._getmethod(item.config, item.fspath)
+ if not hasattr(item, 'outerr'):
+ item.outerr = ('', '') # we accumulate outerr on the item
+ return self.resumecapture(method)
+
+ def resumecapture(self, method):
+ if hasattr(self, '_capturing'):
+ raise ValueError("cannot resume, already capturing with %r" %
+ (self._capturing,))
+ if method != "no":
+ cap = self._method2capture.get(method)
+ if cap is None:
+ cap = self._startcapture(method)
+ self._method2capture[method] = cap
+ else:
+ cap.resume()
+ self._capturing = method
+
+ def suspendcapture(self, item=None):
+ self.deactivate_funcargs()
+ if hasattr(self, '_capturing'):
+ method = self._capturing
+ if method != "no":
+ cap = self._method2capture[method]
+ outerr = cap.suspend()
+ else:
+ outerr = "", ""
+ del self._capturing
+ if item:
+ outerr = (item.outerr[0] + outerr[0], item.outerr[1] + outerr[1])
+ return outerr
+ return "", ""
+
+ def activate_funcargs(self, pyfuncitem):
+ if not hasattr(pyfuncitem, 'funcargs'):
+ return
+ assert not hasattr(self, '_capturing_funcargs')
+ l = []
+ for name, obj in pyfuncitem.funcargs.items():
+ if name == 'capfd' and not hasattr(os, 'dup'):
+ py.test.skip("capfd funcarg needs os.dup")
+ if name in ('capsys', 'capfd'):
+ obj._start()
+ l.append(obj)
+ if l:
+ self._capturing_funcargs = l
+
+ def deactivate_funcargs(self):
+ if hasattr(self, '_capturing_funcargs'):
+ for capfuncarg in self._capturing_funcargs:
+ capfuncarg._finalize()
+ del self._capturing_funcargs
+
+ def pytest_make_collect_report(self, __multicall__, collector):
+ method = self._getmethod(collector.config, collector.fspath)
+ self.resumecapture(method)
+ try:
+ rep = __multicall__.execute()
+ finally:
+ outerr = self.suspendcapture()
+ addouterr(rep, outerr)
+ return rep
+
+ def pytest_runtest_setup(self, item):
+ self.resumecapture_item(item)
+
+ def pytest_runtest_call(self, item):
+ self.resumecapture_item(item)
+ self.activate_funcargs(item)
+
+ def pytest_runtest_teardown(self, item):
+ self.resumecapture_item(item)
+
+ def pytest__teardown_final(self, __multicall__, session):
+ method = self._getmethod(session.config, None)
+ self.resumecapture(method)
+ try:
+ rep = __multicall__.execute()
+ finally:
+ outerr = self.suspendcapture()
+ if rep:
+ addouterr(rep, outerr)
+ return rep
+
+ def pytest_keyboard_interrupt(self, excinfo):
+ if hasattr(self, '_capturing'):
+ self.suspendcapture()
+
+ def pytest_runtest_makereport(self, __multicall__, item, call):
+ self.deactivate_funcargs()
+ rep = __multicall__.execute()
+ outerr = self.suspendcapture(item)
+ if not rep.passed:
+ addouterr(rep, outerr)
+ if not rep.passed or rep.when == "teardown":
+ outerr = ('', '')
+ item.outerr = outerr
+ return rep
+
+def pytest_funcarg__capsys(request):
+ """captures writes to sys.stdout/sys.stderr and makes
+ them available successively via a ``capsys.readouterr()`` method
+ which returns a ``(out, err)`` tuple of captured snapshot strings.
+ """
+ return CaptureFuncarg(request, py.io.StdCapture)
+
+def pytest_funcarg__capfd(request):
+ """captures writes to file descriptors 1 and 2 and makes
+ snapshotted ``(out, err)`` string tuples available
+ via the ``capfd.readouterr()`` method. If the underlying
+ platform does not have ``os.dup`` (e.g. Jython) tests using
+ this funcarg will automatically skip.
+ """
+ return CaptureFuncarg(request, py.io.StdCaptureFD)
+
+
+class CaptureFuncarg:
+ def __init__(self, request, captureclass):
+ self._cclass = captureclass
+ #request.addfinalizer(self._finalize)
+
+ def _start(self):
+ self.capture = self._cclass()
+
+ def _finalize(self):
+ if hasattr(self, 'capture'):
+ self.capture.reset()
+ del self.capture
+
+ def readouterr(self):
+ return self.capture.readouterr()
+
+ def close(self):
+ self.capture.reset()
+ del self.capture
+
Added: pypy/branch/py12/py/_plugin/pytest_default.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_default.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,125 @@
+""" default hooks and general py.test options. """
+
+import sys
+import py
+
+def pytest_pyfunc_call(__multicall__, pyfuncitem):
+ if not __multicall__.execute():
+ testfunction = pyfuncitem.obj
+ if pyfuncitem._isyieldedfunction():
+ testfunction(*pyfuncitem._args)
+ else:
+ funcargs = pyfuncitem.funcargs
+ testfunction(**funcargs)
+
+def pytest_collect_file(path, parent):
+ ext = path.ext
+ pb = path.purebasename
+ if pb.startswith("test_") or pb.endswith("_test") or \
+ path in parent.config._argfspaths:
+ if ext == ".py":
+ return parent.ihook.pytest_pycollect_makemodule(
+ path=path, parent=parent)
+
+def pytest_pycollect_makemodule(path, parent):
+ return parent.Module(path, parent)
+
+def pytest_funcarg__pytestconfig(request):
+ """ the pytest config object with access to command line opts."""
+ return request.config
+
+def pytest_ignore_collect_path(path, config):
+ ignore_paths = config.getconftest_pathlist("collect_ignore", path=path)
+ ignore_paths = ignore_paths or []
+ excludeopt = config.getvalue("ignore")
+ if excludeopt:
+ ignore_paths.extend([py.path.local(x) for x in excludeopt])
+ return path in ignore_paths
+ # XXX more refined would be:
+ if ignore_paths:
+ for p in ignore_paths:
+ if path == p or path.relto(p):
+ return True
+
+
+def pytest_collect_directory(path, parent):
+ # XXX reconsider the following comment
+ # not use parent.Directory here as we generally
+ # want dir/conftest.py to be able to
+ # define Directory(dir) already
+ if not parent.recfilter(path): # by default special ".cvs", ...
+ # check if cmdline specified this dir or a subdir directly
+ for arg in parent.config._argfspaths:
+ if path == arg or arg.relto(path):
+ break
+ else:
+ return
+ Directory = parent.config._getcollectclass('Directory', path)
+ return Directory(path, parent=parent)
+
+def pytest_report_iteminfo(item):
+ return item.reportinfo()
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general", "running and selection options")
+ group._addoption('-x', '--exitfirst',
+ action="store_true", dest="exitfirst", default=False,
+ help="exit instantly on first error or failed test."),
+ group._addoption('-k',
+ action="store", dest="keyword", default='',
+ help="only run test items matching the given "
+ "space separated keywords. precede a keyword with '-' to negate. "
+ "Terminate the expression with ':' to treat a match as a signal "
+ "to run all subsequent tests. ")
+
+ group = parser.getgroup("collect", "collection")
+ group.addoption('--collectonly',
+ action="store_true", dest="collectonly",
+ help="only collect tests, don't execute them."),
+ group.addoption("--ignore", action="append", metavar="path",
+ help="ignore path during collection (multi-allowed).")
+ group.addoption('--confcutdir', dest="confcutdir", default=None,
+ metavar="dir",
+ help="only load conftest.py's relative to specified dir.")
+
+ group = parser.getgroup("debugconfig",
+ "test process debugging and configuration")
+ group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
+ help="base temporary directory for this test run.")
+
+def pytest_configure(config):
+ setsession(config)
+
+def setsession(config):
+ val = config.getvalue
+ if val("collectonly"):
+ from py._test.session import Session
+ config.setsessionclass(Session)
+
+# pycollect related hooks and code, should move to pytest_pycollect.py
+
+def pytest_pycollect_makeitem(__multicall__, collector, name, obj):
+ res = __multicall__.execute()
+ if res is not None:
+ return res
+ if collector._istestclasscandidate(name, obj):
+ res = collector._deprecated_join(name)
+ if res is not None:
+ return res
+ return collector.Class(name, parent=collector)
+ elif collector.funcnamefilter(name) and hasattr(obj, '__call__'):
+ res = collector._deprecated_join(name)
+ if res is not None:
+ return res
+ if is_generator(obj):
+ # XXX deprecation warning
+ return collector.Generator(name, parent=collector)
+ else:
+ return collector._genfunctions(name, obj)
+
+def is_generator(func):
+ try:
+ return py.code.getrawcode(func).co_flags & 32 # generator function
+ except AttributeError: # builtin functions have no bytecode
+ # assume them to not be generators
+ return False
Added: pypy/branch/py12/py/_plugin/pytest_doctest.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_doctest.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,100 @@
+"""
+collect and execute doctests from modules and test files.
+
+Usage
+-------------
+
+By default all files matching the ``test*.txt`` pattern will
+be run through the python standard ``doctest`` module. Issue::
+
+ py.test --doctest-glob='*.rst'
+
+to change the pattern. Additionally you can trigger running of
+tests in all python modules (including regular python test modules)::
+
+ py.test --doctest-modules
+
+You can also make these changes permanent in your project by
+putting them into a conftest.py file like this::
+
+ # content of conftest.py
+ option_doctestmodules = True
+ option_doctestglob = "*.rst"
+"""
+
+import py
+from py._code.code import TerminalRepr, ReprFileLocation
+import doctest
+
+def pytest_addoption(parser):
+ group = parser.getgroup("collect")
+ group.addoption("--doctest-modules",
+ action="store_true", default=False,
+ help="run doctests in all .py modules",
+ dest="doctestmodules")
+ group.addoption("--doctest-glob",
+ action="store", default="test*.txt", metavar="pat",
+ help="doctests file matching pattern, default: test*.txt",
+ dest="doctestglob")
+
+def pytest_collect_file(path, parent):
+ config = parent.config
+ if path.ext == ".py":
+ if config.getvalue("doctestmodules"):
+ return DoctestModule(path, parent)
+ elif path.check(fnmatch=config.getvalue("doctestglob")):
+ return DoctestTextfile(path, parent)
+
+class ReprFailDoctest(TerminalRepr):
+ def __init__(self, reprlocation, lines):
+ self.reprlocation = reprlocation
+ self.lines = lines
+ def toterminal(self, tw):
+ for line in self.lines:
+ tw.line(line)
+ self.reprlocation.toterminal(tw)
+
+class DoctestItem(py.test.collect.Item):
+ def __init__(self, path, parent):
+ name = self.__class__.__name__ + ":" + path.basename
+ super(DoctestItem, self).__init__(name=name, parent=parent)
+ self.fspath = path
+
+ def repr_failure(self, excinfo):
+ if excinfo.errisinstance(doctest.DocTestFailure):
+ doctestfailure = excinfo.value
+ example = doctestfailure.example
+ test = doctestfailure.test
+ filename = test.filename
+ lineno = test.lineno + example.lineno + 1
+ message = excinfo.type.__name__
+ reprlocation = ReprFileLocation(filename, lineno, message)
+ checker = doctest.OutputChecker()
+ REPORT_UDIFF = doctest.REPORT_UDIFF
+ filelines = py.path.local(filename).readlines(cr=0)
+ i = max(test.lineno, max(0, lineno - 10)) # XXX?
+ lines = []
+ for line in filelines[i:lineno]:
+ lines.append("%03d %s" % (i+1, line))
+ i += 1
+ lines += checker.output_difference(example,
+ doctestfailure.got, REPORT_UDIFF).split("\n")
+ return ReprFailDoctest(reprlocation, lines)
+ elif excinfo.errisinstance(doctest.UnexpectedException):
+ excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
+ return super(DoctestItem, self).repr_failure(excinfo)
+ else:
+ return super(DoctestItem, self).repr_failure(excinfo)
+
+class DoctestTextfile(DoctestItem):
+ def runtest(self):
+ if not self._deprecated_testexecution():
+ failed, tot = doctest.testfile(
+ str(self.fspath), module_relative=False,
+ raise_on_error=True, verbose=0)
+
+class DoctestModule(DoctestItem):
+ def runtest(self):
+ module = self.fspath.pyimport()
+ failed, tot = doctest.testmod(
+ module, raise_on_error=True, verbose=0)
Added: pypy/branch/py12/py/_plugin/pytest_genscript.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_genscript.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,69 @@
+#! /usr/bin/env python
+"""
+generate standalone test script to be distributed along with an application.
+"""
+
+import os
+import zlib
+import base64
+import sys
+try:
+ import pickle
+except ImportError:
+ import cPickle as pickle
+
+def pytest_addoption(parser):
+ group = parser.getgroup("debugconfig")
+ group.addoption("--genscript", action="store", default=None,
+ dest="genscript", metavar="path",
+ help="create standalone py.test script at given target path.")
+
+def pytest_configure(config):
+ genscript = config.getvalue("genscript")
+ if genscript:
+ import py
+ mydir = py.path.local(__file__).dirpath()
+ infile = mydir.join("standalonetemplate.py")
+ pybasedir = py.path.local(py.__file__).dirpath().dirpath()
+ genscript = py.path.local(genscript)
+ main(pybasedir, outfile=genscript, infile=infile)
+ raise SystemExit(0)
+
+def main(pybasedir, outfile, infile):
+ outfile = str(outfile)
+ infile = str(infile)
+ assert os.path.isabs(outfile)
+ os.chdir(str(pybasedir))
+ files = []
+ for dirpath, dirnames, filenames in os.walk("py"):
+ for f in filenames:
+ if not f.endswith(".py"):
+ continue
+
+ fn = os.path.join(dirpath, f)
+ files.append(fn)
+
+ name2src = {}
+ for f in files:
+ k = f.replace(os.sep, ".")[:-3]
+ name2src[k] = open(f, "r").read()
+
+ data = pickle.dumps(name2src, 2)
+ data = zlib.compress(data, 9)
+ data = base64.encodestring(data)
+ data = data.decode("ascii")
+
+ exe = open(infile, "r").read()
+ exe = exe.replace("@SOURCES@", data)
+
+ open(outfile, "w").write(exe)
+ os.chmod(outfile, 493) # 0755
+ sys.stdout.write("generated standalone py.test at %r, have fun!\n" % outfile)
+
+if __name__=="__main__":
+ dn = os.path.dirname
+ here = os.path.abspath(dn(__file__)) # py/plugin/
+ pybasedir = dn(dn(here))
+ outfile = os.path.join(os.getcwd(), "py.test-standalone")
+ infile = os.path.join(here, 'standalonetemplate.py')
+ main(pybasedir, outfile, infile)
Added: pypy/branch/py12/py/_plugin/pytest_helpconfig.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_helpconfig.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,164 @@
+""" provide version info, conftest/environment config names.
+"""
+import py
+import inspect, sys
+
+def pytest_addoption(parser):
+ group = parser.getgroup('debugconfig')
+ group.addoption('--version', action="store_true",
+ help="display py lib version and import information.")
+ group._addoption('-p', action="append", dest="plugins", default = [],
+ metavar="name",
+ help="early-load given plugin (multi-allowed).")
+ group.addoption('--traceconfig',
+ action="store_true", dest="traceconfig", default=False,
+ help="trace considerations of conftest.py files."),
+ group._addoption('--nomagic',
+ action="store_true", dest="nomagic", default=False,
+ help="don't reinterpret asserts, no traceback cutting. ")
+ group.addoption('--debug',
+ action="store_true", dest="debug", default=False,
+ help="generate and show internal debugging information.")
+ group.addoption("--help-config", action="store_true", dest="helpconfig",
+ help="show available conftest.py and ENV-variable names.")
+
+
+def pytest_configure(__multicall__, config):
+ if config.option.version:
+ p = py.path.local(py.__file__).dirpath()
+ sys.stderr.write("This is py.test version %s, imported from %s\n" %
+ (py.__version__, p))
+ sys.exit(0)
+ if not config.option.helpconfig:
+ return
+ __multicall__.execute()
+ options = []
+ for group in config._parser._groups:
+ options.extend(group.options)
+ widths = [0] * 10
+ tw = py.io.TerminalWriter()
+ tw.sep("-")
+ tw.line("%-13s | %-18s | %-25s | %s" %(
+ "cmdline name", "conftest.py name", "ENV-variable name", "help"))
+ tw.sep("-")
+
+ options = [opt for opt in options if opt._long_opts]
+ options.sort(key=lambda x: x._long_opts)
+ for opt in options:
+ if not opt._long_opts:
+ continue
+ optstrings = list(opt._long_opts) # + list(opt._short_opts)
+ optstrings = filter(None, optstrings)
+ optstring = "|".join(optstrings)
+ line = "%-13s | %-18s | %-25s | %s" %(
+ optstring,
+ "option_%s" % opt.dest,
+ "PYTEST_OPTION_%s" % opt.dest.upper(),
+ opt.help and opt.help or "",
+ )
+ tw.line(line[:tw.fullwidth])
+ for name, help in conftest_options:
+ line = "%-13s | %-18s | %-25s | %s" %(
+ "",
+ name,
+ "",
+ help,
+ )
+ tw.line(line[:tw.fullwidth])
+
+ tw.sep("-")
+ sys.exit(0)
+
+conftest_options = (
+ ('pytest_plugins', 'list of plugin names to load'),
+ ('collect_ignore', '(relative) paths ignored during collection'),
+ ('rsyncdirs', 'to-be-rsynced directories for dist-testing'),
+)
+
+def pytest_report_header(config):
+ lines = []
+ if config.option.debug or config.option.traceconfig:
+ lines.append("using py lib: %s" % (py.path.local(py.__file__).dirpath()))
+ if config.option.traceconfig:
+ lines.append("active plugins:")
+ plugins = []
+ items = config.pluginmanager._name2plugin.items()
+ for name, plugin in items:
+ lines.append(" %-20s: %s" %(name, repr(plugin)))
+ return lines
+
+
+# =====================================================
+# validate plugin syntax and hooks
+# =====================================================
+
+def pytest_plugin_registered(manager, plugin):
+ methods = collectattr(plugin)
+ hooks = {}
+ for hookspec in manager.hook._hookspecs:
+ hooks.update(collectattr(hookspec))
+
+ stringio = py.io.TextIO()
+ def Print(*args):
+ if args:
+ stringio.write(" ".join(map(str, args)))
+ stringio.write("\n")
+
+ fail = False
+ while methods:
+ name, method = methods.popitem()
+ #print "checking", name
+ if isgenerichook(name):
+ continue
+ if name not in hooks:
+ if not getattr(method, 'optionalhook', False):
+ Print("found unknown hook:", name)
+ fail = True
+ else:
+ #print "checking", method
+ method_args = getargs(method)
+ #print "method_args", method_args
+ if '__multicall__' in method_args:
+ method_args.remove('__multicall__')
+ hook = hooks[name]
+ hookargs = getargs(hook)
+ for arg in method_args:
+ if arg not in hookargs:
+ Print("argument %r not available" %(arg, ))
+ Print("actual definition: %s" %(formatdef(method)))
+ Print("available hook arguments: %s" %
+ ", ".join(hookargs))
+ fail = True
+ break
+ #if not fail:
+ # print "matching hook:", formatdef(method)
+ if fail:
+ name = getattr(plugin, '__name__', plugin)
+ raise PluginValidationError("%s:\n%s" %(name, stringio.getvalue()))
+
+class PluginValidationError(Exception):
+ """ plugin failed validation. """
+
+def isgenerichook(name):
+ return name == "pytest_plugins" or \
+ name.startswith("pytest_funcarg__")
+
+def getargs(func):
+ args = inspect.getargs(py.code.getrawcode(func))[0]
+ startindex = inspect.ismethod(func) and 1 or 0
+ return args[startindex:]
+
+def collectattr(obj, prefixes=("pytest_",)):
+ methods = {}
+ for apiname in dir(obj):
+ for prefix in prefixes:
+ if apiname.startswith(prefix):
+ methods[apiname] = getattr(obj, apiname)
+ return methods
+
+def formatdef(func):
+ return "%s%s" %(
+ func.__name__,
+ inspect.formatargspec(*inspect.getargspec(func))
+ )
+
Added: pypy/branch/py12/py/_plugin/pytest_hooklog.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_hooklog.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,33 @@
+""" log invocations of extension hooks to a file. """
+import py
+
+def pytest_addoption(parser):
+ parser.addoption("--hooklog", dest="hooklog", default=None,
+ help="write hook calls to the given file.")
+
+def pytest_configure(config):
+ hooklog = config.getvalue("hooklog")
+ if hooklog:
+ config._hooklogfile = open(hooklog, 'w')
+ config._hooklog_oldperformcall = config.hook._performcall
+ config.hook._performcall = (lambda name, multicall:
+ logged_call(name=name, multicall=multicall, config=config))
+
+def logged_call(name, multicall, config):
+ f = config._hooklogfile
+ f.write("%s(**%s)\n" % (name, multicall.kwargs))
+ try:
+ res = config._hooklog_oldperformcall(name=name, multicall=multicall)
+ except:
+ f.write("-> exception")
+ raise
+ f.write("-> %r" % (res,))
+ return res
+
+def pytest_unconfigure(config):
+ try:
+ del config.hook.__dict__['_performcall']
+ except KeyError:
+ pass
+ else:
+ config._hooklogfile.close()
Added: pypy/branch/py12/py/_plugin/pytest_junitxml.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_junitxml.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,161 @@
+"""
+ logging of test results in JUnit-XML format, for use with Hudson
+ and build integration servers. Based on initial code from Ross Lawley.
+"""
+
+import py
+import time
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting")
+ group.addoption('--junitxml', action="store", dest="xmlpath",
+ metavar="path", default=None,
+ help="create junit-xml style report file at given path.")
+
+def pytest_configure(config):
+ xmlpath = config.option.xmlpath
+ if xmlpath:
+ config._xml = LogXML(xmlpath)
+ config.pluginmanager.register(config._xml)
+
+def pytest_unconfigure(config):
+ xml = getattr(config, '_xml', None)
+ if xml:
+ del config._xml
+ config.pluginmanager.unregister(xml)
+
+class LogXML(object):
+ def __init__(self, logfile):
+ self.logfile = logfile
+ self.test_logs = []
+ self.passed = self.skipped = 0
+ self.failed = self.errors = 0
+ self._durations = {}
+
+ def _opentestcase(self, report):
+ node = report.item
+ d = {'time': self._durations.pop(report.item, "0")}
+ names = [x.replace(".py", "") for x in node.listnames() if x != "()"]
+ d['classname'] = ".".join(names[:-1])
+ d['name'] = names[-1]
+ attrs = ['%s="%s"' % item for item in sorted(d.items())]
+ self.test_logs.append("\n<testcase %s>" % " ".join(attrs))
+
+ def _closetestcase(self):
+ self.test_logs.append("</testcase>")
+
+ def appendlog(self, fmt, *args):
+ args = tuple([py.xml.escape(arg) for arg in args])
+ self.test_logs.append(fmt % args)
+
+ def append_pass(self, report):
+ self.passed += 1
+ self._opentestcase(report)
+ self._closetestcase()
+
+ def append_failure(self, report):
+ self._opentestcase(report)
+ #msg = str(report.longrepr.reprtraceback.extraline)
+ self.appendlog('<failure message="test failure">%s</failure>',
+ report.longrepr)
+ self._closetestcase()
+ self.failed += 1
+
+ def _opentestcase_collectfailure(self, report):
+ node = report.collector
+ d = {'time': '???'}
+ names = [x.replace(".py", "") for x in node.listnames() if x != "()"]
+ d['classname'] = ".".join(names[:-1])
+ d['name'] = names[-1]
+ attrs = ['%s="%s"' % item for item in sorted(d.items())]
+ self.test_logs.append("\n<testcase %s>" % " ".join(attrs))
+
+ def append_collect_failure(self, report):
+ self._opentestcase_collectfailure(report)
+ #msg = str(report.longrepr.reprtraceback.extraline)
+ self.appendlog('<failure message="collection failure">%s</failure>',
+ report.longrepr)
+ self._closetestcase()
+ self.errors += 1
+
+ def append_collect_skipped(self, report):
+ self._opentestcase_collectfailure(report)
+ #msg = str(report.longrepr.reprtraceback.extraline)
+ self.appendlog('<skipped message="collection skipped">%s</skipped>',
+ report.longrepr)
+ self._closetestcase()
+ self.skipped += 1
+
+ def append_error(self, report):
+ self._opentestcase(report)
+ self.appendlog('<error message="test setup failure">%s</error>',
+ report.longrepr)
+ self._closetestcase()
+ self.errors += 1
+
+ def append_skipped(self, report):
+ self._opentestcase(report)
+ self.appendlog("<skipped/>")
+ self._closetestcase()
+ self.skipped += 1
+
+ def pytest_runtest_logreport(self, report):
+ if report.passed:
+ self.append_pass(report)
+ elif report.failed:
+ if report.when != "call":
+ self.append_error(report)
+ else:
+ self.append_failure(report)
+ elif report.skipped:
+ self.append_skipped(report)
+
+ def pytest_runtest_call(self, item, __multicall__):
+ start = time.time()
+ try:
+ return __multicall__.execute()
+ finally:
+ self._durations[item] = time.time() - start
+
+ def pytest_collectreport(self, report):
+ if not report.passed:
+ if report.failed:
+ self.append_collect_failure(report)
+ else:
+ self.append_collect_skipped(report)
+
+ def pytest_internalerror(self, excrepr):
+ self.errors += 1
+ data = py.xml.escape(excrepr)
+ self.test_logs.append(
+ '\n<testcase classname="pytest" name="internal">'
+ ' <error message="internal error">'
+ '%s</error></testcase>' % data)
+
+ def pytest_sessionstart(self, session):
+ self.suite_start_time = time.time()
+
+ def pytest_sessionfinish(self, session, exitstatus, __multicall__):
+ if py.std.sys.version_info[0] < 3:
+ logfile = py.std.codecs.open(self.logfile, 'w', encoding='utf-8')
+ else:
+ logfile = open(self.logfile, 'w', encoding='utf-8')
+
+ suite_stop_time = time.time()
+ suite_time_delta = suite_stop_time - self.suite_start_time
+ numtests = self.passed + self.failed
+ logfile.write('<?xml version="1.0" encoding="utf-8"?>')
+ logfile.write('<testsuite ')
+ logfile.write('name="" ')
+ logfile.write('errors="%i" ' % self.errors)
+ logfile.write('failures="%i" ' % self.failed)
+ logfile.write('skips="%i" ' % self.skipped)
+ logfile.write('tests="%i" ' % numtests)
+ logfile.write('time="%.3f"' % suite_time_delta)
+ logfile.write(' >')
+ logfile.writelines(self.test_logs)
+ logfile.write('</testsuite>')
+ logfile.close()
+ tw = session.config.pluginmanager.getplugin("terminalreporter")._tw
+ tw.line()
+ tw.sep("-", "generated xml file: %s" %(self.logfile))
Added: pypy/branch/py12/py/_plugin/pytest_mark.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_mark.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,152 @@
+"""
+generic mechanism for marking python functions.
+
+By using the ``py.test.mark`` helper you can instantiate
+decorators that will set named meta data on test functions.
+
+Marking a single function
+----------------------------------------------------
+
+You can "mark" a test function with meta data like this::
+
+ @py.test.mark.webtest
+ def test_send_http():
+ ...
+
+This will set a "Marker" instance as a function attribute named "webtest".
+You can also specify parametrized meta data like this::
+
+ @py.test.mark.webtest(firefox=30)
+ def test_receive():
+ ...
+
+The named marker can be accessed like this later::
+
+ test_receive.webtest.kwargs['firefox'] == 30
+
+In addition to setting key-value pairs you can also use positional arguments::
+
+ @py.test.mark.webtest("triangular")
+ def test_receive():
+ ...
+
+and later access it with ``test_receive.webtest.args[0] == 'triangular'``.
+
+.. _`scoped-marking`:
+
+Marking classes or modules
+----------------------------------------------------
+
+To mark all methods of a class set a ``pytestmark`` attribute like this::
+
+ import py
+
+ class TestClass:
+ pytestmark = py.test.mark.webtest
+
+You can re-use the same markers that you would use for decorating
+a function - in fact this marker decorator will be applied
+to all test methods of the class.
+
+You can also set a module level marker::
+
+ import py
+ pytestmark = py.test.mark.webtest
+
+in which case the marker decorator will be applied to all functions and
+methods defined in the module.
+
+The order in which marker functions are called is this::
+
+ per-function (upon import of module already)
+ per-class
+ per-module
+
+Later called markers may overwrite previous key-value settings.
+Positional arguments are all appended to the same 'args' list
+of the Marker object.
+
+Using "-k MARKNAME" to select tests
+----------------------------------------------------
+
+You can use the ``-k`` command line option to select
+tests::
+
+ py.test -k webtest # will only run tests marked as webtest
+
+"""
+import py
+
+def pytest_namespace():
+ return {'mark': MarkGenerator()}
+
+class MarkGenerator:
+ """ non-underscore attributes of this object can be used as decorators for
+ marking test functions. Example: @py.test.mark.slowtest in front of a
+ function will set the 'slowtest' marker object on it. """
+ def __getattr__(self, name):
+ if name[0] == "_":
+ raise AttributeError(name)
+ return MarkDecorator(name)
+
+class MarkDecorator:
+ """ decorator for setting function attributes. """
+ def __init__(self, name):
+ self.markname = name
+ self.kwargs = {}
+ self.args = []
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ name = d.pop('markname')
+ return "<MarkDecorator %r %r>" %(name, d)
+
+ def __call__(self, *args, **kwargs):
+ """ if passed a single callable argument: decorate it with mark info.
+ otherwise add *args/**kwargs in-place to mark information. """
+ if args:
+ if len(args) == 1 and hasattr(args[0], '__call__'):
+ func = args[0]
+ holder = getattr(func, self.markname, None)
+ if holder is None:
+ holder = MarkInfo(self.markname, self.args, self.kwargs)
+ setattr(func, self.markname, holder)
+ else:
+ holder.kwargs.update(self.kwargs)
+ holder.args.extend(self.args)
+ return func
+ else:
+ self.args.extend(args)
+ self.kwargs.update(kwargs)
+ return self
+
+class MarkInfo:
+ def __init__(self, name, args, kwargs):
+ self._name = name
+ self.args = args
+ self.kwargs = kwargs
+
+ def __getattr__(self, name):
+ if name[0] != '_' and name in self.kwargs:
+ py.log._apiwarn("1.1", "use .kwargs attribute to access key-values")
+ return self.kwargs[name]
+ raise AttributeError(name)
+
+ def __repr__(self):
+ return "<MarkInfo %r args=%r kwargs=%r>" % (
+ self._name, self.args, self.kwargs)
+
+
+def pytest_pycollect_makeitem(__multicall__, collector, name, obj):
+ item = __multicall__.execute()
+ if isinstance(item, py.test.collect.Function):
+ cls = collector.getparent(py.test.collect.Class)
+ mod = collector.getparent(py.test.collect.Module)
+ func = item.obj
+ func = getattr(func, '__func__', func) # py3
+ func = getattr(func, 'im_func', func) # py2
+ for parent in [x for x in (mod, cls) if x]:
+ marker = getattr(parent.obj, 'pytestmark', None)
+ if isinstance(marker, MarkDecorator):
+ marker(func)
+ return item
Added: pypy/branch/py12/py/_plugin/pytest_monkeypatch.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_monkeypatch.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,141 @@
+"""
+safely patch object attributes, dicts and environment variables.
+
+Usage
+----------------
+
+Use the `monkeypatch funcarg`_ to tweak your global test environment
+for running a particular test. You can safely set/del an attribute,
+dictionary item or environment variable by respective methods
+on the monkeypatch funcarg. If you want e.g. to set an ENV1 variable
+and have os.path.expanduser return a particular directory, you can
+write it down like this:
+
+.. sourcecode:: python
+
+ def test_mytest(monkeypatch):
+ monkeypatch.setenv('ENV1', 'myval')
+ monkeypatch.setattr(os.path, 'expanduser', lambda x: '/tmp/xyz')
+        ... # your test code that uses those patched values implicitly
+
+After the test function finished all modifications will be undone,
+because the ``monkeypatch.undo()`` method is registered as a finalizer.
+
+``monkeypatch.setattr/delattr/delitem/delenv()`` all
+by default raise an Exception if the target does not exist.
+Pass ``raising=False`` if you want to skip this check.
+
+prepending to PATH or other environment variables
+---------------------------------------------------------
+
+To prepend a value to an already existing environment parameter:
+
+.. sourcecode:: python
+
+ def test_mypath_finding(monkeypatch):
+ monkeypatch.setenv('PATH', 'x/y', prepend=":")
+ # in bash language: export PATH=x/y:$PATH
+
+calling "undo" finalization explicitly
+-----------------------------------------
+
+At the end of function execution py.test invokes
+a teardown hook which undoes all monkeypatch changes.
+If you do not want to wait that long you can call
+finalization explicitly::
+
+ monkeypatch.undo()
+
+This will undo previous changes. This call consumes the
+undo stack. Calling it a second time has no effect unless
+you start monkeypatching after the undo call.
+
+.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
+"""
+
+import py, os, sys
+
+def pytest_funcarg__monkeypatch(request):
+ """The returned ``monkeypatch`` funcarg provides these
+ helper methods to modify objects, dictionaries or os.environ::
+
+ monkeypatch.setattr(obj, name, value, raising=True)
+ monkeypatch.delattr(obj, name, raising=True)
+ monkeypatch.setitem(mapping, name, value)
+ monkeypatch.delitem(obj, name, raising=True)
+ monkeypatch.setenv(name, value, prepend=False)
+ monkeypatch.delenv(name, value, raising=True)
+ monkeypatch.syspath_prepend(path)
+
+ All modifications will be undone when the requesting
+ test function finished its execution. The ``raising``
+ parameter determines if a KeyError or AttributeError
+ will be raised if the set/deletion operation has no target.
+ """
+ monkeypatch = MonkeyPatch()
+ request.addfinalizer(monkeypatch.undo)
+ return monkeypatch
+
+notset = object()
+
+class MonkeyPatch:
+ def __init__(self):
+ self._setattr = []
+ self._setitem = []
+
+ def setattr(self, obj, name, value, raising=True):
+ oldval = getattr(obj, name, notset)
+ if raising and oldval is notset:
+ raise AttributeError("%r has no attribute %r" %(obj, name))
+ self._setattr.insert(0, (obj, name, oldval))
+ setattr(obj, name, value)
+
+ def delattr(self, obj, name, raising=True):
+ if not hasattr(obj, name):
+ if raising:
+ raise AttributeError(name)
+ else:
+ self._setattr.insert(0, (obj, name, getattr(obj, name, notset)))
+ delattr(obj, name)
+
+ def setitem(self, dic, name, value):
+ self._setitem.insert(0, (dic, name, dic.get(name, notset)))
+ dic[name] = value
+
+ def delitem(self, dic, name, raising=True):
+ if name not in dic:
+ if raising:
+ raise KeyError(name)
+ else:
+ self._setitem.insert(0, (dic, name, dic.get(name, notset)))
+ del dic[name]
+
+ def setenv(self, name, value, prepend=None):
+ value = str(value)
+ if prepend and name in os.environ:
+ value = value + prepend + os.environ[name]
+ self.setitem(os.environ, name, value)
+
+ def delenv(self, name, raising=True):
+ self.delitem(os.environ, name, raising=raising)
+
+ def syspath_prepend(self, path):
+ if not hasattr(self, '_savesyspath'):
+ self._savesyspath = sys.path[:]
+ sys.path.insert(0, str(path))
+
+ def undo(self):
+ for obj, name, value in self._setattr:
+ if value is not notset:
+ setattr(obj, name, value)
+ else:
+ delattr(obj, name)
+ self._setattr[:] = []
+ for dictionary, name, value in self._setitem:
+ if value is notset:
+ del dictionary[name]
+ else:
+ dictionary[name] = value
+ self._setitem[:] = []
+ if hasattr(self, '_savesyspath'):
+ sys.path[:] = self._savesyspath
Added: pypy/branch/py12/py/_plugin/pytest_nose.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_nose.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,98 @@
+"""nose-compatibility plugin: allow to run nose test suites natively.
+
+This is an experimental plugin for allowing to run tests written
+in 'nosetests' style with py.test.
+
+Usage
+-------------
+
+type::
+
+ py.test # instead of 'nosetests'
+
+and you should be able to run nose style tests and at the same
+time can make full use of py.test's capabilities.
+
+Supported nose Idioms
+----------------------
+
+* setup and teardown at module/class/method level
+* SkipTest exceptions and markers
+* setup/teardown decorators
+* yield-based tests and their setup
+* general usage of nose utilities
+
+Unsupported idioms / issues
+----------------------------------
+
+- nose-style doctests are not collected and executed correctly,
+ also fixtures don't work.
+
+- no nose-configuration is recognized
+
+If you find other issues or have suggestions please run::
+
+ py.test --pastebin=all
+
+and send the resulting URL to a py.test contact channel,
+at best to the mailing list.
+"""
+import py
+import inspect
+import sys
+
+def pytest_runtest_makereport(__multicall__, item, call):
+ SkipTest = getattr(sys.modules.get('nose', None), 'SkipTest', None)
+ if SkipTest:
+ if call.excinfo and call.excinfo.errisinstance(SkipTest):
+ # let's substitute the excinfo with a py.test.skip one
+ call2 = call.__class__(lambda: py.test.skip(str(call.excinfo.value)), call.when)
+ call.excinfo = call2.excinfo
+
+def pytest_report_iteminfo(item):
+ # nose 0.11.1 uses decorators for "raises" and other helpers.
+ # for reporting progress by filename we fish for the filename
+ if isinstance(item, py.test.collect.Function):
+ obj = item.obj
+ if hasattr(obj, 'compat_co_firstlineno'):
+ fn = sys.modules[obj.__module__].__file__
+ if fn.endswith(".pyc"):
+ fn = fn[:-1]
+ #assert 0
+ #fn = inspect.getsourcefile(obj) or inspect.getfile(obj)
+ lineno = obj.compat_co_firstlineno
+ return py.path.local(fn), lineno, obj.__module__
+
+def pytest_runtest_setup(item):
+ if isinstance(item, (py.test.collect.Function)):
+ if isinstance(item.parent, py.test.collect.Generator):
+ gen = item.parent
+ if not hasattr(gen, '_nosegensetup'):
+ call_optional(gen.obj, 'setup')
+ if isinstance(gen.parent, py.test.collect.Instance):
+ call_optional(gen.parent.obj, 'setup')
+ gen._nosegensetup = True
+ if not call_optional(item.obj, 'setup'):
+ # call module level setup if there is no object level one
+ call_optional(item.parent.obj, 'setup')
+
+def pytest_runtest_teardown(item):
+ if isinstance(item, py.test.collect.Function):
+ if not call_optional(item.obj, 'teardown'):
+ call_optional(item.parent.obj, 'teardown')
+ #if hasattr(item.parent, '_nosegensetup'):
+ # #call_optional(item._nosegensetup, 'teardown')
+ # del item.parent._nosegensetup
+
+def pytest_make_collect_report(collector):
+ if isinstance(collector, py.test.collect.Generator):
+ call_optional(collector.obj, 'setup')
+
+def call_optional(obj, name):
+ method = getattr(obj, name, None)
+ if method:
+ ismethod = inspect.ismethod(method)
+ rawcode = py.code.getrawcode(method)
+ if not rawcode.co_varnames[ismethod:]:
+ method()
+ return True
Added: pypy/branch/py12/py/_plugin/pytest_pastebin.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_pastebin.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,83 @@
+"""
+submit failure or test session information to a pastebin service.
+
+Usage
+----------
+
+**Creating a URL for each test failure**::
+
+ py.test --pastebin=failed
+
+This will submit test run information to a remote Paste service and
+provide a URL for each failure. You may select tests as usual or add
+for example ``-x`` if you only want to send one particular failure.
+
+**Creating a URL for a whole test session log**::
+
+ py.test --pastebin=all
+
+Currently only pasting to the http://paste.pocoo.org service is implemented.
+
+"""
+import py, sys
+
+class url:
+ base = "http://paste.pocoo.org"
+ xmlrpc = base + "/xmlrpc/"
+ show = base + "/show/"
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting")
+ group._addoption('--pastebin', metavar="mode",
+ action='store', dest="pastebin", default=None,
+ type="choice", choices=['failed', 'all'],
+ help="send failed|all info to Pocoo pastebin service.")
+
+def pytest_configure(__multicall__, config):
+ import tempfile
+ __multicall__.execute()
+ if config.option.pastebin == "all":
+ config._pastebinfile = tempfile.TemporaryFile('w+')
+ tr = config.pluginmanager.getplugin('terminalreporter')
+ oldwrite = tr._tw.write
+ def tee_write(s, **kwargs):
+ oldwrite(s, **kwargs)
+ config._pastebinfile.write(str(s))
+ tr._tw.write = tee_write
+
+def pytest_unconfigure(config):
+ if hasattr(config, '_pastebinfile'):
+ config._pastebinfile.seek(0)
+ sessionlog = config._pastebinfile.read()
+ config._pastebinfile.close()
+ del config._pastebinfile
+ proxyid = getproxy().newPaste("python", sessionlog)
+ pastebinurl = "%s%s" % (url.show, proxyid)
+ sys.stderr.write("pastebin session-log: %s\n" % pastebinurl)
+ tr = config.pluginmanager.getplugin('terminalreporter')
+ del tr._tw.__dict__['write']
+
+def getproxy():
+ return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes
+
+def pytest_terminal_summary(terminalreporter):
+ if terminalreporter.config.option.pastebin != "failed":
+ return
+ tr = terminalreporter
+ if 'failed' in tr.stats:
+ terminalreporter.write_sep("=", "Sending information to Paste Service")
+ if tr.config.option.debug:
+ terminalreporter.write_line("xmlrpcurl: %s" %(url.xmlrpc,))
+ serverproxy = getproxy()
+ for rep in terminalreporter.stats.get('failed'):
+ try:
+ msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
+ except AttributeError:
+ msg = tr._getfailureheadline(rep)
+ tw = py.io.TerminalWriter(stringio=True)
+ rep.toterminal(tw)
+ s = tw.stringio.getvalue()
+ assert len(s)
+ proxyid = serverproxy.newPaste("python", s)
+ pastebinurl = "%s%s" % (url.show, proxyid)
+ tr.write_line("%s --> %s" %(msg, pastebinurl))
Added: pypy/branch/py12/py/_plugin/pytest_pdb.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_pdb.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,105 @@
+"""
+interactive debugging with the Python Debugger.
+"""
+import py
+import pdb, sys, linecache
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group._addoption('--pdb',
+ action="store_true", dest="usepdb", default=False,
+ help="start the interactive Python debugger on errors.")
+
+def pytest_configure(config):
+ if config.getvalue("usepdb"):
+ config.pluginmanager.register(PdbInvoke(), 'pdb')
+
+class PdbInvoke:
+ def pytest_runtest_makereport(self, item, call):
+ if call.excinfo and not \
+ call.excinfo.errisinstance(py.test.skip.Exception):
+ # play well with capturing, slightly hackish
+ capman = item.config.pluginmanager.getplugin('capturemanager')
+ capman.suspendcapture()
+
+ tw = py.io.TerminalWriter()
+ repr = call.excinfo.getrepr()
+ repr.toterminal(tw)
+ post_mortem(call.excinfo._excinfo[2])
+
+ capman.resumecapture_item(item)
+
+class Pdb(py.std.pdb.Pdb):
+ def do_list(self, arg):
+ self.lastcmd = 'list'
+ last = None
+ if arg:
+ try:
+ x = eval(arg, {}, {})
+ if type(x) == type(()):
+ first, last = x
+ first = int(first)
+ last = int(last)
+ if last < first:
+ # Assume it's a count
+ last = first + last
+ else:
+ first = max(1, int(x) - 5)
+ except:
+ print ('*** Error in argument: %s' % repr(arg))
+ return
+ elif self.lineno is None:
+ first = max(1, self.curframe.f_lineno - 5)
+ else:
+ first = self.lineno + 1
+ if last is None:
+ last = first + 10
+ filename = self.curframe.f_code.co_filename
+ breaklist = self.get_file_breaks(filename)
+ try:
+ for lineno in range(first, last+1):
+ # start difference from normal do_line
+ line = self._getline(filename, lineno)
+ # end difference from normal do_line
+ if not line:
+ print ('[EOF]')
+ break
+ else:
+ s = repr(lineno).rjust(3)
+ if len(s) < 4: s = s + ' '
+ if lineno in breaklist: s = s + 'B'
+ else: s = s + ' '
+ if lineno == self.curframe.f_lineno:
+ s = s + '->'
+ sys.stdout.write(s + '\t' + line)
+ self.lineno = lineno
+ except KeyboardInterrupt:
+ pass
+ do_l = do_list
+
+ def _getline(self, filename, lineno):
+ if hasattr(filename, "__source__"):
+ try:
+ return filename.__source__.lines[lineno - 1] + "\n"
+ except IndexError:
+ return None
+ return linecache.getline(filename, lineno)
+
+ def get_stack(self, f, t):
+ # Modified from bdb.py to be able to walk the stack beyond generators,
+ # which does not work in the normal pdb :-(
+ stack, i = pdb.Pdb.get_stack(self, f, t)
+ if f is None:
+ i = max(0, len(stack) - 1)
+ while i and stack[i][0].f_locals.get("__tracebackhide__", False):
+ i-=1
+ return stack, i
+
+def post_mortem(t):
+ p = Pdb()
+ p.reset()
+ p.interaction(None, t)
+
+def set_trace():
+ # again, a copy of the version in pdb.py
+ Pdb().set_trace(sys._getframe().f_back)
Added: pypy/branch/py12/py/_plugin/pytest_pylint.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_pylint.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,36 @@
+"""pylint plugin
+
+XXX: Currently in progress, NOT IN WORKING STATE.
+"""
+import py
+
+pylint = py.test.importorskip("pylint.lint")
+
+def pytest_addoption(parser):
+ group = parser.getgroup('pylint options')
+ group.addoption('--pylint', action='store_true',
+ default=False, dest='pylint',
+ help='run pylint on python files.')
+
+def pytest_collect_file(path, parent):
+ if path.ext == ".py":
+ if parent.config.getvalue('pylint'):
+ return PylintItem(path, parent)
+
+#def pytest_terminal_summary(terminalreporter):
+# print 'placeholder for pylint output'
+
+class PylintItem(py.test.collect.Item):
+ def runtest(self):
+ capture = py.io.StdCaptureFD()
+ try:
+ linter = pylint.lint.PyLinter()
+ linter.check(str(self.fspath))
+ finally:
+ out, err = capture.reset()
+ rating = out.strip().split('\n')[-1]
+ sys.stdout.write(">>>")
+ print(rating)
+ assert 0
+
+
Added: pypy/branch/py12/py/_plugin/pytest_pytester.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_pytester.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,490 @@
+"""
+funcargs and support code for testing py.test's own functionality.
+"""
+
+import py
+import sys, os
+import re
+import inspect
+import time
+from py._test.config import Config as pytestConfig
+from py.builtin import print_
+
+def pytest_addoption(parser):
+ group = parser.getgroup("pylib")
+ group.addoption('--tools-on-path',
+ action="store_true", dest="toolsonpath", default=False,
+ help=("discover tools on PATH instead of going through py.cmdline.")
+ )
+
+pytest_plugins = '_pytest'
+
+def pytest_funcarg__linecomp(request):
+ return LineComp()
+
+def pytest_funcarg__LineMatcher(request):
+ return LineMatcher
+
+def pytest_funcarg__testdir(request):
+ tmptestdir = TmpTestdir(request)
+ return tmptestdir
+
+rex_outcome = re.compile("(\d+) (\w+)")
+class RunResult:
+ def __init__(self, ret, outlines, errlines, duration):
+ self.ret = ret
+ self.outlines = outlines
+ self.errlines = errlines
+ self.stdout = LineMatcher(outlines)
+ self.stderr = LineMatcher(errlines)
+ self.duration = duration
+
+ def parseoutcomes(self):
+ for line in reversed(self.outlines):
+ if 'seconds' in line:
+ outcomes = rex_outcome.findall(line)
+ if outcomes:
+ d = {}
+ for num, cat in outcomes:
+ d[cat] = int(num)
+ return d
+
+class TmpTestdir:
+ def __init__(self, request):
+ self.request = request
+ self._pytest = request.getfuncargvalue("_pytest")
+ # XXX remove duplication with tmpdir plugin
+ basetmp = request.config.ensuretemp("testdir")
+ name = request.function.__name__
+ for i in range(100):
+ try:
+ tmpdir = basetmp.mkdir(name + str(i))
+ except py.error.EEXIST:
+ continue
+ break
+ # we need to create another subdir
+ # because Directory.collect() currently loads
+ # conftest.py from sibling directories
+ self.tmpdir = tmpdir.mkdir(name)
+ self.plugins = []
+ self._syspathremove = []
+ self.chdir() # always chdir
+ self.request.addfinalizer(self.finalize)
+
+ def __repr__(self):
+ return "<TmpTestdir %r>" % (self.tmpdir,)
+
+ def Config(self, topdir=None):
+ if topdir is None:
+ topdir = self.tmpdir.dirpath()
+ return pytestConfig(topdir=topdir)
+
+ def finalize(self):
+ for p in self._syspathremove:
+ py.std.sys.path.remove(p)
+ if hasattr(self, '_olddir'):
+ self._olddir.chdir()
+ # delete modules that have been loaded from tmpdir
+ for name, mod in list(sys.modules.items()):
+ if mod:
+ fn = getattr(mod, '__file__', None)
+ if fn and fn.startswith(str(self.tmpdir)):
+ del sys.modules[name]
+
+ def getreportrecorder(self, obj):
+ if hasattr(obj, 'config'):
+ obj = obj.config
+ if hasattr(obj, 'hook'):
+ obj = obj.hook
+ assert hasattr(obj, '_hookspecs'), obj
+ reprec = ReportRecorder(obj)
+ reprec.hookrecorder = self._pytest.gethookrecorder(obj)
+ reprec.hook = reprec.hookrecorder.hook
+ return reprec
+
+ def chdir(self):
+ old = self.tmpdir.chdir()
+ if not hasattr(self, '_olddir'):
+ self._olddir = old
+
+ def _makefile(self, ext, args, kwargs):
+ items = list(kwargs.items())
+ if args:
+ source = "\n".join(map(str, args))
+ basename = self.request.function.__name__
+ items.insert(0, (basename, source))
+ ret = None
+ for name, value in items:
+ p = self.tmpdir.join(name).new(ext=ext)
+ source = str(py.code.Source(value)).lstrip()
+ p.write(source.encode("utf-8"), "wb")
+ if ret is None:
+ ret = p
+ return ret
+
+
+ def makefile(self, ext, *args, **kwargs):
+ return self._makefile(ext, args, kwargs)
+
+ def makeconftest(self, source):
+ return self.makepyfile(conftest=source)
+
+ def makepyfile(self, *args, **kwargs):
+ return self._makefile('.py', args, kwargs)
+
+ def maketxtfile(self, *args, **kwargs):
+ return self._makefile('.txt', args, kwargs)
+
+ def syspathinsert(self, path=None):
+ if path is None:
+ path = self.tmpdir
+ py.std.sys.path.insert(0, str(path))
+ self._syspathremove.append(str(path))
+
+ def mkdir(self, name):
+ return self.tmpdir.mkdir(name)
+
+ def mkpydir(self, name):
+ p = self.mkdir(name)
+ p.ensure("__init__.py")
+ return p
+
+ def genitems(self, colitems):
+ return list(self.session.genitems(colitems))
+
+ def inline_genitems(self, *args):
+ #config = self.parseconfig(*args)
+ config = self.parseconfig(*args)
+ session = config.initsession()
+ rec = self.getreportrecorder(config)
+ colitems = [config.getnode(arg) for arg in config.args]
+ items = list(session.genitems(colitems))
+ return items, rec
+
+ def runitem(self, source):
+ # used from runner functional tests
+ item = self.getitem(source)
+ # the test class where we are called from wants to provide the runner
+ testclassinstance = py.builtin._getimself(self.request.function)
+ runner = testclassinstance.getrunner()
+ return runner(item)
+
+ def inline_runsource(self, source, *cmdlineargs):
+ p = self.makepyfile(source)
+ l = list(cmdlineargs) + [p]
+ return self.inline_run(*l)
+
+ def inline_runsource1(self, *args):
+ args = list(args)
+ source = args.pop()
+ p = self.makepyfile(source)
+ l = list(args) + [p]
+ reprec = self.inline_run(*l)
+ reports = reprec.getreports("pytest_runtest_logreport")
+ assert len(reports) == 1, reports
+ return reports[0]
+
+ def inline_run(self, *args):
+ config = self.parseconfig(*args)
+ config.pluginmanager.do_configure(config)
+ session = config.initsession()
+ reprec = self.getreportrecorder(config)
+ colitems = config.getinitialnodes()
+ session.main(colitems)
+ config.pluginmanager.do_unconfigure(config)
+ return reprec
+
+ def config_preparse(self):
+ config = self.Config()
+ for plugin in self.plugins:
+ if isinstance(plugin, str):
+ config.pluginmanager.import_plugin(plugin)
+ else:
+ if isinstance(plugin, dict):
+ plugin = PseudoPlugin(plugin)
+ if not config.pluginmanager.isregistered(plugin):
+ config.pluginmanager.register(plugin)
+ return config
+
+ def parseconfig(self, *args):
+ if not args:
+ args = (self.tmpdir,)
+ config = self.config_preparse()
+ args = list(args) + ["--basetemp=%s" % self.tmpdir.dirpath('basetemp')]
+ config.parse(args)
+ return config
+
+ def reparseconfig(self, args=None):
+ """ this is used from tests that want to re-invoke parse(). """
+ if not args:
+ args = [self.tmpdir]
+ from py._test import config
+ oldconfig = config.config_per_process # py.test.config
+ try:
+ c = config.config_per_process = py.test.config = pytestConfig()
+ c.basetemp = oldconfig.mktemp("reparse", numbered=True)
+ c.parse(args)
+ return c
+ finally:
+ config.config_per_process = py.test.config = oldconfig
+
+ def parseconfigure(self, *args):
+ config = self.parseconfig(*args)
+ config.pluginmanager.do_configure(config)
+ return config
+
+ def getitem(self, source, funcname="test_func"):
+ modcol = self.getmodulecol(source)
+ moditems = modcol.collect()
+ for item in modcol.collect():
+ if item.name == funcname:
+ return item
+ else:
+ assert 0, "%r item not found in module:\n%s" %(funcname, source)
+
+ def getitems(self, source):
+ modcol = self.getmodulecol(source)
+ return list(modcol.config.initsession().genitems([modcol]))
+ #assert item is not None, "%r item not found in module:\n%s" %(funcname, source)
+ #return item
+
+ def getfscol(self, path, configargs=()):
+ self.config = self.parseconfig(path, *configargs)
+ self.session = self.config.initsession()
+ return self.config.getnode(path)
+
+ def getmodulecol(self, source, configargs=(), withinit=False):
+ kw = {self.request.function.__name__: py.code.Source(source).strip()}
+ path = self.makepyfile(**kw)
+ if withinit:
+ self.makepyfile(__init__ = "#")
+ self.config = self.parseconfig(path, *configargs)
+ self.session = self.config.initsession()
+ #self.config.pluginmanager.do_configure(config=self.config)
+ # XXX
+ self.config.pluginmanager.import_plugin("runner")
+ plugin = self.config.pluginmanager.getplugin("runner")
+ plugin.pytest_configure(config=self.config)
+
+ return self.config.getnode(path)
+
+ def popen(self, cmdargs, stdout, stderr, **kw):
+ if not hasattr(py.std, 'subprocess'):
+ py.test.skip("no subprocess module")
+ env = os.environ.copy()
+ env['PYTHONPATH'] = ":".join(filter(None, [
+ str(os.getcwd()), env.get('PYTHONPATH', '')]))
+ kw['env'] = env
+ #print "env", env
+ return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
+
+ def run(self, *cmdargs):
+ return self._run(*cmdargs)
+
+ def _run(self, *cmdargs):
+ cmdargs = [str(x) for x in cmdargs]
+ p1 = self.tmpdir.join("stdout")
+ p2 = self.tmpdir.join("stderr")
+ print_("running", cmdargs, "curdir=", py.path.local())
+ f1 = p1.open("wb")
+ f2 = p2.open("wb")
+ now = time.time()
+ popen = self.popen(cmdargs, stdout=f1, stderr=f2,
+ close_fds=(sys.platform != "win32"))
+ ret = popen.wait()
+ f1.close()
+ f2.close()
+ out = p1.read("rb").decode("utf-8").splitlines()
+ err = p2.read("rb").decode("utf-8").splitlines()
+ def dump_lines(lines, fp):
+ try:
+ for line in lines:
+ py.builtin.print_(line, file=fp)
+ except UnicodeEncodeError:
+ print("couldn't print to %s because of encoding" % (fp,))
+ dump_lines(out, sys.stdout)
+ dump_lines(err, sys.stderr)
+ return RunResult(ret, out, err, time.time()-now)
+
+ def runpybin(self, scriptname, *args):
+ fullargs = self._getpybinargs(scriptname) + args
+ return self.run(*fullargs)
+
+ def _getpybinargs(self, scriptname):
+ if self.request.config.getvalue("toolsonpath"):
+ script = py.path.local.sysfind(scriptname)
+ assert script, "script %r not found" % scriptname
+ return (script,)
+ else:
+ cmdlinename = scriptname.replace(".", "")
+ assert hasattr(py.cmdline, cmdlinename), cmdlinename
+ source = ("import sys;sys.path.insert(0,%r);"
+ "import py;py.cmdline.%s()" %
+ (str(py._pydir.dirpath()), cmdlinename))
+ return (sys.executable, "-c", source,)
+
+ def runpython(self, script):
+ s = self._getsysprepend()
+ if s:
+ script.write(s + "\n" + script.read())
+ return self.run(sys.executable, script)
+
+ def _getsysprepend(self):
+ if not self.request.config.getvalue("toolsonpath"):
+ s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath())
+ else:
+ s = ""
+ return s
+
+ def runpython_c(self, command):
+ command = self._getsysprepend() + command
+ return self.run(py.std.sys.executable, "-c", command)
+
+ def runpytest(self, *args):
+ p = py.path.local.make_numbered_dir(prefix="runpytest-",
+ keep=None, rootdir=self.tmpdir)
+ args = ('--basetemp=%s' % p, ) + args
+ plugins = [x for x in self.plugins if isinstance(x, str)]
+ if plugins:
+ args = ('-p', plugins[0]) + args
+ return self.runpybin("py.test", *args)
+
+ def spawn_pytest(self, string, expect_timeout=10.0):
+ pexpect = py.test.importorskip("pexpect", "2.4")
+ if not self.request.config.getvalue("toolsonpath"):
+ py.test.skip("need --tools-on-path to run py.test script")
+ basetemp = self.tmpdir.mkdir("pexpect")
+ invoke = self._getpybinargs("py.test")[0]
+ cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
+ child = pexpect.spawn(cmd, logfile=basetemp.join("spawn.out").open("w"))
+ child.timeout = expect_timeout
+ return child
+
+class PseudoPlugin:
+ def __init__(self, vars):
+ self.__dict__.update(vars)
+
+class ReportRecorder(object):
+ def __init__(self, hook):
+ self.hook = hook
+ self.registry = hook._registry
+ self.registry.register(self)
+
+ def getcall(self, name):
+ return self.hookrecorder.getcall(name)
+
+ def popcall(self, name):
+ return self.hookrecorder.popcall(name)
+
+ def getcalls(self, names):
+ """ return list of ParsedCall instances matching the given eventname. """
+ return self.hookrecorder.getcalls(names)
+
+ # functionality for test reports
+
+ def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
+ return [x.report for x in self.getcalls(names)]
+
+ def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport"):
+ """ return a testreport whose dotted import path matches """
+ l = []
+ for rep in self.getreports(names=names):
+ colitem = rep.getnode()
+ if not inamepart or inamepart in colitem.listnames():
+ l.append(rep)
+ if not l:
+ raise ValueError("could not find test report matching %r: no test reports at all!" %
+ (inamepart,))
+ if len(l) > 1:
+ raise ValueError("found more than one testreport matching %r: %s" %(
+ inamepart, l))
+ return l[0]
+
+ def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'):
+ return [rep for rep in self.getreports(names) if rep.failed]
+
+ def getfailedcollections(self):
+ return self.getfailures('pytest_collectreport')
+
+ def listoutcomes(self):
+ passed = []
+ skipped = []
+ failed = []
+ for rep in self.getreports("pytest_runtest_logreport"):
+ if rep.passed:
+ if rep.when == "call":
+ passed.append(rep)
+ elif rep.skipped:
+ skipped.append(rep)
+ elif rep.failed:
+ failed.append(rep)
+ return passed, skipped, failed
+
+ def countoutcomes(self):
+ return [len(x) for x in self.listoutcomes()]
+
+ def assertoutcome(self, passed=0, skipped=0, failed=0):
+ realpassed, realskipped, realfailed = self.listoutcomes()
+ assert passed == len(realpassed)
+ assert skipped == len(realskipped)
+ assert failed == len(realfailed)
+
+ def clear(self):
+ self.hookrecorder.calls[:] = []
+
+ def unregister(self):
+ self.registry.unregister(self)
+ self.hookrecorder.finish_recording()
+
+class LineComp:
+ def __init__(self):
+ self.stringio = py.io.TextIO()
+
+ def assert_contains_lines(self, lines2):
+ """ assert that lines2 are contained (linearly) in lines1.
+ return a list of extralines found.
+ """
+ __tracebackhide__ = True
+ val = self.stringio.getvalue()
+ self.stringio.truncate(0)
+ self.stringio.seek(0)
+ lines1 = val.split("\n")
+ return LineMatcher(lines1).fnmatch_lines(lines2)
+
+class LineMatcher:
+ def __init__(self, lines):
+ self.lines = lines
+
+ def str(self):
+ return "\n".join(self.lines)
+
+ def fnmatch_lines(self, lines2):
+ if isinstance(lines2, str):
+ lines2 = py.code.Source(lines2)
+ if isinstance(lines2, py.code.Source):
+ lines2 = lines2.strip().lines
+
+ from fnmatch import fnmatch
+ lines1 = self.lines[:]
+ nextline = None
+ extralines = []
+ __tracebackhide__ = True
+ for line in lines2:
+ nomatchprinted = False
+ while lines1:
+ nextline = lines1.pop(0)
+ if line == nextline:
+ print_("exact match:", repr(line))
+ break
+ elif fnmatch(nextline, line):
+ print_("fnmatch:", repr(line))
+ print_(" with:", repr(nextline))
+ break
+ else:
+ if not nomatchprinted:
+ print_("nomatch:", repr(line))
+ nomatchprinted = True
+ print_(" and:", repr(nextline))
+ extralines.append(nextline)
+ else:
+ assert line == nextline
Added: pypy/branch/py12/py/_plugin/pytest_pytester.py.orig
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_pytester.py.orig Fri Apr 30 17:07:52 2010
@@ -0,0 +1,494 @@
+"""
+funcargs and support code for testing py.test's own functionality.
+"""
+
+import py
+import sys, os
+import re
+import inspect
+import time
+from py._test.config import Config as pytestConfig
+from py.builtin import print_
+
def pytest_addoption(parser):
    """Register the "pylib" option group used by py.test's self-tests."""
    pylib_group = parser.getgroup("pylib")
    pylib_group.addoption(
        '--tools-on-path',
        action="store_true",
        dest="toolsonpath",
        default=False,
        help="discover tools on PATH instead of going through py.cmdline.",
    )
+
+pytest_plugins = '_pytest'
+
def pytest_funcarg__linecomp(request):
    """Provide a fresh LineComp instance as the "linecomp" funcarg."""
    return LineComp()
+
def pytest_funcarg__LineMatcher(request):
    """Expose the LineMatcher class itself as a funcarg."""
    return LineMatcher
+
def pytest_funcarg__testdir(request):
    """Provide a TmpTestdir bound to the requesting test as "testdir"."""
    return TmpTestdir(request)
+
+rex_outcome = re.compile("(\d+) (\w+)")
class RunResult:
    """Outcome of a py.test subprocess run: exit code, captured stdout and
    stderr lines (plus LineMatchers over them) and the wall-clock duration."""

    def __init__(self, ret, outlines, errlines, duration):
        self.ret = ret
        self.outlines = outlines
        self.errlines = errlines
        self.stdout = LineMatcher(outlines)
        self.stderr = LineMatcher(errlines)
        self.duration = duration

    def parseoutcomes(self):
        """Scan stdout bottom-up for the summary line and return a mapping
        like {"passed": 3, "failed": 1}; implicitly None when not found."""
        for line in reversed(self.outlines):
            if 'seconds' not in line:
                continue
            parsed = rex_outcome.findall(line)
            if parsed:
                return dict([(category, int(count))
                             for count, category in parsed])
+
+class TmpTestdir:
+ def __init__(self, request):
+ self.request = request
+ self._pytest = request.getfuncargvalue("_pytest")
+ # XXX remove duplication with tmpdir plugin
+ basetmp = request.config.ensuretemp("testdir")
+ name = request.function.__name__
+ for i in range(100):
+ try:
+ tmpdir = basetmp.mkdir(name + str(i))
+ except py.error.EEXIST:
+ continue
+ break
+ # we need to create another subdir
+ # because Directory.collect() currently loads
+ # conftest.py from sibling directories
+ self.tmpdir = tmpdir.mkdir(name)
+ self.plugins = []
+ self._syspathremove = []
+ self.chdir() # always chdir
+ self.request.addfinalizer(self.finalize)
+
+ def __repr__(self):
+ return "<TmpTestdir %r>" % (self.tmpdir,)
+
+ def Config(self, topdir=None):
+ if topdir is None:
+ topdir = self.tmpdir.dirpath()
+ return pytestConfig(topdir=topdir)
+
+ def finalize(self):
+ for p in self._syspathremove:
+ py.std.sys.path.remove(p)
+ if hasattr(self, '_olddir'):
+ self._olddir.chdir()
+ # delete modules that have been loaded from tmpdir
+ for name, mod in list(sys.modules.items()):
+ if mod:
+ fn = getattr(mod, '__file__', None)
+ if fn and fn.startswith(str(self.tmpdir)):
+ del sys.modules[name]
+
+ def getreportrecorder(self, obj):
+ if hasattr(obj, 'config'):
+ obj = obj.config
+ if hasattr(obj, 'hook'):
+ obj = obj.hook
+ assert hasattr(obj, '_hookspecs'), obj
+ reprec = ReportRecorder(obj)
+ reprec.hookrecorder = self._pytest.gethookrecorder(obj)
+ reprec.hook = reprec.hookrecorder.hook
+ return reprec
+
+ def chdir(self):
+ old = self.tmpdir.chdir()
+ if not hasattr(self, '_olddir'):
+ self._olddir = old
+
+ def _makefile(self, ext, args, kwargs):
+ items = list(kwargs.items())
+ if args:
+ source = "\n".join(map(str, args))
+ basename = self.request.function.__name__
+ items.insert(0, (basename, source))
+ ret = None
+ for name, value in items:
+ p = self.tmpdir.join(name).new(ext=ext)
+ source = py.code.Source(value)
+ p.write(str(py.code.Source(value)).lstrip())
+ if ret is None:
+ ret = p
+ return ret
+
+
+ def makefile(self, ext, *args, **kwargs):
+ return self._makefile(ext, args, kwargs)
+
+ def makeconftest(self, source):
+ return self.makepyfile(conftest=source)
+
+ def makepyfile(self, *args, **kwargs):
+ return self._makefile('.py', args, kwargs)
+
+ def maketxtfile(self, *args, **kwargs):
+ return self._makefile('.txt', args, kwargs)
+
+ def syspathinsert(self, path=None):
+ if path is None:
+ path = self.tmpdir
+ py.std.sys.path.insert(0, str(path))
+ self._syspathremove.append(str(path))
+
+ def mkdir(self, name):
+ return self.tmpdir.mkdir(name)
+
+ def mkpydir(self, name):
+ p = self.mkdir(name)
+ p.ensure("__init__.py")
+ return p
+
+ def genitems(self, colitems):
+ return list(self.session.genitems(colitems))
+
+ def inline_genitems(self, *args):
+ #config = self.parseconfig(*args)
+ config = self.parseconfig(*args)
+ session = config.initsession()
+ rec = self.getreportrecorder(config)
+ colitems = [config.getnode(arg) for arg in config.args]
+ items = list(session.genitems(colitems))
+ return items, rec
+
+ def runitem(self, source):
+ # used from runner functional tests
+ item = self.getitem(source)
+ # the test class where we are called from wants to provide the runner
+ testclassinstance = py.builtin._getimself(self.request.function)
+ runner = testclassinstance.getrunner()
+ return runner(item)
+
+ def inline_runsource(self, source, *cmdlineargs):
+ p = self.makepyfile(source)
+ l = list(cmdlineargs) + [p]
+ return self.inline_run(*l)
+
+ def inline_runsource1(self, *args):
+ args = list(args)
+ source = args.pop()
+ p = self.makepyfile(source)
+ l = list(args) + [p]
+ reprec = self.inline_run(*l)
+ reports = reprec.getreports("pytest_runtest_logreport")
+ assert len(reports) == 1, reports
+ return reports[0]
+
+ def inline_run(self, *args):
+ config = self.parseconfig(*args)
+ config.pluginmanager.do_configure(config)
+ session = config.initsession()
+ reprec = self.getreportrecorder(config)
+ colitems = config.getinitialnodes()
+ session.main(colitems)
+ config.pluginmanager.do_unconfigure(config)
+ return reprec
+
+ def config_preparse(self):
+ config = self.Config()
+ for plugin in self.plugins:
+ if isinstance(plugin, str):
+ config.pluginmanager.import_plugin(plugin)
+ else:
+ if isinstance(plugin, dict):
+ plugin = PseudoPlugin(plugin)
+ if not config.pluginmanager.isregistered(plugin):
+ config.pluginmanager.register(plugin)
+ return config
+
+ def parseconfig(self, *args):
+ if not args:
+ args = (self.tmpdir,)
+ config = self.config_preparse()
+ args = list(args) + ["--basetemp=%s" % self.tmpdir.dirpath('basetemp')]
+ config.parse(args)
+ return config
+
+ def reparseconfig(self, args=None):
+ """ this is used from tests that want to re-invoke parse(). """
+ if not args:
+ args = [self.tmpdir]
+ from py._test import config
+ oldconfig = config.config_per_process # py.test.config
+ try:
+ c = config.config_per_process = py.test.config = pytestConfig()
+ c.basetemp = oldconfig.mktemp("reparse", numbered=True)
+ c.parse(args)
+ return c
+ finally:
+ config.config_per_process = py.test.config = oldconfig
+
+ def parseconfigure(self, *args):
+ config = self.parseconfig(*args)
+ config.pluginmanager.do_configure(config)
+ return config
+
+ def getitem(self, source, funcname="test_func"):
+ modcol = self.getmodulecol(source)
+ moditems = modcol.collect()
+ for item in modcol.collect():
+ if item.name == funcname:
+ return item
+ else:
+ assert 0, "%r item not found in module:\n%s" %(funcname, source)
+
+ def getitems(self, source):
+ modcol = self.getmodulecol(source)
+ return list(modcol.config.initsession().genitems([modcol]))
+ #assert item is not None, "%r item not found in module:\n%s" %(funcname, source)
+ #return item
+
+ def getfscol(self, path, configargs=()):
+ self.config = self.parseconfig(path, *configargs)
+ self.session = self.config.initsession()
+ return self.config.getnode(path)
+
+ def getmodulecol(self, source, configargs=(), withinit=False):
+ kw = {self.request.function.__name__: py.code.Source(source).strip()}
+ path = self.makepyfile(**kw)
+ if withinit:
+ self.makepyfile(__init__ = "#")
+ self.config = self.parseconfig(path, *configargs)
+ self.session = self.config.initsession()
+ #self.config.pluginmanager.do_configure(config=self.config)
+ # XXX
+ self.config.pluginmanager.import_plugin("runner")
+ plugin = self.config.pluginmanager.getplugin("runner")
+ plugin.pytest_configure(config=self.config)
+
+ return self.config.getnode(path)
+
+ def popen(self, cmdargs, stdout, stderr, **kw):
+ if not hasattr(py.std, 'subprocess'):
+ py.test.skip("no subprocess module")
+ env = os.environ.copy()
+ env['PYTHONPATH'] = ":".join(filter(None, [
+ str(os.getcwd()), env.get('PYTHONPATH', '')]))
+ kw['env'] = env
+ #print "env", env
+ return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
+
+ def run(self, *cmdargs):
+ return self._run(*cmdargs)
+
    def _run(self, *cmdargs):
        # Run cmdargs as a subprocess, capture its output into tmpdir files
        # and return a RunResult (exit code, out/err lines, duration).
        cmdargs = [str(x) for x in cmdargs]
        p1 = self.tmpdir.join("stdout")
        p2 = self.tmpdir.join("stderr")
        print_("running", cmdargs, "curdir=", py.path.local())
        # we don't write to a file because Jython2.5.1 will internally
        # anyway open PIPEs which subsequently leak leading to
        # Too-Many-Open-Files
        PIPE = py.std.subprocess.PIPE
        now = time.time()
        popen = self.popen(cmdargs, stdout=PIPE, stderr=PIPE, )
        #close_fds=(sys.platform != "win32"))
        p1.write(popen.stdout.read())
        p2.write(popen.stderr.read())
        popen.stdout.close()
        popen.stderr.close()
        ret = popen.wait()
        # cr=0: presumably returns lines without trailing newlines
        # (py.path readlines API) -- TODO confirm
        out, err = p1.readlines(cr=0), p2.readlines(cr=0)
        #if err:
        #    for line in err:
        #        py.builtin.print_(line, file=sys.stderr)
        #if out:
        #    for line in out:
        #        py.builtin.print_(line, file=sys.stdout)
        return RunResult(ret, out, err, time.time()-now)
+
+ def runpybin(self, scriptname, *args):
+ fullargs = self._getpybinargs(scriptname) + args
+ return self.run(*fullargs)
+
    def _getpybinargs(self, scriptname):
        # Return an argv prefix for invoking the given py.* console script.
        if self.request.config.getvalue("toolsonpath"):
            # use the installed script found on PATH
            script = py.path.local.sysfind(scriptname)
            assert script, "script %r not found" % scriptname
            return (script,)
        else:
            # no installed scripts: synthesize "python -c ..." which puts the
            # checkout on sys.path and calls the matching py.cmdline function
            cmdlinename = scriptname.replace(".", "")
            assert hasattr(py.cmdline, cmdlinename), cmdlinename
            source = ("import sys;sys.path.insert(0,%r);"
                      "import py;py.cmdline.%s()" %
                      (str(py._pydir.dirpath()), cmdlinename))
            return (sys.executable, "-c", source,)
+
+ def runpython(self, script):
+ s = self._getsysprepend()
+ if s:
+ script.write(s + "\n" + script.read())
+ return self.run(sys.executable, script)
+
+ def _getsysprepend(self):
+ if not self.request.config.getvalue("toolsonpath"):
+ s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath())
+ else:
+ s = ""
+ return s
+
+ def runpython_c(self, command):
+ command = self._getsysprepend() + command
+ return self.run(py.std.sys.executable, "-c", command)
+
+ def runpytest(self, *args):
+ p = py.path.local.make_numbered_dir(prefix="runpytest-",
+ keep=None, rootdir=self.tmpdir)
+ args = ('--basetemp=%s' % p, ) + args
+ plugins = [x for x in self.plugins if isinstance(x, str)]
+ if plugins:
+ args = ('-p', plugins[0]) + args
+ return self.runpybin("py.test", *args)
+
+ def spawn_pytest(self, string, expect_timeout=10.0):
+ pexpect = py.test.importorskip("pexpect", "2.4")
+ if not self.request.config.getvalue("toolsonpath"):
+ py.test.skip("need --tools-on-path to run py.test script")
+ basetemp = self.tmpdir.mkdir("pexpect")
+ invoke = self._getpybinargs("py.test")[0]
+ cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
+ child = pexpect.spawn(cmd, logfile=basetemp.join("spawn.out").open("w"))
+ child.timeout = expect_timeout
+ return child
+
class PseudoPlugin:
    """Wrap a plain mapping as a plugin object exposing keys as attributes."""

    def __init__(self, vars):
        for name in vars:
            setattr(self, name, vars[name])
+
class ReportRecorder(object):
    """Record hook calls made on a hook relay and give convenient access
    to the test reports contained in those calls."""

    def __init__(self, hook):
        self.hook = hook
        self.registry = hook._registry
        # register ourselves so the hookrecorder (attached later by
        # getreportrecorder) sees calls made through the registry
        self.registry.register(self)

    def getcall(self, name):
        # return the single recorded call with the given hook name
        return self.hookrecorder.getcall(name)

    def popcall(self, name):
        # remove and return the first recorded call with the given name
        return self.hookrecorder.popcall(name)

    def getcalls(self, names):
        """ return list of ParsedCall instances matching the given eventname. """
        return self.hookrecorder.getcalls(names)

    # functionality for test reports

    def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
        # extract the .report argument from all matching recorded calls
        return [x.report for x in self.getcalls(names)]

    def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport"):
        """ return a testreport whose dotted import path matches """
        l = []
        for rep in self.getreports(names=names):
            colitem = rep.getnode()
            if not inamepart or inamepart in colitem.listnames():
                l.append(rep)
        if not l:
            raise ValueError("could not find test report matching %r: no test reports at all!" %
                (inamepart,))
        if len(l) > 1:
            raise ValueError("found more than one testreport matching %r: %s" %(
                             inamepart, l))
        return l[0]

    def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'):
        # reports whose .failed flag is set
        return [rep for rep in self.getreports(names) if rep.failed]

    def getfailedcollections(self):
        return self.getfailures('pytest_collectreport')

    def listoutcomes(self):
        # split run-test reports into (passed, skipped, failed) lists;
        # only the "call" phase counts as passed
        passed = []
        skipped = []
        failed = []
        for rep in self.getreports("pytest_runtest_logreport"):
            if rep.passed:
                if rep.when == "call":
                    passed.append(rep)
            elif rep.skipped:
                skipped.append(rep)
            elif rep.failed:
                failed.append(rep)
        return passed, skipped, failed

    def countoutcomes(self):
        # lengths of the three listoutcomes() lists
        return [len(x) for x in self.listoutcomes()]

    def assertoutcome(self, passed=0, skipped=0, failed=0):
        # assert exact counts for each outcome category
        realpassed, realskipped, realfailed = self.listoutcomes()
        assert passed == len(realpassed)
        assert skipped == len(realskipped)
        assert failed == len(realfailed)

    def clear(self):
        # truncate in place so external references see the cleared list
        self.hookrecorder.calls[:] = []

    def unregister(self):
        # detach from the registry and stop recording
        self.registry.unregister(self)
        self.hookrecorder.finish_recording()
+
class LineComp:
    """Buffer output in a TextIO and assert on it, consuming the buffer."""

    def __init__(self):
        self.stringio = py.io.TextIO()

    def assert_contains_lines(self, lines2):
        """ assert that lines2 are contained (linearly) in lines1.
        return a list of extralines found.
        """
        __tracebackhide__ = True
        contents = self.stringio.getvalue()
        self.stringio.truncate(0)  # remove what we got
        return LineMatcher(contents.split("\n")).fnmatch_lines(lines2)
+
class LineMatcher:
    # Matches captured output lines against expected lines/patterns.
    def __init__(self, lines):
        self.lines = lines

    def str(self):
        # Return the stored lines joined back into one string.
        return "\n".join(self.lines)

    def fnmatch_lines(self, lines2):
        # Check that every entry of lines2 occurs, in order, in self.lines,
        # either as an exact match or as an fnmatch-style wildcard pattern.
        # Raises AssertionError on the first expected line not found and
        # returns the list of non-matching extra lines otherwise.
        if isinstance(lines2, str):
            lines2 = py.code.Source(lines2)
        if isinstance(lines2, py.code.Source):
            lines2 = lines2.strip().lines

        from fnmatch import fnmatch
        __tracebackhide__ = True
        lines1 = self.lines[:]
        nextline = None
        extralines = []
        for line in lines2:
            nomatchprinted = False
            while lines1:
                nextline = lines1.pop(0)
                if line == nextline:
                    print_("exact match:", repr(line))
                    break
                elif fnmatch(nextline, line):
                    print_("fnmatch:", repr(line))
                    print_("   with:", repr(nextline))
                    break
                else:
                    if not nomatchprinted:
                        print_("nomatch:", repr(line))
                        nomatchprinted = True
                    print_("    and:", repr(nextline))
                # non-matching captured lines are collected as "extra"
                extralines.append(nextline)
            else:
                if line != nextline:
                    #__tracebackhide__ = True
                    raise AssertionError("expected line not found: %r" % line)
        # whatever remains after all expected lines matched is extra too
        extralines.extend(lines1)
        return extralines
Added: pypy/branch/py12/py/_plugin/pytest_recwarn.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_recwarn.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,121 @@
+"""
+helpers for asserting deprecation and other warnings.
+
+Example usage
+---------------------
+
+You can use the ``recwarn`` funcarg to track
+warnings within a test function:
+
+.. sourcecode:: python
+
+ def test_hello(recwarn):
+ from warnings import warn
+ warn("hello", DeprecationWarning)
+ w = recwarn.pop(DeprecationWarning)
+ assert issubclass(w.category, DeprecationWarning)
+ assert 'hello' in str(w.message)
+ assert w.filename
+ assert w.lineno
+
+You can also call a global helper for checking
+that a certain function call yields a Deprecation
+warning:
+
+.. sourcecode:: python
+
+ import py
+
+ def test_global():
+ py.test.deprecated_call(myfunction, 17)
+
+
+"""
+
+import py
+import os
+
def pytest_funcarg__recwarn(request):
    """Return a WarningsRecorder instance that provides these methods:

    * ``pop(category=None)``: return last warning matching the category.
    * ``clear()``: clear list of warnings
    """
    recorder = WarningsRecorder()
    request.addfinalizer(recorder.finalize)
    return recorder
+
def pytest_namespace():
    """Publish deprecated_call so it is reachable as py.test.deprecated_call."""
    return dict(deprecated_call=deprecated_call)
+
def deprecated_call(func, *args, **kwargs):
    """ assert that calling func(*args, **kwargs)
        triggers a DeprecationWarning.
    """
    warningmodule = py.std.warnings
    l = []
    oldwarn_explicit = getattr(warningmodule, 'warn_explicit')
    def warn_explicit(*args, **kwargs):
        l.append(args)
        oldwarn_explicit(*args, **kwargs)
    oldwarn = getattr(warningmodule, 'warn')
    def warn(*args, **kwargs):
        l.append(args)
        oldwarn(*args, **kwargs)

    warningmodule.warn_explicit = warn_explicit
    warningmodule.warn = warn
    try:
        ret = func(*args, **kwargs)
    finally:
        # restore the ORIGINAL functions; the previous code re-assigned the
        # wrapper functions here, leaving the warnings module permanently
        # monkeypatched after every call
        warningmodule.warn_explicit = oldwarn_explicit
        warningmodule.warn = oldwarn
    if not l:
        #print warningmodule
        __tracebackhide__ = True
        raise AssertionError("%r did not produce DeprecationWarning" %(func,))
    return ret
+
+
class RecordedWarning:
    """Value object describing a single captured warning."""

    def __init__(self, message, category, filename, lineno, line):
        # mirror the warnings.showwarning() callback signature field by field
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.line = line
+
class WarningsRecorder:
    """Capture warnings by monkeypatching warnings.showwarning; the previous
    hook is still invoked and is reinstalled by finalize()."""

    def __init__(self):
        warningmodule = py.std.warnings
        self.list = []
        def showwarning(message, category, filename, lineno, line=0):
            self.list.append(RecordedWarning(
                message, category, filename, lineno, line))
            try:
                self.old_showwarning(message, category,
                                     filename, lineno, line=line)
            except TypeError:
                # < python2.6
                self.old_showwarning(message, category, filename, lineno)
        self.old_showwarning = warningmodule.showwarning
        warningmodule.showwarning = showwarning

    def pop(self, cls=Warning):
        """ pop the first recorded warning, raise exception if not exists."""
        for index, recorded in enumerate(self.list):
            if issubclass(recorded.category, cls):
                return self.list.pop(index)
        __tracebackhide__ = True
        assert 0, "%r not found in %r" %(cls, self.list)

    def clear(self):
        """Drop all recorded warnings, truncating the list in place."""
        del self.list[:]

    def finalize(self):
        """Reinstall the showwarning hook active before recording started."""
        py.std.warnings.showwarning = self.old_showwarning
Added: pypy/branch/py12/py/_plugin/pytest_restdoc.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_restdoc.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,429 @@
+"""
+perform ReST syntax, local and remote reference tests on .rst/.txt files.
+"""
+import py
+import sys, os, re
+
def pytest_addoption(parser):
    """Register the ReST documentation checking options."""
    group = parser.getgroup("ReST", "ReST documentation check options")
    group.addoption(
        '-R', '--urlcheck',
        action="store_true", dest="urlcheck", default=False,
        help="urlopen() remote links found in ReST text files.")
    group.addoption(
        '--urltimeout', action="store", metavar="secs",
        type="int", dest="urlcheck_timeout", default=5,
        help="timeout in seconds for remote urlchecks")
    group.addoption(
        '--forcegen',
        action="store_true", dest="forcegen", default=False,
        help="force generation of html files.")
+
def pytest_collect_file(path, parent):
    """Collect .txt/.rst files as ReSTFile nodes when a confrest.py based
    project can be located for them."""
    if path.ext not in (".txt", ".rst"):
        return None
    project = getproject(path)
    if project is not None:
        return ReSTFile(path, parent=parent, project=project)
+
def getproject(path):
    """Walk upwards from path and instantiate the Project of the first
    confrest.py found; implicitly None when there is none."""
    for ancestor in path.parts(reverse=True):
        confrest = ancestor.join("confrest.py")
        if confrest.check():
            Project = confrest.pyimport().Project
            return Project(ancestor)
+
class ReSTFile(py.test.collect.File):
    """File node producing syntax, link and doctest checks for a ReST file."""

    def __init__(self, fspath, parent, project):
        super(ReSTFile, self).__init__(fspath=fspath, parent=parent)
        self.project = project

    def collect(self):
        items = [
            ReSTSyntaxTest("ReSTSyntax", parent=self, project=self.project),
            LinkCheckerMaker("checklinks", parent=self),
            DoctestText("doctest", parent=self),
        ]
        return items
+
def deindent(s, sep='\n'):
    """Strip the smallest common leading indentation from all lines of s;
    whitespace-only lines become empty and do not influence the amount."""
    lines = s.split(sep)
    least = -1
    for line in lines:
        if not line.strip():
            continue
        indent = len(line) - len(line.lstrip())
        if least == -1 or indent < least:
            least = indent
    if least == -1:
        # nothing but blank lines: return the input unchanged
        return s
    stripped = []
    for line in lines:
        if line.strip():
            stripped.append(line[least:])
        else:
            stripped.append('')
    return sep.join(stripped)
+
+class ReSTSyntaxTest(py.test.collect.Item):
+ def __init__(self, name, parent, project):
+ super(ReSTSyntaxTest, self).__init__(name=name, parent=parent)
+ self.project = project
+
+ def reportinfo(self):
+ return self.fspath, None, "syntax check"
+
+ def runtest(self):
+ self.restcheck(py.path.svnwc(self.fspath))
+
+ def restcheck(self, path):
+ py.test.importorskip("docutils")
+ self.register_linkrole()
+ from docutils.utils import SystemMessage
+ try:
+ self._checkskip(path, self.project.get_htmloutputpath(path))
+ self.project.process(path)
+ except KeyboardInterrupt:
+ raise
+ except SystemMessage:
+ # we assume docutils printed info on stdout
+ py.test.fail("docutils processing failed, see captured stderr")
+
+ def register_linkrole(self):
+ #directive.register_linkrole('api', self.resolve_linkrole)
+ #directive.register_linkrole('source', self.resolve_linkrole)
+#
+# # XXX fake sphinx' "toctree" and refs
+# directive.register_linkrole('ref', self.resolve_linkrole)
+
+ from docutils.parsers.rst import directives
+ def toctree_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ return []
+ toctree_directive.content = 1
+ toctree_directive.options = {'maxdepth': int, 'glob': directives.flag,
+ 'hidden': directives.flag}
+ directives.register_directive('toctree', toctree_directive)
+ self.register_pygments()
+
+ def register_pygments(self):
+ # taken from pygments-main/external/rst-directive.py
+ from docutils.parsers.rst import directives
+ try:
+ from pygments.formatters import HtmlFormatter
+ except ImportError:
+ def pygments_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ return []
+ pygments_directive.options = {}
+ else:
+ # The default formatter
+ DEFAULT = HtmlFormatter(noclasses=True)
+ # Add name -> formatter pairs for every variant you want to use
+ VARIANTS = {
+ # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
+ }
+
+ from docutils import nodes
+
+ from pygments import highlight
+ from pygments.lexers import get_lexer_by_name, TextLexer
+
+ def pygments_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ try:
+ lexer = get_lexer_by_name(arguments[0])
+ except ValueError:
+ # no lexer found - use the text one instead of an exception
+ lexer = TextLexer()
+ # take an arbitrary option if more than one is given
+ formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
+ parsed = highlight('\n'.join(content), lexer, formatter)
+ return [nodes.raw('', parsed, format='html')]
+
+ pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
+
+ pygments_directive.arguments = (1, 0, 1)
+ pygments_directive.content = 1
+ directives.register_directive('sourcecode', pygments_directive)
+
+ def resolve_linkrole(self, name, text, check=True):
+ apigen_relpath = self.project.apigen_relpath
+
+ if name == 'api':
+ if text == 'py':
+ return ('py', apigen_relpath + 'api/index.html')
+ else:
+ assert text.startswith('py.'), (
+ 'api link "%s" does not point to the py package') % (text,)
+ dotted_name = text
+ if dotted_name.find('(') > -1:
+ dotted_name = dotted_name[:text.find('(')]
+ # remove pkg root
+ path = dotted_name.split('.')[1:]
+ dotted_name = '.'.join(path)
+ obj = py
+ if check:
+ for chunk in path:
+ try:
+ obj = getattr(obj, chunk)
+ except AttributeError:
+ raise AssertionError(
+ 'problem with linkrole :api:`%s`: can not resolve '
+ 'dotted name %s' % (text, dotted_name,))
+ return (text, apigen_relpath + 'api/%s.html' % (dotted_name,))
+ elif name == 'source':
+ assert text.startswith('py/'), ('source link "%s" does not point '
+ 'to the py package') % (text,)
+ relpath = '/'.join(text.split('/')[1:])
+ if check:
+ pkgroot = py._pydir
+ abspath = pkgroot.join(relpath)
+ assert pkgroot.join(relpath).check(), (
+ 'problem with linkrole :source:`%s`: '
+ 'path %s does not exist' % (text, relpath))
+ if relpath.endswith('/') or not relpath:
+ relpath += 'index.html'
+ else:
+ relpath += '.html'
+ return (text, apigen_relpath + 'source/%s' % (relpath,))
+ elif name == 'ref':
+ return ("", "")
+
+ def _checkskip(self, lpath, htmlpath=None):
+ if not self.config.getvalue("forcegen"):
+ lpath = py.path.local(lpath)
+ if htmlpath is not None:
+ htmlpath = py.path.local(htmlpath)
+ if lpath.ext == '.txt':
+ htmlpath = htmlpath or lpath.new(ext='.html')
+ if htmlpath.check(file=1) and htmlpath.mtime() >= lpath.mtime():
+ py.test.skip("html file is up to date, use --forcegen to regenerate")
+ #return [] # no need to rebuild
+
+class DoctestText(py.test.collect.Item):
+ def reportinfo(self):
+ return self.fspath, None, "doctest"
+
+ def runtest(self):
+ content = self._normalize_linesep()
+ newcontent = self.config.hook.pytest_doctest_prepare_content(content=content)
+ if newcontent is not None:
+ content = newcontent
+ s = content
+ l = []
+ prefix = '.. >>> '
+ mod = py.std.types.ModuleType(self.fspath.purebasename)
+ skipchunk = False
+ for line in deindent(s).split('\n'):
+ stripped = line.strip()
+ if skipchunk and line.startswith(skipchunk):
+ py.builtin.print_("skipping", line)
+ continue
+ skipchunk = False
+ if stripped.startswith(prefix):
+ try:
+ py.builtin.exec_(py.code.Source(
+ stripped[len(prefix):]).compile(), mod.__dict__)
+ except ValueError:
+ e = sys.exc_info()[1]
+ if e.args and e.args[0] == "skipchunk":
+ skipchunk = " " * (len(line) - len(line.lstrip()))
+ else:
+ raise
+ else:
+ l.append(line)
+ docstring = "\n".join(l)
+ mod.__doc__ = docstring
+ failed, tot = py.std.doctest.testmod(mod, verbose=1)
+ if failed:
+ py.test.fail("doctest %s: %s failed out of %s" %(
+ self.fspath, failed, tot))
+
+ def _normalize_linesep(self):
+ # XXX quite nasty... but it works (fixes win32 issues)
+ s = self.fspath.read()
+ linesep = '\n'
+ if '\r' in s:
+ if '\n' not in s:
+ linesep = '\r'
+ else:
+ linesep = '\r\n'
+ s = s.replace(linesep, '\n')
+ return s
+
class LinkCheckerMaker(py.test.collect.Collector):
    """Scan a ReST file for ``.. _name: target`` link definitions and yield
    one CheckLink item per remote (http/https) or local target."""

    def collect(self):
        return list(self.genlinkchecks())

    def genlinkchecks(self):
        path = self.fspath
        # generating functions + args as single tests
        timeout = self.config.getvalue("urlcheck_timeout")
        for lineno, line in enumerate(path.readlines()):
            line = line.strip()
            if not line.startswith('.. _'):
                continue
            if line.startswith('.. _`'):
                delim = '`:'
            else:
                delim = ':'
            parts = line.split(delim, 1)
            if len(parts) != 2:
                continue
            tryfn = parts[1].strip()
            name = "%s:%d" %(tryfn, lineno)
            if tryfn.startswith('http:') or tryfn.startswith('https'):
                # remote links are only checked when --urlcheck was given
                if self.config.getvalue("urlcheck"):
                    yield CheckLink(name, parent=self,
                        args=(tryfn, path, lineno, timeout), checkfunc=urlcheck)
            elif tryfn.startswith('webcal:'):
                continue
            else:
                # local file reference, possibly with a #anchor suffix
                i = tryfn.find('#')
                if i != -1:
                    checkfn = tryfn[:i]
                else:
                    checkfn = tryfn
                # the original condition was "checkfn.strip() and (1 or
                # checkfn.endswith('.html'))" -- "(1 or ...)" is always true,
                # so the dead alternative was dropped
                if checkfn.strip():
                    yield CheckLink(name, parent=self,
                        args=(tryfn, path, lineno), checkfunc=localrefcheck)
+
class CheckLink(py.test.collect.Item):
    """Test item validating one link via the supplied check function."""

    def __init__(self, name, parent, args, checkfunc):
        super(CheckLink, self).__init__(name, parent)
        self.args = args
        self.checkfunc = checkfunc

    def runtest(self):
        # delegate to urlcheck/localrefcheck with the prepared arguments
        return self.checkfunc(*self.args)

    def reportinfo(self, basedir=None):
        lineno = self.args[2]
        return (self.fspath, lineno, "checklink: %s" % self.args[0])
+
def urlcheck(tryfn, path, lineno, TIMEOUT_URLOPEN):
    # Try to open a remote URL within the given socket timeout; skip the
    # test on authorization errors (401/403), fail on any other URL error.
    old = py.std.socket.getdefaulttimeout()
    py.std.socket.setdefaulttimeout(TIMEOUT_URLOPEN)
    try:
        try:
            py.builtin.print_("trying remote", tryfn)
            py.std.urllib2.urlopen(tryfn)
        finally:
            # always restore the process-wide default timeout
            py.std.socket.setdefaulttimeout(old)
    except (py.std.urllib2.URLError, py.std.urllib2.HTTPError):
        e = sys.exc_info()[1]
        if getattr(e, 'code', None) in (401, 403): # authorization required, forbidden
            py.test.skip("%s: %s" %(tryfn, str(e)))
        else:
            py.test.fail("remote reference error %r in %s:%d\n%s" %(
                         tryfn, path.basename, lineno+1, e))
+
def localrefcheck(tryfn, path, lineno):
    # assume it should be a file
    i = tryfn.find('#')
    if tryfn.startswith('javascript:'):
        return # don't check JS refs
    if i != -1:
        anchor = tryfn[i+1:]
        tryfn = tryfn[:i]
    else:
        anchor = ''
    fn = path.dirpath(tryfn)
    ishtml = fn.ext == '.html'
    # for .html targets check the corresponding .txt source instead
    fn = ishtml and fn.new(ext='.txt') or fn
    py.builtin.print_("filename is", fn)
    if not fn.check(): # not ishtml or not fn.check():
        if not py.path.local(tryfn).check(): # the html could be there
            py.test.fail("reference error %r in %s:%d" %(
                         tryfn, path.basename, lineno+1))
    if anchor:
        # look for the anchor as plain text or as a ReST link target;
        # NOTE(review): `unicode` makes this branch Python-2 only
        source = unicode(fn.read(), 'latin1')
        source = source.lower().replace('-', ' ') # aehem

        anchor = anchor.replace('-', ' ')
        match2 = ".. _`%s`:" % anchor
        match3 = ".. _%s:" % anchor
        candidates = (anchor, match2, match3)
        py.builtin.print_("candidates", repr(candidates))
        for line in source.split('\n'):
            line = line.strip()
            if line in candidates:
                break
        else:
            py.test.fail("anchor reference error %s#%s in %s:%d" %(
                         tryfn, anchor, path.basename, lineno+1))
+
# define log() once at import time: print to stdout when attached to a
# terminal, otherwise stay silent
if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()):
    def log(msg):
        print(msg)
else:
    def log(msg):
        pass
+
def convert_rest_html(source, source_path, stylesheet=None, encoding='latin1'):
    """ return html latin1-encoded document for the given input.
        source  a ReST-string
        sourcepath where to look for includes (basically)
        stylesheet path (to be used if any)
    """
    from docutils.core import publish_string
    kwargs = {
        'stylesheet' : stylesheet,
        'stylesheet_path': None,
        'traceback' : 1,
        'embed_stylesheet': 0,
        'output_encoding' : encoding,
        #'halt' : 0,  # 'info',
        'halt_level' : 2,
    }
    # docutils uses os.getcwd() :-(
    source_path = os.path.abspath(str(source_path))
    prevdir = os.getcwd()
    try:
        #os.chdir(os.path.dirname(source_path))
        return publish_string(source, source_path, writer_name='html',
                              settings_overrides=kwargs)
    finally:
        # restore the working directory even if docutils raised
        os.chdir(prevdir)
+
def process(txtpath, encoding='latin1'):
    """ process a textfile: render ``txtpath`` (a .txt ReST file) to a
    sibling .html file, using a style.css next to it when present. """
    log("processing %s" % txtpath)
    assert txtpath.check(ext='.txt')
    if isinstance(txtpath, py.path.svnwc):
        txtpath = txtpath.localpath
    htmlpath = txtpath.new(ext='.html')
    #svninfopath = txtpath.localpath.new(ext='.svninfo')

    style = txtpath.dirpath('style.css')
    if style.check():
        stylesheet = style.basename
    else:
        stylesheet = None
    content = unicode(txtpath.read(), encoding)
    doc = convert_rest_html(content, txtpath, stylesheet=stylesheet, encoding=encoding)
    # close the output file explicitly instead of relying on GC to flush
    # the handle left open by open('wb').write(...)
    f = htmlpath.open('wb')
    try:
        f.write(doc)
    finally:
        f.close()
    #log("wrote %r" % htmlpath)
    #if txtpath.check(svnwc=1, versioned=1):
    #    info = txtpath.info()
    #    svninfopath.dump(info)
+
+if sys.version_info > (3, 0):
+ def _uni(s): return s
+else:
+ def _uni(s):
+ return unicode(s)
+
# extract the <body> contents, then the docutils "document" div within it
rex1 = re.compile(r'.*<body>(.*)</body>.*', re.MULTILINE | re.DOTALL)
rex2 = re.compile(r'.*<div class="document">(.*)</div>.*', re.MULTILINE | re.DOTALL)
+
def strip_html_header(string, encoding='utf8'):
    """ return the content of the body-tag """
    # NOTE(review): uses the Python-2-only `unicode` builtin
    uni = unicode(string, encoding)
    # narrow successively: first <body>, then the docutils document div;
    # stop as soon as one of the patterns no longer matches
    for rex in rex1,rex2:
        match = rex.search(uni)
        if not match:
            break
        uni = match.group(1)
    return uni
+
class Project: # used for confrest.py files
    """Minimal default project: render in place, html file next to the txt."""

    def __init__(self, sourcepath):
        self.sourcepath = sourcepath

    def process(self, path):
        return process(path)

    def get_htmloutputpath(self, path):
        return path.new(ext='html')
Added: pypy/branch/py12/py/_plugin/pytest_resultlog.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_resultlog.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,98 @@
+"""non-xml machine-readable logging of test results.
+ Useful for buildbot integration code. See the `PyPy-test`_
+ web page for post-processing.
+
+.. _`PyPy-test`: http://codespeak.net:8099/summary
+
+"""
+
+import py
+from py.builtin import print_
+
+def pytest_addoption(parser):
+    """ add the --resultlog command line option. """
+    group = parser.getgroup("resultlog", "resultlog plugin options")
+    group.addoption('--resultlog', action="store", dest="resultlog", metavar="path", default=None,
+        help="path for machine-readable result log.")
+
+def pytest_configure(config):
+    """ open the log file and register a ResultLog plugin when requested. """
+    resultlog = config.option.resultlog
+    if resultlog:
+        logfile = open(resultlog, 'w', 1) # line buffered
+        config._resultlog = ResultLog(config, logfile)
+        config.pluginmanager.register(config._resultlog)
+
+def pytest_unconfigure(config):
+    """ close the log file and unregister the plugin again. """
+    resultlog = getattr(config, '_resultlog', None)
+    if resultlog:
+        resultlog.logfile.close()
+        del config._resultlog
+        config.pluginmanager.unregister(resultlog)
+
+def generic_path(item):
+    """ return a path-like id for *item*: filesystem parts joined by '/',
+    a ':' separating the fs path from python names, '.' between python
+    parts.  Bracketed names ('(' or '[') attach without a separator. """
+    chain = item.listchain()
+    gpath = [chain[0].name]
+    fspath = chain[0].fspath
+    fspart = False
+    for node in chain[1:]:
+        newfspath = node.fspath
+        if newfspath == fspath:
+            # still inside the same file: python-level separator
+            if fspart:
+                gpath.append(':')
+                fspart = False
+            else:
+                gpath.append('.')
+        else:
+            gpath.append('/')
+            fspart = True
+        name = node.name
+        if name[0] in '([':
+            # parametrized/bracketed name: drop the separator just added
+            gpath.pop()
+        gpath.append(name)
+        fspath = newfspath
+    return ''.join(gpath)
+
+class ResultLog(object):
+    """ writes one line per test outcome ("<code> <testpath>"), followed
+    by indented longrepr lines, to a (preferably line-buffered) file. """
+    def __init__(self, config, logfile):
+        self.config = config
+        self.logfile = logfile # preferably line buffered
+
+    def write_log_entry(self, testpath, shortrepr, longrepr):
+        print_("%s %s" % (shortrepr, testpath), file=self.logfile)
+        for line in longrepr.splitlines():
+            print_(" %s" % line, file=self.logfile)
+
+    def log_outcome(self, node, shortrepr, longrepr):
+        testpath = generic_path(node)
+        self.write_log_entry(testpath, shortrepr, longrepr)
+
+    def pytest_runtest_logreport(self, report):
+        # let plugins determine the status letter first
+        res = self.config.hook.pytest_report_teststatus(report=report)
+        if res is not None:
+            code = res[1]
+        else:
+            code = report.shortrepr
+        # NOTE(review): longrepr stays unbound if a report is neither
+        # passed/failed/skipped nor coded 'x'/'P' -- presumably impossible
+        if code == 'x':
+            longrepr = str(report.longrepr)
+        elif code == 'P':
+            longrepr = ''
+        elif report.passed:
+            longrepr = ""
+        elif report.failed:
+            longrepr = str(report.longrepr)
+        elif report.skipped:
+            longrepr = str(report.longrepr.reprcrash.message)
+        self.log_outcome(report.item, code, longrepr)
+
+    def pytest_collectreport(self, report):
+        if not report.passed:
+            if report.failed:
+                code = "F"
+            else:
+                assert report.skipped
+                code = "S"
+            longrepr = str(report.longrepr.reprcrash)
+            self.log_outcome(report.collector, code, longrepr)
+
+    def pytest_internalerror(self, excrepr):
+        # internal errors are keyed by crash path with code '!'
+        path = excrepr.reprcrash.path
+        self.write_log_entry(path, '!', str(excrepr))
Added: pypy/branch/py12/py/_plugin/pytest_runner.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_runner.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,400 @@
+"""
+collect and run test items and create reports.
+"""
+
+import py, sys
+
+def pytest_namespace():
+    """ expose the outcome helpers below as py.test.* names. """
+    return {
+        'raises' : raises,
+        'skip' : skip,
+        'importorskip' : importorskip,
+        'fail' : fail,
+        'exit' : exit,
+    }
+
+#
+# pytest plugin hooks
+
+# XXX move to pytest_sessionstart and fix py.test owns tests
+def pytest_configure(config):
+    # one shared SetupState per test process
+    config._setupstate = SetupState()
+
+def pytest_sessionfinish(session, exitstatus):
+    """ run the final teardown of everything and report errors from it. """
+    if hasattr(session.config, '_setupstate'):
+        hook = session.config.hook
+        rep = hook.pytest__teardown_final(session=session)
+        if rep:
+            hook.pytest__teardown_final_logerror(report=rep)
+
+def pytest_make_collect_report(collector):
+    """ run collection, capturing any non-keyboard exception in the report. """
+    result = excinfo = None
+    try:
+        result = collector._memocollect()
+    except KeyboardInterrupt:
+        raise
+    except:
+        # deliberately broad: any collection error becomes a failed report
+        excinfo = py.code.ExceptionInfo()
+    return CollectReport(collector, result, excinfo)
+
+def pytest_runtest_protocol(item):
+    runtestprotocol(item)
+    return True
+
+def runtestprotocol(item, log=True):
+    """ run setup/call/teardown for *item* and return the list of reports;
+    the "call" phase only runs when setup passed. """
+    rep = call_and_report(item, "setup", log)
+    reports = [rep]
+    if rep.passed:
+        reports.append(call_and_report(item, "call", log))
+    reports.append(call_and_report(item, "teardown", log))
+    return reports
+
+def pytest_runtest_setup(item):
+    item.config._setupstate.prepare(item)
+
+def pytest_runtest_call(item):
+    if not item._deprecated_testexecution():
+        item.runtest()
+
+def pytest_runtest_makereport(item, call):
+    return ItemTestReport(item, call.excinfo, call.when)
+
+def pytest_runtest_teardown(item):
+    item.config._setupstate.teardown_exact(item)
+
+def pytest__teardown_final(session):
+    """ tear down everything left on the setup stack; return an error
+    report when that raised. """
+    call = CallInfo(session.config._setupstate.teardown_all, when="teardown")
+    if call.excinfo:
+        # hide py-lib internal frames from the traceback
+        ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir)
+        call.excinfo.traceback = ntraceback.filter()
+        rep = TeardownErrorReport(call.excinfo)
+        return rep
+
+def pytest_report_teststatus(report):
+    # only setup/teardown reports get special letters here
+    if report.when in ("setup", "teardown"):
+        if report.failed:
+            # category, shortletter, verbose-word
+            return "error", "E", "ERROR"
+        elif report.skipped:
+            return "skipped", "s", "SKIPPED"
+        else:
+            return "", "", ""
+#
+# Implementation
+
+def call_and_report(item, when, log=True):
+    """ run one phase hook, turn the result into a report and optionally
+    log it (passing setup/teardown reports are not logged). """
+    call = call_runtest_hook(item, when)
+    hook = item.ihook
+    report = hook.pytest_runtest_makereport(item=item, call=call)
+    if log and (when == "call" or not report.passed):
+        hook.pytest_runtest_logreport(report=report)
+    return report
+
+def call_runtest_hook(item, when):
+    # resolve e.g. pytest_runtest_setup and wrap its invocation
+    hookname = "pytest_runtest_" + when
+    ihook = getattr(item.ihook, hookname)
+    return CallInfo(lambda: ihook(item=item), when=when)
+
+class CallInfo:
+    """ result/exception info of an invoked function, tagged with the
+    test phase ("setup"/"call"/"teardown") it ran in. """
+    excinfo = None  # set when the call raised
+    def __init__(self, func, when):
+        self.when = when
+        try:
+            self.result = func()
+        except KeyboardInterrupt:
+            raise
+        except:
+            # deliberately broad: capture anything as ExceptionInfo
+            self.excinfo = py.code.ExceptionInfo()
+
+    def __repr__(self):
+        if self.excinfo:
+            status = "exception: %s" % str(self.excinfo.value)
+        else:
+            status = "result: %r" % (self.result,)
+        return "<CallInfo when=%r %s>" % (self.when, status)
+
+class BaseReport(object):
+    """ shared repr/terminal-writing behaviour of all report objects. """
+    def __repr__(self):
+        l = ["%s=%s" %(key, value)
+             for key, value in self.__dict__.items()]
+        return "<%s %s>" %(self.__class__.__name__, " ".join(l),)
+
+    def toterminal(self, out):
+        longrepr = self.longrepr
+        if hasattr(longrepr, 'toterminal'):
+            longrepr.toterminal(out)
+        else:
+            out.line(str(longrepr))
+
+class ItemTestReport(BaseReport):
+    """ report for one phase (setup/call/teardown) of a test item;
+    exactly one of failed/passed/skipped is set to True. """
+    failed = passed = skipped = False
+
+    def __init__(self, item, excinfo=None, when=None):
+        self.item = item
+        self.when = when
+        if item and when != "setup":
+            self.keywords = item.readkeywords()
+        else:
+            # if we fail during setup it might mean
+            # we are not able to access the underlying object
+            # this might e.g. happen if we are unpickled
+            # and our parent collector did not collect us
+            # (because it e.g. skipped for platform reasons)
+            self.keywords = {}
+        if not excinfo:
+            self.passed = True
+            self.shortrepr = "."
+        else:
+            if not isinstance(excinfo, py.code.ExceptionInfo):
+                # raw (e.g. remote/pickled) failure representation
+                self.failed = True
+                shortrepr = "?"
+                longrepr = excinfo
+            elif excinfo.errisinstance(py.test.skip.Exception):
+                self.skipped = True
+                shortrepr = "s"
+                longrepr = self.item._repr_failure_py(excinfo)
+            else:
+                self.failed = True
+                shortrepr = self.item.shortfailurerepr
+                if self.when == "call":
+                    longrepr = self.item.repr_failure(excinfo)
+                else: # exception in setup or teardown
+                    longrepr = self.item._repr_failure_py(excinfo)
+                    shortrepr = shortrepr.lower()
+            self.shortrepr = shortrepr
+            self.longrepr = longrepr
+
+    def __repr__(self):
+        status = (self.passed and "passed" or
+                  self.skipped and "skipped" or
+                  self.failed and "failed" or
+                  "CORRUPT")
+        l = [repr(self.item.name), "when=%r" % self.when, "outcome %r" % status,]
+        if hasattr(self, 'node'):
+            # distributed run: record which tx node produced the report
+            l.append("txnode=%s" % self.node.gateway.id)
+        info = " " .join(map(str, l))
+        return "<ItemTestReport %s>" % info
+
+    def getnode(self):
+        return self.item
+
+class CollectReport(BaseReport):
+    """ report of a collection attempt for one collector node. """
+    skipped = failed = passed = False
+
+    def __init__(self, collector, result, excinfo=None):
+        self.collector = collector
+        if not excinfo:
+            self.passed = True
+            self.result = result
+        else:
+            self.longrepr = self.collector._repr_failure_py(excinfo)
+            if excinfo.errisinstance(py.test.skip.Exception):
+                self.skipped = True
+                self.reason = str(excinfo.value)
+            else:
+                self.failed = True
+
+    def getnode(self):
+        return self.collector
+
+class TeardownErrorReport(BaseReport):
+    """ report of a failure during the final teardown_all(). """
+    skipped = passed = False
+    failed = True
+    when = "teardown"
+    def __init__(self, excinfo):
+        self.longrepr = excinfo.getrepr(funcargs=True)
+
+class SetupState(object):
+    """ shared state for setting up/tearing down test items or collectors. """
+    def __init__(self):
+        self.stack = []         # collectors/items currently set up
+        self._finalizers = {}   # colitem -> list of registered finalizers
+
+    def addfinalizer(self, finalizer, colitem):
+        """ attach a finalizer to the given colitem.
+        if colitem is None, this will add a finalizer that
+        is called at the end of teardown_all().
+        """
+        assert hasattr(finalizer, '__call__')
+        #assert colitem in self.stack
+        self._finalizers.setdefault(colitem, []).append(finalizer)
+
+    def _pop_and_teardown(self):
+        colitem = self.stack.pop()
+        self._teardown_with_finalization(colitem)
+
+    def _callfinalizers(self, colitem):
+        # run registered finalizers LIFO and drop them from the registry
+        finalizers = self._finalizers.pop(colitem, None)
+        while finalizers:
+            fin = finalizers.pop()
+            fin()
+
+    def _teardown_with_finalization(self, colitem):
+        self._callfinalizers(colitem)
+        if colitem:
+            colitem.teardown()
+        # sanity check: remaining finalizers must belong to live nodes
+        for colitem in self._finalizers:
+            assert colitem is None or colitem in self.stack
+
+    def teardown_all(self):
+        while self.stack:
+            self._pop_and_teardown()
+        # None-keyed finalizers run at the very end
+        self._teardown_with_finalization(None)
+        assert not self._finalizers
+
+    def teardown_exact(self, item):
+        if self.stack and item == self.stack[-1]:
+            self._pop_and_teardown()
+        else:
+            self._callfinalizers(item)
+
+    def prepare(self, colitem):
+        """ setup objects along the collector chain to the test-method
+        and teardown previously setup objects."""
+        needed_collectors = colitem.listchain()
+        while self.stack:
+            if self.stack == needed_collectors[:len(self.stack)]:
+                break
+            self._pop_and_teardown()
+        # check if the last collection node has raised an error
+        for col in self.stack:
+            if hasattr(col, '_prepare_exc'):
+                py.builtin._reraise(*col._prepare_exc)
+        for col in needed_collectors[len(self.stack):]:
+            self.stack.append(col)
+            try:
+                col.setup()
+            except Exception:
+                # remember the failure so later prepare() calls re-raise it
+                col._prepare_exc = sys.exc_info()
+                raise
+
+# =============================================================
+# Test OutcomeExceptions and helpers for creating them.
+
+
+class OutcomeException(Exception):
+    """ OutcomeException and its subclass instances indicate and
+    contain info about test and collection outcomes.
+    """
+    def __init__(self, msg=None, excinfo=None):
+        self.msg = msg
+        self.excinfo = excinfo
+
+    def __repr__(self):
+        if self.msg:
+            return repr(self.msg)
+        return "<%s instance>" %(self.__class__.__name__,)
+    __str__ = __repr__
+
+class Skipped(OutcomeException):
+    # XXX hackish: on 3k we fake to live in the builtins
+    # in order to have Skipped exception printing shorter/nicer
+    __module__ = 'builtins'
+
+class Failed(OutcomeException):
+    """ raised from an explicit call to py.test.fail() """
+    __module__ = 'builtins'
+
+class ExceptionFailure(Failed):
+    """ raised by py.test.raises on an exception-assertion mismatch. """
+    def __init__(self, expr, expected, msg=None, excinfo=None):
+        Failed.__init__(self, msg=msg, excinfo=excinfo)
+        self.expr = expr          # the expression/args that did not raise
+        self.expected = expected  # the expected exception class
+
+class Exit(KeyboardInterrupt):
+    """ raised by py.test.exit for immediate program exits without tracebacks and reporter/summary. """
+    def __init__(self, msg="unknown reason"):
+        self.msg = msg
+        KeyboardInterrupt.__init__(self, msg)
+
+# exposed helper methods
+
+def exit(msg):
+    """ exit testing process as if KeyboardInterrupt was triggered. """
+    __tracebackhide__ = True
+    raise Exit(msg)
+
+exit.Exception = Exit
+
+def skip(msg=""):
+    """ skip an executing test with the given message. Note: it's usually
+    better to use the py.test.mark.skipif marker to declare a test to be
+    skipped under certain conditions like mismatching platforms or
+    dependencies. See the pytest_skipping plugin for details.
+    """
+    __tracebackhide__ = True
+    raise Skipped(msg=msg)
+
+skip.Exception = Skipped
+
+def fail(msg=""):
+    """ explicitly fail a currently-executing test with the given message. """
+    __tracebackhide__ = True
+    raise Failed(msg=msg)
+
+fail.Exception = Failed
+
+def raises(ExpectedException, *args, **kwargs):
+ """ if args[0] is callable: raise AssertionError if calling it with
+ the remaining arguments does not raise the expected exception.
+ if args[0] is a string: raise AssertionError if executing the
+ the string in the calling scope does not raise expected exception.
+ for examples:
+ x = 5
+ raises(TypeError, lambda x: x + 'hello', x=x)
+ raises(TypeError, "x + 'hello'")
+ """
+ __tracebackhide__ = True
+ assert args
+ if isinstance(args[0], str):
+ code, = args
+ assert isinstance(code, str)
+ frame = sys._getframe(1)
+ loc = frame.f_locals.copy()
+ loc.update(kwargs)
+ #print "raises frame scope: %r" % frame.f_locals
+ try:
+ code = py.code.Source(code).compile()
+ py.builtin.exec_(code, frame.f_globals, loc)
+ # XXX didn'T mean f_globals == f_locals something special?
+ # this is destroyed here ...
+ except ExpectedException:
+ return py.code.ExceptionInfo()
+ else:
+ func = args[0]
+ try:
+ func(*args[1:], **kwargs)
+ except ExpectedException:
+ return py.code.ExceptionInfo()
+ k = ", ".join(["%s=%r" % x for x in kwargs.items()])
+ if k:
+ k = ', ' + k
+ expr = '%s(%r%s)' %(getattr(func, '__name__', func), args, k)
+ raise ExceptionFailure(msg="DID NOT RAISE",
+ expr=args, expected=ExpectedException)
+
+raises.Exception = ExceptionFailure
+
+def importorskip(modname, minversion=None):
+    """ return imported module if it has a higher __version__ than the
+    optionally specified 'minversion' - otherwise call py.test.skip()
+    with a message detailing the mismatch.
+    """
+    compile(modname, '', 'eval') # to catch syntaxerrors
+    try:
+        mod = __import__(modname, None, None, ['__doc__'])
+    except ImportError:
+        py.test.skip("could not import %r" %(modname,))
+    if minversion is None:
+        return mod
+    verattr = getattr(mod, '__version__', None)
+    if isinstance(minversion, str):
+        minver = minversion.split(".")
+    else:
+        minver = list(minversion)
+    # NOTE(review): components compare as strings, so e.g. "10" < "9"
+    # lexically -- multi-digit version parts compare wrongly
+    if verattr is None or verattr.split(".") < minver:
+        py.test.skip("module %r has __version__ %r, required is: %r" %(
+                     modname, verattr, minversion))
+    return mod
+
Added: pypy/branch/py12/py/_plugin/pytest_skipping.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_skipping.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,243 @@
+"""
+advanced skipping for python test functions, classes or modules.
+
+With this plugin you can mark test functions for conditional skipping
+or as "xfail", expected-to-fail. Skipping a test will avoid running it
+while xfail-marked tests will run and result in an inverted outcome:
+a pass becomes a failure and a fail becomes a semi-passing one.
+
+The need for skipping a test is usually connected to a condition.
+If a test fails under all conditions then it's probably better
+to mark your test as 'xfail'.
+
+By passing ``--report=xfailed,skipped`` to the terminal reporter
+you will see summary information on skips and xfail-run tests
+at the end of a test run.
+
+.. _skipif:
+
+Skipping a single function
+-------------------------------------------
+
+Here is an example for marking a test function to be skipped
+when run on a Python3 interpreter::
+
+ @py.test.mark.skipif("sys.version_info >= (3,0)")
+ def test_function():
+ ...
+
+During test function setup the skipif condition is
+evaluated by calling ``eval(expr, namespace)``. The namespace
+contains the ``sys`` and ``os`` modules and the test
+``config`` object. The latter allows you to skip based
+on a test configuration value e.g. like this::
+
+ @py.test.mark.skipif("not config.getvalue('db')")
+ def test_function(...):
+ ...
+
+Create a shortcut for your conditional skip decorator
+at module level like this::
+
+ win32only = py.test.mark.skipif("sys.platform != 'win32'")
+
+ @win32only
+ def test_function():
+ ...
+
+
+skip groups of test functions
+--------------------------------------
+
+As with all metadata function marking you can do it at
+`whole class- or module level`_. Here is an example
+for skipping all methods of a test class based on platform::
+
+ class TestPosixCalls:
+ pytestmark = py.test.mark.skipif("sys.platform == 'win32'")
+
+ def test_function(self):
+ # will not be setup or run under 'win32' platform
+ #
+
+The ``pytestmark`` decorator will be applied to each test function.
+
+.. _`whole class- or module level`: mark.html#scoped-marking
+
+
+mark a test function as **expected to fail**
+-------------------------------------------------------
+
+You can use the ``xfail`` marker to indicate that you
+expect the test to fail::
+
+ @py.test.mark.xfail
+ def test_function():
+ ...
+
+This test will be run but no traceback will be reported
+when it fails. Instead terminal reporting will list it in the
+"expected to fail" or "unexpectedly passing" sections.
+
+Same as with skipif_ you can also selectively expect a failure
+depending on platform::
+
+ @py.test.mark.xfail("sys.version_info >= (3,0)")
+
+ def test_function():
+ ...
+
+
+skipping on a missing import dependency
+--------------------------------------------------
+
+You can use the following import helper at module level
+or within a test or test setup function::
+
+ docutils = py.test.importorskip("docutils")
+
+If ``docutils`` cannot be imported here, this will lead to a
+skip outcome of the test. You can also skip
+if a library does not come with a high enough version::
+
+ docutils = py.test.importorskip("docutils", minversion="0.3")
+
+The version will be read from the specified module's ``__version__`` attribute.
+
+imperative skip from within a test or setup function
+------------------------------------------------------
+
+If for some reason you cannot declare skip-conditions
+you can also imperatively produce a Skip-outcome from
+within test or setup code. Example::
+
+ def test_function():
+ if not valid_config():
+            py.test.skip("unsupported configuration")
+
+"""
+# XXX py.test.skip, .importorskip and the Skipped class
+# should also be defined in this plugin, requires thought/changes
+
+import py
+
+
+def pytest_runtest_setup(item):
+    # skip the test when its skipif marker expression evaluates true
+    expr, result = evalexpression(item, 'skipif')
+    if result:
+        py.test.skip(expr)
+
+def pytest_runtest_makereport(__multicall__, item, call):
+    """ invert the outcome of xfail-marked tests: a failing call is
+    reported as skipped ("expected failure"), a passing one as failed. """
+    if call.when != "call":
+        return
+    expr, result = evalexpression(item, 'xfail')
+    rep = __multicall__.execute()
+    if result:
+        if call.excinfo:
+            rep.skipped = True
+            rep.failed = rep.passed = False
+        else:
+            rep.skipped = rep.passed = False
+            rep.failed = True
+        rep.keywords['xfail'] = expr
+    else:
+        if 'xfail' in rep.keywords:
+            del rep.keywords['xfail']
+    return rep
+
+# called by terminalreporter progress reporting
+def pytest_report_teststatus(report):
+    # give xfail-marked reports their own categories/letters
+    if 'xfail' in report.keywords:
+        if report.skipped:
+            return "xfailed", "x", "xfail"
+        elif report.failed:
+            return "xpassed", "P", "xpass"
+
+# called by the terminalreporter instance/plugin
+def pytest_terminal_summary(terminalreporter):
+    show_xfailed(terminalreporter)
+    show_skipped(terminalreporter)
+
+def show_xfailed(terminalreporter):
+    """ write the expected-failure and unexpectedly-passing summaries. """
+    tr = terminalreporter
+    xfailed = tr.stats.get("xfailed")
+    if xfailed:
+        if not tr.hasopt('xfailed'):
+            # NOTE(review): this early return also skips the xpassed
+            # section below -- confirm that is intended
+            tr.write_line(
+                "%d expected failures, use --report=xfailed for more info" %
+                len(xfailed))
+            return
+        tr.write_sep("_", "expected failures")
+        for rep in xfailed:
+            entry = rep.longrepr.reprcrash
+            modpath = rep.item.getmodpath(includemodule=True)
+            pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno)
+            reason = rep.longrepr.reprcrash.message
+            # only show the first line of the crash message
+            i = reason.find("\n")
+            if i != -1:
+                reason = reason[:i]
+            tr._tw.line("%s %s" %(pos, reason))
+
+    xpassed = terminalreporter.stats.get("xpassed")
+    if xpassed:
+        tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS")
+        for rep in xpassed:
+            fspath, lineno, modpath = rep.item.reportinfo()
+            pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno)
+            tr._tw.line(pos)
+
+
+def evalexpression(item, keyword):
+    """ evaluate the arguments of marker *keyword* on *item*; return
+    (last-evaluated-expression, result).  String arguments are eval()ed
+    with os/sys/config in the namespace, other values count as booleans;
+    evaluation stops at the first falsy result. """
+    if isinstance(item, py.test.collect.Function):
+        markholder = getattr(item.obj, keyword, None)
+        result = False
+        if markholder:
+            d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config}
+            expr, result = None, True
+            for expr in markholder.args:
+                if isinstance(expr, str):
+                    result = cached_eval(item.config, expr, d)
+                else:
+                    result = expr
+                if not result:
+                    break
+            return expr, result
+    return None, False
+
+def cached_eval(config, expr, d):
+    """ eval *expr* in namespace *d*, memoized per config by expression text. """
+    if not hasattr(config, '_evalcache'):
+        config._evalcache = {}
+    try:
+        return config._evalcache[expr]
+    except KeyError:
+        #import sys
+        #print >>sys.stderr, ("cache-miss: %r" % expr)
+        config._evalcache[expr] = x = eval(expr, d)
+        return x
+
+
+def folded_skips(skipped):
+    """ fold skip events by (path, lineno, message); return a list of
+    (count, path, lineno, message) tuples. """
+    d = {}
+    for event in skipped:
+        entry = event.longrepr.reprcrash
+        key = entry.path, entry.lineno, entry.message
+        d.setdefault(key, []).append(event)
+    l = []
+    for key, events in d.items():
+        l.append((len(events),) + key)
+    return l
+
+def show_skipped(terminalreporter):
+    """ write the skip summary, or a one-line hint when --report=skipped
+    was not requested. """
+    tr = terminalreporter
+    skipped = tr.stats.get('skipped', [])
+    if skipped:
+        if not tr.hasopt('skipped'):
+            tr.write_line(
+                "%d skipped tests, use --report=skipped for more info" %
+                len(skipped))
+            return
+        fskips = folded_skips(skipped)
+        if fskips:
+            tr.write_sep("_", "skipped test summary")
+            for num, fspath, lineno, reason in fskips:
+                tr._tw.line("%s:%d: [%d] %s" %(fspath, lineno, num, reason))
Added: pypy/branch/py12/py/_plugin/pytest_terminal.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_terminal.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,511 @@
+"""
+Implements terminal reporting of the full testing process.
+
+This is a good source for looking at the various reporting hooks.
+"""
+import py
+import sys
+
+optionalhook = py.test.mark.optionalhook
+
+def pytest_addoption(parser):
+    """ add the terminal reporting command line options. """
+    group = parser.getgroup("terminal reporting", "reporting", after="general")
+    # NOTE(review): the trailing comma below turns this statement into a
+    # 1-tuple; harmless but likely unintended
+    group._addoption('-v', '--verbose', action="count",
+        dest="verbose", default=0, help="increase verbosity."),
+    group._addoption('-l', '--showlocals',
+        action="store_true", dest="showlocals", default=False,
+        help="show locals in tracebacks (disabled by default).")
+    group.addoption('--report',
+        action="store", dest="report", default=None, metavar="opts",
+        help="show more info, valid: skipped,xfailed")
+    group._addoption('--tb', metavar="style",
+        action="store", dest="tbstyle", default='long',
+        type="choice", choices=['long', 'short', 'no', 'line'],
+        help="traceback print mode (long/short/line/no).")
+    group._addoption('--fulltrace',
+        action="store_true", dest="fulltrace", default=False,
+        help="don't cut any tracebacks (default is to cut).")
+    group._addoption('--funcargs',
+        action="store_true", dest="showfuncargs", default=False,
+        help="show available function arguments, sorted by plugin")
+
+def pytest_configure(config):
+ if config.option.collectonly:
+ reporter = CollectonlyReporter(config)
+ elif config.option.showfuncargs:
+ config.setsessionclass(ShowFuncargSession)
+ reporter = None
+ else:
+ reporter = TerminalReporter(config)
+ if reporter:
+ # XXX see remote.py's XXX
+ for attr in 'pytest_terminal_hasmarkup', 'pytest_terminal_fullwidth':
+ if hasattr(config, attr):
+ #print "SETTING TERMINAL OPTIONS", attr, getattr(config, attr)
+ name = attr.split("_")[-1]
+ assert hasattr(self.reporter._tw, name), name
+ setattr(reporter._tw, name, getattr(config, attr))
+ config.pluginmanager.register(reporter, 'terminalreporter')
+
+def getreportopt(optvalue):
+    """ parse the --report value (comma separated names, a "no" prefix
+    negates) into a {name: bool} dict. """
+    d = {}
+    if optvalue:
+        for setting in optvalue.split(","):
+            setting = setting.strip()
+            val = True
+            if setting.startswith("no"):
+                val = False
+                setting = setting[2:]
+            d[setting] = val
+    return d
+
+class TerminalReporter:
+    """ default reporter: writes test progress and results to a terminal. """
+    def __init__(self, config, file=None):
+        self.config = config
+        self.stats = {}                    # category -> list of reports
+        self.curdir = py.path.local()      # invocation dir, for relative paths
+        if file is None:
+            file = py.std.sys.stdout
+        self._tw = py.io.TerminalWriter(file)
+        self.currentfspath = None
+        self.gateway2info = {}
+        self._reportopt = getreportopt(config.getvalue('report'))
+
+    def hasopt(self, name):
+        # was "name" (or negated "noname") given in --report?
+        return self._reportopt.get(name, False)
+
+ def write_fspath_result(self, fspath, res):
+ fspath = self.curdir.bestrelpath(fspath)
+ if fspath != self.currentfspath:
+ self._tw.line()
+ relpath = self.curdir.bestrelpath(fspath)
+ self._tw.write(relpath + " ")
+ self.currentfspath = fspath
+ self._tw.write(res)
+
+    def write_ensure_prefix(self, prefix, extra="", **kwargs):
+        # start a fresh line whenever the prefix changes
+        if self.currentfspath != prefix:
+            self._tw.line()
+            self.currentfspath = prefix
+            self._tw.write(prefix)
+        if extra:
+            self._tw.write(extra, **kwargs)
+            self.currentfspath = -2  # sentinel: force a new line next time
+
+    def ensure_newline(self):
+        if self.currentfspath:
+            self._tw.line()
+            self.currentfspath = None
+
+    def write_line(self, line, **markup):
+        line = str(line)
+        self.ensure_newline()
+        self._tw.line(line, **markup)
+
+    def write_sep(self, sep, title=None, **markup):
+        self.ensure_newline()
+        self._tw.sep(sep, title, **markup)
+
+    def getcategoryletterword(self, rep):
+        """ return (category, letter, word) for a report; plugins get
+        first say via the pytest_report_teststatus hook. """
+        res = self.config.hook.pytest_report_teststatus(report=rep)
+        if res:
+            return res
+        for cat in 'skipped failed passed ???'.split():
+            if getattr(rep, cat, None):
+                break
+        return cat, self.getoutcomeletter(rep), self.getoutcomeword(rep)
+
+    def getoutcomeletter(self, rep):
+        return rep.shortrepr
+
+    def getoutcomeword(self, rep):
+        # may return a plain word or a (word, markup-dict) tuple
+        if rep.passed:
+            return "PASS", dict(green=True)
+        elif rep.failed:
+            return "FAIL", dict(red=True)
+        elif rep.skipped:
+            return "SKIP"
+        else:
+            return "???", dict(red=True)
+
+    def pytest_internalerror(self, excrepr):
+        # prefix every traceback line so internal errors stand out
+        for line in str(excrepr).split("\n"):
+            self.write_line("INTERNALERROR> " + line)
+
+    def pytest_plugin_registered(self, plugin):
+        if self.config.option.traceconfig:
+            msg = "PLUGIN registered: %s" %(plugin,)
+            # XXX this event may happen during setup/teardown time
+            # which unfortunately captures our output here
+            # which garbles our output if we use self.write_line
+            self.write_line(msg)
+
+    @optionalhook
+    def pytest_gwmanage_newgateway(self, gateway, platinfo):
+        """ announce a newly instantiated execnet gateway (dist-testing). """
+        #self.write_line("%s instantiated gateway from spec %r" %(gateway.id, gateway.spec._spec))
+        d = {}
+        d['version'] = repr_pythonversion(platinfo.version_info)
+        d['id'] = gateway.id
+        d['spec'] = gateway.spec._spec
+        d['platform'] = platinfo.platform
+        if self.config.option.verbose:
+            d['extra'] = "- " + platinfo.executable
+        else:
+            d['extra'] = ""
+        d['cwd'] = platinfo.cwd
+        infoline = ("[%(id)s] %(spec)s -- platform %(platform)s, "
+                    "Python %(version)s "
+                    "cwd: %(cwd)s"
+                    "%(extra)s" % d)
+        self.write_line(infoline)
+        # remembered for write_platinfo() in the failure summaries
+        self.gateway2info[gateway] = infoline
+
+    @optionalhook
+    def pytest_testnodeready(self, node):
+        self.write_line("[%s] txnode ready to receive tests" %(node.gateway.id,))
+
+    @optionalhook
+    def pytest_testnodedown(self, node, error):
+        if error:
+            self.write_line("[%s] node down, error: %s" %(node.gateway.id, error))
+
+    @optionalhook
+    def pytest_rescheduleitems(self, items):
+        if self.config.option.debug:
+            self.write_sep("!", "RESCHEDULING %s " %(items,))
+
+    @optionalhook
+    def pytest_looponfailinfo(self, failreports, rootdirs):
+        if failreports:
+            self.write_sep("#", "LOOPONFAILING", red=True)
+            for report in failreports:
+                loc = self._getcrashline(report)
+                self.write_line(loc, red=True)
+        self.write_sep("#", "waiting for changes")
+        for rootdir in rootdirs:
+            self.write_line("### Watching: %s" %(rootdir,), bold=True)
+
+
+    def pytest_trace(self, category, msg):
+        # note: 'or' binds looser than 'and' -- debug alone suffices
+        if self.config.option.debug or \
+           self.config.option.traceconfig and category.find("config") != -1:
+            self.write_line("[%s] %s" %(category, msg))
+
+    def pytest_deselected(self, items):
+        self.stats.setdefault('deselected', []).append(items)
+
+    def pytest_itemstart(self, item, node=None):
+        if getattr(self.config.option, 'dist', 'no') != "no":
+            # for dist-testing situations itemstart means we
+            # queued the item for sending, not interesting (unless debugging)
+            if self.config.option.debug:
+                line = self._reportinfoline(item)
+                extra = ""
+                if node:
+                    extra = "-> [%s]" % node.gateway.id
+                self.write_ensure_prefix(line, extra)
+        else:
+            if self.config.option.verbose:
+                line = self._reportinfoline(item)
+                self.write_ensure_prefix(line, "")
+            else:
+                # ensure that the path is printed before the
+                # 1st test of a module starts running
+                self.write_fspath_result(self._getfspath(item), "")
+
+    def pytest__teardown_final_logerror(self, report):
+        self.stats.setdefault("error", []).append(report)
+
+    def pytest_runtest_logreport(self, report):
+        rep = report
+        cat, letter, word = self.getcategoryletterword(rep)
+        if not letter and not word:
+            # probably passed setup/teardown
+            return
+        if isinstance(word, tuple):
+            word, markup = word
+        else:
+            markup = {}
+        self.stats.setdefault(cat, []).append(rep)
+        if not self.config.option.verbose:
+            # compact mode: one letter per test
+            self.write_fspath_result(self._getfspath(rep.item), letter)
+        else:
+            line = self._reportinfoline(rep.item)
+            if not hasattr(rep, 'node'):
+                self.write_ensure_prefix(line, word, **markup)
+            else:
+                self.ensure_newline()
+                if hasattr(rep, 'node'):  # always true in this branch
+                    self._tw.write("[%s] " % rep.node.gateway.id)
+                self._tw.write(word, **markup)
+                self._tw.write(" " + line)
+                self.currentfspath = -2
+
+    def pytest_collectreport(self, report):
+        if not report.passed:
+            if report.failed:
+                self.stats.setdefault("error", []).append(report)
+                msg = report.longrepr.reprcrash.message
+                self.write_fspath_result(report.collector.fspath, "E")
+            elif report.skipped:
+                self.stats.setdefault("skipped", []).append(report)
+                self.write_fspath_result(report.collector.fspath, "S")
+
+    def pytest_sessionstart(self, session):
+        """ print the session header: platform/version line, plugin
+        header lines and the test arguments. """
+        self.write_sep("=", "test session starts", bold=True)
+        self._sessionstarttime = py.std.time.time()
+
+        verinfo = ".".join(map(str, sys.version_info[:3]))
+        msg = "python: platform %s -- Python %s" % (sys.platform, verinfo)
+        msg += " -- pytest-%s" % (py.__version__)
+        if self.config.option.verbose or self.config.option.debug or getattr(self.config.option, 'pastebin', None):
+            msg += " -- " + str(sys.executable)
+        self.write_line(msg)
+        # header lines from plugins, innermost plugin last
+        lines = self.config.hook.pytest_report_header(config=self.config)
+        lines.reverse()
+        for line in flatten(lines):
+            self.write_line(line)
+        for i, testarg in enumerate(self.config.args):
+            self.write_line("test object %d: %s" %(i+1, testarg))
+
+    def pytest_sessionfinish(self, exitstatus, __multicall__):
+        # let the other implementations run first, then summarize
+        __multicall__.execute()
+        self._tw.line("")
+        if exitstatus in (0, 1, 2):
+            self.summary_errors()
+            self.summary_failures()
+            self.config.hook.pytest_terminal_summary(terminalreporter=self)
+        if exitstatus == 2:
+            self._report_keyboardinterrupt()
+        self.summary_deselected()
+        self.summary_stats()
+
+    def pytest_keyboard_interrupt(self, excinfo):
+        # remember the repr; printed later by _report_keyboardinterrupt()
+        self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
+
+    def _report_keyboardinterrupt(self):
+        self.write_sep("!", "KEYBOARD INTERRUPT")
+        excrepr = self._keyboardinterrupt_memo
+        if self.config.option.verbose:
+            excrepr.toterminal(self._tw)
+        else:
+            excrepr.reprcrash.toterminal(self._tw)
+
+    def _getcrashline(self, report):
+        try:
+            return report.longrepr.reprcrash
+        except AttributeError:
+            # fall back to a truncated plain representation
+            return str(report.longrepr)[:50]
+
+ def _reportinfoline(self, item):
+ collect_fspath = self._getfspath(item)
+ fspath, lineno, msg = self._getreportinfo(item)
+ if fspath and fspath != collect_fspath:
+ fspath = "%s <- %s" % (
+ self.curdir.bestrelpath(collect_fspath),
+ self.curdir.bestrelpath(fspath))
+ elif fspath:
+ fspath = self.curdir.bestrelpath(fspath)
+ if lineno is not None:
+ lineno += 1
+ if fspath and lineno and msg:
+ line = "%(fspath)s:%(lineno)s: %(msg)s"
+ elif fspath and msg:
+ line = "%(fspath)s: %(msg)s"
+ elif fspath and lineno:
+ line = "%(fspath)s:%(lineno)s %(extrapath)s"
+ else:
+ line = "[noreportinfo]"
+ return line % locals() + " "
+
+ def _getfailureheadline(self, rep):
+ if hasattr(rep, "collector"):
+ return str(rep.collector.fspath)
+ elif hasattr(rep, 'item'):
+ fspath, lineno, msg = self._getreportinfo(rep.item)
+ return msg
+ else:
+ return "test session"
+
    def _getreportinfo(self, item):
        """Return the (fspath, lineno, msg) tuple for *item*, caching the
        hook result on the item itself.

        NOTE(review): because this method lives inside a class,
        ``item.__reportinfo`` is name-mangled to a class-private attribute
        name; the cache therefore never clashes with other users of the
        item -- presumably intentional, confirm against callers.
        """
        try:
            return item.__reportinfo
        except AttributeError:
            pass
        reportinfo = item.config.hook.pytest_report_iteminfo(item=item)
        # cache on item
        item.__reportinfo = reportinfo
        return reportinfo
+
+ def _getfspath(self, item):
+ try:
+ return item.fspath
+ except AttributeError:
+ fspath, lineno, msg = self._getreportinfo(item)
+ return fspath
+
+ #
+ # summaries for sessionfinish
+ #
+
    def summary_failures(self):
        """Write the FAILURES section unless --tb=no.

        With --tb=line each failure gets a single crash-location line;
        every other style renders the full report repr.
        """
        tbstyle = self.config.getvalue("tbstyle")
        if 'failed' in self.stats and tbstyle != "no":
            self.write_sep("=", "FAILURES")
            for rep in self.stats['failed']:
                if tbstyle == "line":
                    line = self._getcrashline(rep)
                    self.write_line(line)
                else:
                    msg = self._getfailureheadline(rep)
                    self.write_sep("_", msg)
                    self.write_platinfo(rep)
                    rep.toterminal(self._tw)
+
    def summary_errors(self):
        """Write the ERRORS section (collection and setup/teardown errors)
        unless --tb=no.

        NOTE(review): reads ``self.config.option.tbstyle`` directly while
        summary_failures uses ``config.getvalue("tbstyle")`` -- likely
        equivalent, but inconsistent; confirm before unifying.
        """
        if 'error' in self.stats and self.config.option.tbstyle != "no":
            self.write_sep("=", "ERRORS")
            for rep in self.stats['error']:
                msg = self._getfailureheadline(rep)
                if not hasattr(rep, 'when'):
                    # collect
                    msg = "ERROR during collection " + msg
                elif rep.when == "setup":
                    msg = "ERROR at setup of " + msg
                elif rep.when == "teardown":
                    msg = "ERROR at teardown of " + msg
                self.write_sep("_", msg)
                self.write_platinfo(rep)
                rep.toterminal(self._tw)
+
    def write_platinfo(self, rep):
        # distributed runs only: print which node/gateway produced the
        # report, truncated to the terminal width
        if hasattr(rep, 'node'):
            # NOTE(review): the fallback string contains a %r placeholder
            # that is never %-formatted; it would be printed literally
            self.write_line(self.gateway2info.get(
                rep.node.gateway,
                "node %r (platinfo not found? strange)")
                [:self._tw.fullwidth-1])
+
+ def summary_stats(self):
+ session_duration = py.std.time.time() - self._sessionstarttime
+
+ keys = "failed passed skipped deselected".split()
+ for key in self.stats.keys():
+ if key not in keys:
+ keys.append(key)
+ parts = []
+ for key in keys:
+ val = self.stats.get(key, None)
+ if val:
+ parts.append("%d %s" %(len(val), key))
+ line = ", ".join(parts)
+ # XXX coloring
+ self.write_sep("=", "%s in %.2f seconds" %(line, session_duration))
+
    def summary_deselected(self):
        # report how many tests the -k keyword filter removed
        if 'deselected' in self.stats:
            self.write_sep("=", "%d tests deselected by %r" %(
                len(self.stats['deselected']), self.config.option.keyword), bold=True)
+
+
class CollectonlyReporter:
    """Reporter for --collectonly: prints the collection tree without
    running any tests and summarizes collection failures at the end."""

    INDENT = "  "

    def __init__(self, config, out=None):
        self.config = config
        out = py.std.sys.stdout if out is None else out
        self.out = py.io.TerminalWriter(out)
        self.indent = ""       # grows/shrinks with collector nesting
        self._failed = []      # collect reports that did not pass

    def outindent(self, line):
        """Write *line* prefixed with the current indentation."""
        self.out.line("%s%s" % (self.indent, line))

    def pytest_internalerror(self, excrepr):
        for errline in str(excrepr).split("\n"):
            self.out.line("INTERNALERROR> " + errline)

    def pytest_collectstart(self, collector):
        self.outindent(collector)
        self.indent += self.INDENT

    def pytest_itemstart(self, item, node=None):
        self.outindent(item)

    def pytest_collectreport(self, report):
        if not report.passed:
            self.outindent("!!! %s !!!" % report.longrepr.reprcrash.message)
            self._failed.append(report)
        self.indent = self.indent[:-len(self.INDENT)]

    def pytest_sessionfinish(self, session, exitstatus):
        if self._failed:
            self.out.sep("!", "collection failures")
            for rep in self._failed:
                rep.toterminal(self.out)
+
+
def repr_pythonversion(v=None):
    """Render a ``sys.version_info``-like 5-tuple as "X.Y.Z-level-serial".

    Anything that does not %-format as five values falls back to str().
    """
    version = sys.version_info if v is None else v
    try:
        return "%s.%s.%s-%s-%s" % version
    except (TypeError, ValueError):
        return str(version)
+
def flatten(l):
    """Yield the leaves of arbitrarily nested lists/tuples, depth-first,
    preserving left-to-right order."""
    for item in l:
        if not isinstance(item, (list, tuple)):
            yield item
        else:
            for leaf in flatten(item):
                yield leaf
+
+from py._test.session import Session
class ShowFuncargSession(Session):
    """Session replacement for --funcargs: instead of running tests, list
    the funcarg factories visible from the first collection node."""

    def main(self, colitems):
        self.fspath = py.path.local()
        self.sessionstarts()
        try:
            self.showargs(colitems[0])
        finally:
            # always finish with exitstatus 1 -- no tests were run
            self.sessionfinishes(exitstatus=1)

    def showargs(self, colitem):
        """Print every pytest_funcarg__* factory (name, location with -v)
        together with its docstring, grouped by defining plugin."""
        tw = py.io.TerminalWriter()
        from py._test.funcargs import getplugins
        from py._test.funcargs import FuncargRequest
        plugins = getplugins(colitem, withpy=True)
        verbose = self.config.getvalue("verbose")
        for plugin in plugins:
            available = []
            for name, factory in vars(plugin).items():
                if name.startswith(FuncargRequest._argprefix):
                    name = name[len(FuncargRequest._argprefix):]
                    # NOTE(review): ``available`` holds [name, factory]
                    # pairs, so this membership test against the bare name
                    # is always True; harmless because vars() keys are
                    # unique per plugin, but the check is dead code
                    if name not in available:
                        available.append([name, factory])
            if available:
                pluginname = plugin.__name__  # NOTE(review): unused
                for name, factory in available:
                    loc = self.getlocation(factory)
                    if verbose:
                        funcargspec = "%s -- %s" %(name, loc,)
                    else:
                        funcargspec = name
                    tw.line(funcargspec, green=True)
                    doc = factory.__doc__ or ""
                    if doc:
                        for line in doc.split("\n"):
                            tw.line(" " + line.strip())
                    else:
                        tw.line(" %s: no docstring available" %(loc,),
                            red=True)

    def getlocation(self, function):
        """Return "path:lineno" (1-based) for *function*, relative to the
        session root when possible."""
        import inspect
        fn = py.path.local(inspect.getfile(function))
        lineno = py.builtin._getcode(function).co_firstlineno
        if fn.relto(self.fspath):
            fn = fn.relto(self.fspath)
        return "%s:%d" %(fn, lineno+1)
Added: pypy/branch/py12/py/_plugin/pytest_terminal.py.orig
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_terminal.py.orig Fri Apr 30 17:07:52 2010
@@ -0,0 +1,513 @@
+"""
+Implements terminal reporting of the full testing process.
+
+This is a good source for looking at the various reporting hooks.
+"""
+import py
+import sys
+
+optionalhook = py.test.mark.optionalhook
+
def pytest_addoption(parser):
    """Register the terminal-reporting command line options
    (-v, -l, --report, --tb, --fulltrace, --funcargs)."""
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    # NOTE(review): stray trailing comma below makes this line a 1-tuple
    # expression; harmless but accidental-looking
    group._addoption('-v', '--verbose', action="count",
        dest="verbose", default=0, help="increase verbosity."),
    group._addoption('-l', '--showlocals',
        action="store_true", dest="showlocals", default=False,
        help="show locals in tracebacks (disabled by default).")
    group.addoption('--report',
        action="store", dest="report", default=None, metavar="opts",
        help="show more info, valid: skipped,xfailed")
    group._addoption('--tb', metavar="style",
        action="store", dest="tbstyle", default='long',
        type="choice", choices=['long', 'short', 'no', 'line'],
        help="traceback print mode (long/short/line/no).")
    group._addoption('--fulltrace',
        action="store_true", dest="fulltrace", default=False,
        help="don't cut any tracebacks (default is to cut).")
    group._addoption('--funcargs',
        action="store_true", dest="showfuncargs", default=False,
        help="show available function arguments, sorted by plugin")
+
def pytest_configure(config):
    """Instantiate and register the reporter matching the command line:
    CollectonlyReporter for --collectonly, a funcarg-listing session for
    --funcargs (no reporter), otherwise the normal TerminalReporter."""
    if config.option.collectonly:
        reporter = CollectonlyReporter(config)
    elif config.option.showfuncargs:
        config.setsessionclass(ShowFuncargSession)
        reporter = None
    else:
        reporter = TerminalReporter(config)
    if reporter:
        # XXX see remote.py's XXX
        for attr in 'pytest_terminal_hasmarkup', 'pytest_terminal_fullwidth':
            if hasattr(config, attr):
                #print "SETTING TERMINAL OPTIONS", attr, getattr(config, attr)
                name = attr.split("_")[-1]
                # BUG FIX: the original asserted on ``self.reporter._tw``,
                # but this is a module-level function -- ``self`` is
                # undefined and the assert raised NameError whenever the
                # config attribute was present.
                assert hasattr(reporter._tw, name), name
                setattr(reporter._tw, name, getattr(config, attr))
        config.pluginmanager.register(reporter, 'terminalreporter')
+
def getreportopt(optvalue):
    """Parse the --report option value into a {name: bool} mapping.

    *optvalue* is a comma separated list; a "no" prefix negates an entry,
    e.g. "skipped,noxfailed" -> {"skipped": True, "xfailed": False}.
    """
    settings = {}
    if not optvalue:
        return settings
    for token in optvalue.split(","):
        token = token.strip()
        enabled = not token.startswith("no")
        if not enabled:
            token = token[2:]
        settings[token] = enabled
    return settings
+
+class TerminalReporter:
    def __init__(self, config, file=None):
        self.config = config
        self.stats = {}                  # category name -> list of reports
        self.curdir = py.path.local()    # base dir for relative path display
        if file is None:
            file = py.std.sys.stdout
        self._tw = py.io.TerminalWriter(file)
        self.currentfspath = None        # path shown on the progress line
        self.gateway2info = {}           # dist-testing: gateway -> info line
        self._reportopt = getreportopt(config.getvalue('report'))
+
    def hasopt(self, name):
        # was --report=<name> requested (and not negated)?
        return self._reportopt.get(name, False)
+
+ def write_fspath_result(self, fspath, res):
+ fspath = self.curdir.bestrelpath(fspath)
+ if fspath != self.currentfspath:
+ self._tw.line()
+ relpath = self.curdir.bestrelpath(fspath)
+ self._tw.write(relpath + " ")
+ self.currentfspath = fspath
+ self._tw.write(res)
+
+ def write_ensure_prefix(self, prefix, extra="", **kwargs):
+ if self.currentfspath != prefix:
+ self._tw.line()
+ self.currentfspath = prefix
+ self._tw.write(prefix)
+ if extra:
+ self._tw.write(extra, **kwargs)
+ self.currentfspath = -2
+
+ def ensure_newline(self):
+ if self.currentfspath:
+ self._tw.line()
+ self.currentfspath = None
+
+ def write_line(self, line, **markup):
+ line = str(line)
+ self.ensure_newline()
+ self._tw.line(line, **markup)
+
    def write_sep(self, sep, title=None, **markup):
        # separator lines always start on a fresh terminal line
        self.ensure_newline()
        self._tw.sep(sep, title, **markup)
+
    def getcategoryletterword(self, rep):
        """Return (category, letter, word) for *rep*.

        A pytest_report_teststatus hook result wins; otherwise the category
        is derived from the first truthy outcome flag on the report.
        """
        res = self.config.hook.pytest_report_teststatus(report=rep)
        if res:
            return res
        # '???' acts as the fallback category when no flag is set
        for cat in 'skipped failed passed ???'.split():
            if getattr(rep, cat, None):
                break
        return cat, self.getoutcomeletter(rep), self.getoutcomeword(rep)
+
    def getoutcomeletter(self, rep):
        # the report's short repr; presumably the single progress-line
        # character such as "." / "F" / "s" -- confirm against report code
        return rep.shortrepr
+
+ def getoutcomeword(self, rep):
+ if rep.passed:
+ return "PASS", dict(green=True)
+ elif rep.failed:
+ return "FAIL", dict(red=True)
+ elif rep.skipped:
+ return "SKIP"
+ else:
+ return "???", dict(red=True)
+
+ def pytest_internalerror(self, excrepr):
+ for line in str(excrepr).split("\n"):
+ self.write_line("INTERNALERROR> " + line)
+
+ def pytest_plugin_registered(self, plugin):
+ if self.config.option.traceconfig:
+ msg = "PLUGIN registered: %s" %(plugin,)
+ # XXX this event may happen during setup/teardown time
+ # which unfortunately captures our output here
+ # which garbles our output if we use self.write_line
+ self.write_line(msg)
+
+ @optionalhook
+ def pytest_gwmanage_newgateway(self, gateway, platinfo):
+ #self.write_line("%s instantiated gateway from spec %r" %(gateway.id, gateway.spec._spec))
+ d = {}
+ d['version'] = repr_pythonversion(platinfo.version_info)
+ d['id'] = gateway.id
+ d['spec'] = gateway.spec._spec
+ d['platform'] = platinfo.platform
+ if self.config.option.verbose:
+ d['extra'] = "- " + platinfo.executable
+ else:
+ d['extra'] = ""
+ d['cwd'] = platinfo.cwd
+ infoline = ("[%(id)s] %(spec)s -- platform %(platform)s, "
+ "Python %(version)s "
+ "cwd: %(cwd)s"
+ "%(extra)s" % d)
+ self.write_line(infoline)
+ self.gateway2info[gateway] = infoline
+
+ @optionalhook
+ def pytest_testnodeready(self, node):
+ self.write_line("[%s] txnode ready to receive tests" %(node.gateway.id,))
+
+ @optionalhook
+ def pytest_testnodedown(self, node, error):
+ if error:
+ self.write_line("[%s] node down, error: %s" %(node.gateway.id, error))
+
+ @optionalhook
+ def pytest_rescheduleitems(self, items):
+ if self.config.option.debug:
+ self.write_sep("!", "RESCHEDULING %s " %(items,))
+
+ @optionalhook
+ def pytest_looponfailinfo(self, failreports, rootdirs):
+ if failreports:
+ self.write_sep("#", "LOOPONFAILING", red=True)
+ for report in failreports:
+ loc = self._getcrashline(report)
+ self.write_line(loc, red=True)
+ self.write_sep("#", "waiting for changes")
+ for rootdir in rootdirs:
+ self.write_line("### Watching: %s" %(rootdir,), bold=True)
+
+
+ def pytest_trace(self, category, msg):
+ if self.config.option.debug or \
+ self.config.option.traceconfig and category.find("config") != -1:
+ self.write_line("[%s] %s" %(category, msg))
+
+ def pytest_deselected(self, items):
+ self.stats.setdefault('deselected', []).append(items)
+
+ def pytest_itemstart(self, item, node=None):
+ if getattr(self.config.option, 'dist', 'no') != "no":
+ # for dist-testing situations itemstart means we
+ # queued the item for sending, not interesting (unless debugging)
+ if self.config.option.debug:
+ line = self._reportinfoline(item)
+ extra = ""
+ if node:
+ extra = "-> [%s]" % node.gateway.id
+ self.write_ensure_prefix(line, extra)
+ else:
+ if self.config.option.verbose:
+ line = self._reportinfoline(item)
+ self.write_ensure_prefix(line, "")
+ else:
+ # ensure that the path is printed before the
+ # 1st test of a module starts running
+
+ self.write_fspath_result(self._getfspath(item), "")
+
+ def pytest__teardown_final_logerror(self, report):
+ self.stats.setdefault("error", []).append(report)
+
+ def pytest_runtest_logreport(self, report):
+ rep = report
+ cat, letter, word = self.getcategoryletterword(rep)
+ if not letter and not word:
+ # probably passed setup/teardown
+ return
+ if isinstance(word, tuple):
+ word, markup = word
+ else:
+ markup = {}
+ self.stats.setdefault(cat, []).append(rep)
+ if not self.config.option.verbose:
+ self.write_fspath_result(self._getfspath(rep.item), letter)
+ else:
+ line = self._reportinfoline(rep.item)
+ if not hasattr(rep, 'node'):
+ self.write_ensure_prefix(line, word, **markup)
+ else:
+ self.ensure_newline()
+ if hasattr(rep, 'node'):
+ self._tw.write("[%s] " % rep.node.gateway.id)
+ self._tw.write(word, **markup)
+ self._tw.write(" " + line)
+ self.currentfspath = -2
+
+ def pytest_collectreport(self, report):
+ if not report.passed:
+ if report.failed:
+ self.stats.setdefault("error", []).append(report)
+ msg = report.longrepr.reprcrash.message
+ self.write_fspath_result(report.collector.fspath, "E")
+ elif report.skipped:
+ self.stats.setdefault("skipped", []).append(report)
+ self.write_fspath_result(report.collector.fspath, "S")
+
+ def pytest_sessionstart(self, session):
+ self.write_sep("=", "test session starts", bold=True)
+ self._sessionstarttime = py.std.time.time()
+
+ verinfo = ".".join(map(str, sys.version_info[:3]))
+ msg = "python: platform %s -- Python %s" % (sys.platform, verinfo)
+ msg += " -- pytest-%s" % (py.__version__)
+ if self.config.option.verbose or self.config.option.debug or getattr(self.config.option, 'pastebin', None):
+ msg += " -- " + str(sys.executable)
+ self.write_line(msg)
+ lines = self.config.hook.pytest_report_header(config=self.config)
+ lines.reverse()
+ for line in flatten(lines):
+ self.write_line(line)
+ for i, testarg in enumerate(self.config.args):
+ self.write_line("test object %d: %s" %(i+1, testarg))
+
+ def pytest_sessionfinish(self, exitstatus, __multicall__):
+ __multicall__.execute()
+ self._tw.line("")
+ if exitstatus in (0, 1, 2):
+ self.summary_errors()
+ self.summary_failures()
+ self.config.hook.pytest_terminal_summary(terminalreporter=self)
+ if exitstatus == 2:
+ self._report_keyboardinterrupt()
+ self.summary_deselected()
+ self.summary_stats()
+
+ def pytest_keyboard_interrupt(self, excinfo):
+ self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
+
+ def _report_keyboardinterrupt(self):
+ self.write_sep("!", "KEYBOARD INTERRUPT")
+ excrepr = self._keyboardinterrupt_memo
+ if self.config.option.verbose:
+ excrepr.toterminal(self._tw)
+ else:
+ excrepr.reprcrash.toterminal(self._tw)
+
+ def _getcrashline(self, report):
+ try:
+ return report.longrepr.reprcrash
+ except AttributeError:
+ return str(report.longrepr)[:50]
+
+ def _reportinfoline(self, item):
+ collect_fspath = self._getfspath(item)
+ fspath, lineno, msg = self._getreportinfo(item)
+ if fspath and fspath != collect_fspath:
+ fspath = "%s <- %s" % (
+ self.curdir.bestrelpath(collect_fspath),
+ self.curdir.bestrelpath(fspath))
+ elif fspath:
+ fspath = self.curdir.bestrelpath(fspath)
+ if lineno is not None:
+ lineno += 1
+ if fspath and lineno and msg:
+ line = "%(fspath)s:%(lineno)s: %(msg)s"
+ elif fspath and msg:
+ line = "%(fspath)s: %(msg)s"
+ elif fspath and lineno:
+ line = "%(fspath)s:%(lineno)s %(extrapath)s"
+ else:
+ line = "[noreportinfo]"
+ return line % locals() + " "
+
+ def _getfailureheadline(self, rep):
+ if hasattr(rep, "collector"):
+ return str(rep.collector.fspath)
+ elif hasattr(rep, 'item'):
+ fspath, lineno, msg = self._getreportinfo(rep.item)
+ return msg
+ else:
+ return "test session"
+
+ def _getreportinfo(self, item):
+ try:
+ return item.__reportinfo
+ except AttributeError:
+ pass
+ reportinfo = item.config.hook.pytest_report_iteminfo(item=item)
+ # cache on item
+ item.__reportinfo = reportinfo
+ return reportinfo
+
+ def _getfspath(self, item):
+ try:
+ return item.fspath
+ except AttributeError:
+ fspath, lineno, msg = self._getreportinfo(item)
+ return fspath
+
+ #
+ # summaries for sessionfinish
+ #
+
+ def summary_failures(self):
+ tbstyle = self.config.getvalue("tbstyle")
+ if 'failed' in self.stats and tbstyle != "no":
+ self.write_sep("=", "FAILURES")
+ for rep in self.stats['failed']:
+ if tbstyle == "line":
+ line = self._getcrashline(rep)
+ self.write_line(line)
+ else:
+ msg = self._getfailureheadline(rep)
+ self.write_sep("_", msg)
+ self.write_platinfo(rep)
+ rep.toterminal(self._tw)
+
+ def summary_errors(self):
+ if 'error' in self.stats and self.config.option.tbstyle != "no":
+ self.write_sep("=", "ERRORS")
+ for rep in self.stats['error']:
+ msg = self._getfailureheadline(rep)
+ if not hasattr(rep, 'when'):
+ # collect
+ msg = "ERROR during collection " + msg
+ elif rep.when == "setup":
+ msg = "ERROR at setup of " + msg
+ elif rep.when == "teardown":
+ msg = "ERROR at teardown of " + msg
+ self.write_sep("_", msg)
+ self.write_platinfo(rep)
+ rep.toterminal(self._tw)
+
+ def write_platinfo(self, rep):
+ if hasattr(rep, 'node'):
+ self.write_line(self.gateway2info.get(
+ rep.node.gateway,
+ "node %r (platinfo not found? strange)")
+ [:self._tw.fullwidth-1])
+
+ def summary_stats(self):
+ session_duration = py.std.time.time() - self._sessionstarttime
+
+ keys = "failed passed skipped deselected".split()
+ for key in self.stats.keys():
+ if key not in keys:
+ keys.append(key)
+ parts = []
+ for key in keys:
+ val = self.stats.get(key, None)
+ if val:
+ parts.append("%d %s" %(len(val), key))
+ line = ", ".join(parts)
+ # XXX coloring
+ self.write_sep("=", "%s in %.2f seconds" %(line, session_duration))
+
+ def summary_deselected(self):
+ if 'deselected' in self.stats:
+ self.write_sep("=", "%d tests deselected by %r" %(
+ len(self.stats['deselected']), self.config.option.keyword), bold=True)
+
+
+class CollectonlyReporter:
+ INDENT = " "
+
+ def __init__(self, config, out=None):
+ self.config = config
+ if out is None:
+ out = py.std.sys.stdout
+ self.out = py.io.TerminalWriter(out)
+ self.indent = ""
+ self._failed = []
+
+ def outindent(self, line):
+ s = self.indent + str(line)
+ print ("printing: %s" % s)
+ self.out.line(self.indent + str(line))
+
+ def pytest_internalerror(self, excrepr):
+ for line in str(excrepr).split("\n"):
+ self.out.line("INTERNALERROR> " + line)
+
+ def pytest_collectstart(self, collector):
+ self.outindent(collector)
+ self.indent += self.INDENT
+
+ def pytest_itemstart(self, item, node=None):
+ self.outindent(item)
+
+ def pytest_collectreport(self, report):
+ if not report.passed:
+ self.outindent("!!! %s !!!" % report.longrepr.reprcrash.message)
+ self._failed.append(report)
+ self.indent = self.indent[:-len(self.INDENT)]
+
+ def pytest_sessionfinish(self, session, exitstatus):
+ if self._failed:
+ self.out.sep("!", "collection failures")
+ for rep in self._failed:
+ rep.toterminal(self.out)
+
+
+def repr_pythonversion(v=None):
+ if v is None:
+ v = sys.version_info
+ try:
+ return "%s.%s.%s-%s-%s" % v
+ except (TypeError, ValueError):
+ return str(v)
+
+def flatten(l):
+ for x in l:
+ if isinstance(x, (list, tuple)):
+ for y in flatten(x):
+ yield y
+ else:
+ yield x
+
+from py._test.session import Session
+class ShowFuncargSession(Session):
+ def main(self, colitems):
+ self.fspath = py.path.local()
+ self.sessionstarts()
+ try:
+ self.showargs(colitems[0])
+ finally:
+ self.sessionfinishes(exitstatus=1)
+
+ def showargs(self, colitem):
+ tw = py.io.TerminalWriter()
+ from py._test.funcargs import getplugins
+ from py._test.funcargs import FuncargRequest
+ plugins = getplugins(colitem, withpy=True)
+ verbose = self.config.getvalue("verbose")
+ for plugin in plugins:
+ available = []
+ for name, factory in vars(plugin).items():
+ if name.startswith(FuncargRequest._argprefix):
+ name = name[len(FuncargRequest._argprefix):]
+ if name not in available:
+ available.append([name, factory])
+ if available:
+ pluginname = plugin.__name__
+ for name, factory in available:
+ loc = self.getlocation(factory)
+ if verbose:
+ funcargspec = "%s -- %s" %(name, loc,)
+ else:
+ funcargspec = name
+ tw.line(funcargspec, green=True)
+ doc = factory.__doc__ or ""
+ if doc:
+ for line in doc.split("\n"):
+ tw.line(" " + line.strip())
+ else:
+ tw.line(" %s: no docstring available" %(loc,),
+ red=True)
+
+ def getlocation(self, function):
+ import inspect
+ fn = py.path.local(inspect.getfile(function))
+ lineno = py.builtin._getcode(function).co_firstlineno
+ if fn.relto(self.fspath):
+ fn = fn.relto(self.fspath)
+ return "%s:%d" %(fn, lineno+1)
Added: pypy/branch/py12/py/_plugin/pytest_tmpdir.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_tmpdir.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,22 @@
+"""provide temporary directories to test functions.
+
+usage example::
+
+ def test_plugin(tmpdir):
+ tmpdir.join("hello").write("hello")
+
+.. _`py.path.local`: ../../path.html
+
+"""
+import py
+
def pytest_funcarg__tmpdir(request):
    """return a temporary directory path object
    unique to each test function invocation,
    created as a sub directory of the base temporary
    directory. The returned object is a `py.path.local`_
    path object.
    """
    name = request.function.__name__
    # numbered=True gives repeated invocations of the same test function
    # distinct, suffixed directories
    x = request.config.mktemp(name, numbered=True)
    return x.realpath()
Added: pypy/branch/py12/py/_plugin/pytest_unittest.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/pytest_unittest.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,81 @@
+"""
+automatically discover and run traditional "unittest.py" style tests.
+
+Usage
+----------------
+
+This plugin collects and runs Python `unittest.py style`_ tests.
+It will automatically collect ``unittest.TestCase`` subclasses
+and their ``test`` methods from the test modules of a project
+(usually following the ``test_*.py`` pattern).
+
+This plugin is enabled by default.
+
+.. _`unittest.py style`: http://docs.python.org/library/unittest.html
+"""
+import py
+import sys
+
def pytest_pycollect_makeitem(collector, name, obj):
    """Collection hook: wrap unittest.TestCase subclasses in UnitTestCase."""
    if 'unittest' not in sys.modules:
        return # nobody derived unittest.TestCase
    try:
        isunit = issubclass(obj, py.std.unittest.TestCase)
    except KeyboardInterrupt:
        raise
    except Exception:
        # issubclass() raises on non-class objects; treat as "not a TestCase"
        pass
    else:
        if isunit:
            return UnitTestCase(name, parent=collector)
+
class UnitTestCase(py.test.collect.Class):
    """Collector for a unittest.TestCase subclass."""
    def collect(self):
        return [UnitTestCaseInstance("()", self)]

    def setup(self):
        # setUp/tearDown are driven per-test by UnitTestFunction instead
        pass

    def teardown(self):
        pass
+
+_dummy = object()
class UnitTestCaseInstance(py.test.collect.Instance):
    """Instance-level collector: asks unittest's TestLoader for the test
    method names and wraps each callable one in a UnitTestFunction."""

    def collect(self):
        loader = py.std.unittest.TestLoader()
        names = loader.getTestCaseNames(self.obj.__class__)
        l = []
        for name in names:
            callobj = getattr(self.obj, name)
            if py.builtin.callable(callobj):
                l.append(UnitTestFunction(name, parent=self))
        return l

    def _getobj(self):
        # instantiate the TestCase; methodName='run' avoids requiring a
        # method literally named 'runTest'.
        # FIX: removed an unused duplicate lookup (x = self.parent.obj).
        return self.parent.obj(methodName='run')
+
class UnitTestFunction(py.test.collect.Function):
    """Runnable test item wrapping a single TestCase method."""

    def __init__(self, name, parent, args=(), obj=_dummy, sort_value=None):
        super(UnitTestFunction, self).__init__(name, parent)
        self._args = args
        # _dummy sentinel: only override _obj when a real object was given
        if obj is not _dummy:
            self._obj = obj
        self._sort_value = sort_value
        # create a fresh TestCase instance per test when the parent
        # collector supports it
        if hasattr(self.parent, 'newinstance'):
            self.parent.newinstance()
            self.obj = self._getobj()

    def runtest(self):
        target = self.obj
        args = self._args
        target(*args)

    def setup(self):
        # call unittest-style setUp on the instance the method is bound to
        instance = py.builtin._getimself(self.obj)
        instance.setUp()

    def teardown(self):
        instance = py.builtin._getimself(self.obj)
        instance.tearDown()
+
Added: pypy/branch/py12/py/_plugin/standalonetemplate.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_plugin/standalonetemplate.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,63 @@
+#! /usr/bin/env python
+
+sources = """
+ at SOURCES@"""
+
+import sys
+import base64
+import zlib
+import imp
+
class DictImporter(object):
    """A sys.meta_path importer serving modules from an in-memory
    {dotted_name: source} mapping; package sources are stored under
    "<name>.__init__" keys."""

    def __init__(self, sources):
        self.sources = sources

    def find_module(self, fullname, path=None):
        """PEP 302 finder: claim *fullname* if we hold its source."""
        if fullname in self.sources:
            return self
        if fullname + '.__init__' in self.sources:
            return self
        return None

    def load_module(self, fullname):
        """PEP 302 loader: compile and execute the stored source."""
        from types import ModuleType
        try:
            source = self.sources[fullname]
            is_pkg = False
        except KeyError:
            source = self.sources[fullname + '.__init__']
            is_pkg = True

        code = compile(source, fullname, 'exec')
        module = sys.modules.setdefault(fullname, ModuleType(fullname))
        module.__file__ = "%s/%s" % (__file__, fullname)
        module.__loader__ = self
        if is_pkg:
            module.__path__ = [fullname]

        # do_exec is defined at startup to bridge py2/py3 exec syntax
        do_exec(code, module.__dict__)
        return sys.modules[fullname]

    def get_source(self, name):
        """Return the stored source for *name* (module or package)."""
        source = self.sources.get(name)
        if source is None:
            source = self.sources.get(name + '.__init__')
        return source
+
if __name__ == "__main__":
    # Define do_exec() per interpreter generation (exec is a statement on
    # py2 but a function on py3), then unpickle the embedded source dict.
    if sys.version_info >= (3,0):
        exec("def do_exec(co, loc): exec(co, loc)\n")
        import pickle
        sources = sources.encode("ascii") # ensure bytes
        sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
    else:
        import cPickle as pickle
        exec("def do_exec(co, loc): exec co in loc\n")
        sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))

    # serve the bundled modules through the in-memory importer, then run
    importer = DictImporter(sources)
    sys.meta_path.append(importer)

    import py
    py.cmdline.pytest()
Added: pypy/branch/py12/py/_process/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_process/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1 @@
+""" high-level sub-process handling """
Added: pypy/branch/py12/py/_process/cmdexec.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_process/cmdexec.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,46 @@
+"""
+
+"""
+
+import os, sys
+import subprocess
+import py
+from subprocess import Popen, PIPE
+
def cmdexec(cmd):
    """ return output of executing 'cmd' in a separate process.

    raise cmdexec.ExecutionFailed exception if the command failed.
    the exception will provide an 'err' attribute containing
    the error-output from the command.
    """
    process = subprocess.Popen(cmd, shell=True,
            universal_newlines=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    # decode captured output to text with the interpreter default encoding
    out = py.builtin._totext(out, sys.getdefaultencoding())
    err = py.builtin._totext(err, sys.getdefaultencoding())
    status = process.poll()
    if status:
        # NOTE(review): the same value is passed for both 'status' and
        # 'systemstatus' -- no separate raw system status is available here
        raise ExecutionFailed(status, status, cmd, out, err)
    return out
+
class ExecutionFailed(py.error.Error):
    """Raised by cmdexec() when the command exits non-zero; carries the
    exit status, the command line and the captured out/err text."""
    def __init__(self, status, systemstatus, cmd, out, err):
        Exception.__init__(self)
        self.status = status
        self.systemstatus = systemstatus
        self.cmd = cmd
        self.err = err
        self.out = out

    def __str__(self):
        return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err)
+
# export the exception under the name 'py.process.cmdexec.Error'
cmdexec.Error = ExecutionFailed
try:
    # rebind module/name so tracebacks show the public dotted path
    ExecutionFailed.__module__ = 'py.process.cmdexec'
    ExecutionFailed.__name__ = 'Error'
except (AttributeError, TypeError):
    # some interpreters may not allow rebinding these attributes
    pass
Added: pypy/branch/py12/py/_process/forkedfunc.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_process/forkedfunc.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,108 @@
+
+"""
+ ForkedFunc provides a way to run a function in a forked process
+ and get at its return value, stdout and stderr output as well
+ as signals and exit statuses.
+
+ XXX see if tempdir handling is sane
+"""
+
+import py
+import os
+import sys
+import marshal
+
class ForkedFunc(object):
    """Run *fun* in a forked child process, capturing its return value
    (marshalled through a temp file) plus stdout/stderr; call
    waitfinish() in the parent to collect a Result."""

    # exit status used when the child function raised an exception
    EXITSTATUS_EXCEPTION = 3

    def __init__(self, fun, args=None, kwargs=None, nice_level=0):
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        self.fun = fun
        self.args = args
        self.kwargs = kwargs
        # per-run temp dir holding the three exchange files
        self.tempdir = tempdir = py.path.local.mkdtemp()
        self.RETVAL = tempdir.ensure('retval')
        self.STDOUT = tempdir.ensure('stdout')
        self.STDERR = tempdir.ensure('stderr')

        pid = os.fork()
        if pid: # in parent process
            self.pid = pid
        else: # in child process
            self._child(nice_level)

    def _child(self, nice_level):
        # right now we need to call a function, but first we need to
        # map all IO that might happen
        # make sure sys.stdout points to file descriptor one
        sys.stdout = stdout = self.STDOUT.open('w')
        sys.stdout.flush()
        fdstdout = stdout.fileno()
        if fdstdout != 1:
            os.dup2(fdstdout, 1)
        sys.stderr = stderr = self.STDERR.open('w')
        fdstderr = stderr.fileno()
        if fdstderr != 2:
            os.dup2(fdstderr, 2)
        retvalf = self.RETVAL.open("wb")
        EXITSTATUS = 0
        try:
            if nice_level:
                os.nice(nice_level)
            try:
                retval = self.fun(*self.args, **self.kwargs)
                # marshal (not pickle): restricts retval to simple types
                retvalf.write(marshal.dumps(retval))
            except:
                excinfo = py.code.ExceptionInfo()
                stderr.write(excinfo.exconly())
                EXITSTATUS = self.EXITSTATUS_EXCEPTION
        finally:
            stdout.close()
            stderr.close()
            retvalf.close()
        os.close(1)
        os.close(2)
        # _exit skips atexit/stdio teardown inherited from the parent
        os._exit(EXITSTATUS)

    def waitfinish(self, waiter=os.waitpid):
        """Wait for the child and return a Result with its exit status,
        signal, unmarshalled return value and captured output."""
        pid, systemstatus = waiter(self.pid, 0)
        if systemstatus:
            if os.WIFSIGNALED(systemstatus):
                # conventional 128+signal encoding for signal deaths
                exitstatus = os.WTERMSIG(systemstatus) + 128
            else:
                exitstatus = os.WEXITSTATUS(systemstatus)
            #raise ExecutionFailed(status, systemstatus, cmd,
            # ''.join(out), ''.join(err))
        else:
            exitstatus = 0
        # low 7 bits of the raw wait status carry the terminating signal
        signal = systemstatus & 0x7f
        if not exitstatus and not signal:
            retval = self.RETVAL.open('rb')
            try:
                retval_data = retval.read()
            finally:
                retval.close()
            retval = marshal.loads(retval_data)
        else:
            # child failed or was signalled: no trustworthy return value
            retval = None
        stdout = self.STDOUT.read()
        stderr = self.STDERR.read()
        self._removetemp()
        return Result(exitstatus, signal, retval, stdout, stderr)

    def _removetemp(self):
        if self.tempdir.check():
            self.tempdir.remove()

    def __del__(self):
        # best-effort cleanup if waitfinish() was never called
        self._removetemp()
+
class Result(object):
    """Outcome of ForkedFunc.waitfinish(): exit status, terminating
    signal, unmarshalled return value and captured out/err text."""

    def __init__(self, exitstatus, signal, retval, stdout, stderr):
        (self.exitstatus, self.signal, self.retval,
         self.out, self.err) = (exitstatus, signal, retval, stdout, stderr)
Added: pypy/branch/py12/py/_process/killproc.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_process/killproc.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,23 @@
+import py
+import os, sys
+
# Pick a platform-specific dokill() implementation at import time.
# The os._name check covers Jython's os module naming.
if sys.platform == "win32" or getattr(os, '_name', '') == 'nt':
    try:
        import ctypes
    except ImportError:
        # no ctypes (e.g. older interpreters): shell out to taskkill
        def dokill(pid):
            py.process.cmdexec("taskkill /F /PID %d" %(pid,))
    else:
        def dokill(pid):
            PROCESS_TERMINATE = 1
            handle = ctypes.windll.kernel32.OpenProcess(
                PROCESS_TERMINATE, False, pid)
            ctypes.windll.kernel32.TerminateProcess(handle, -1)
            ctypes.windll.kernel32.CloseHandle(handle)
else:
    # POSIX: 15 == SIGTERM
    def dokill(pid):
        os.kill(pid, 15)

def kill(pid):
    """ kill process by id. """
    dokill(pid)
Added: pypy/branch/py12/py/_std.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_std.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,18 @@
+import sys
+
class Std(object):
    """Lazy facade over top-level modules: attribute access imports the
    named module on first use (e.g. ``py.std.os``)."""

    def __init__(self):
        # alias our attribute dict to sys.modules, so already-imported
        # modules resolve directly without going through __getattr__
        self.__dict__ = sys.modules

    def __getattr__(self, name):
        try:
            module = __import__(name)
        except ImportError:
            raise AttributeError("py.std: could not import %s" % name)
        return module

std = Std()
Added: pypy/branch/py12/py/_test/.pluginmanager.py.swp
==============================================================================
Binary file. No diff available.
Added: pypy/branch/py12/py/_test/__init__.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_test/__init__.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1 @@
+""" assertion and py.test helper API."""
Added: pypy/branch/py12/py/_test/cmdline.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_test/cmdline.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,24 @@
+import py
+import sys
+
+#
+# main entry point
+#
+
def main(args=None):
    """Command line entry point.

    Parses the arguments, runs a test session and exits the process
    with the session's exit status, or with 3 on a configuration error.
    """
    if args is None:
        args = sys.argv[1:]
    config = py.test.config
    try:
        config.parse(args)
        config.pluginmanager.do_configure(config)
        session = config.initsession()
        exitstatus = session.main(config.getinitialnodes())
        config.pluginmanager.do_unconfigure(config)
    except config.Error:
        err = sys.exc_info()[1]
        sys.stderr.write("ERROR: %s\n" %(err.args[0],))
        raise SystemExit(3)
    raise SystemExit(exitstatus)
+
Added: pypy/branch/py12/py/_test/collect.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_test/collect.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,417 @@
+"""
+test collection nodes, forming a tree, Items are leafs.
+"""
+import py
+
def configproperty(name):
    """Return a read-only property that resolves the collection class
    *name* through the node's config, honoring per-path conftest
    overrides for the node's ``fspath``.
    """
    def fget(self):
        return self.config._getcollectclass(name, self.fspath)
    return property(fget)
+
class HookProxy:
    """Dispatch hook calls for a node, restricted to the plugins that
    match the node's filesystem path."""

    def __init__(self, node):
        self.node = node

    def __getattr__(self, name):
        # underscore-prefixed names are internal, never hook names
        if name[0] == "_":
            raise AttributeError(name)
        hookmethod = getattr(self.node.config.hook, name)

        def call_matching_hooks(**kwargs):
            node = self.node
            plugins = node.config._getmatchingplugins(node.fspath)
            return hookmethod.pcall(plugins, **kwargs)

        return call_matching_hooks
+
class Node(object):
    """ base class for all Nodes in the collection tree.
    Collector subclasses have children, Items are terminal nodes.
    """
    def __init__(self, name, parent=None, config=None):
        self.name = name            # basename within the parent collector
        self.parent = parent
        # the root node must receive an explicit config; children
        # inherit it from their parent
        self.config = config or parent.config
        self.fspath = getattr(parent, 'fspath', None)  # refined by FS nodes
        self.ihook = HookProxy(self)  # hook calls filtered to our fspath

    def _reraiseunpicklingproblem(self):
        # re-raise an exception that was captured during __setstate__
        if hasattr(self, '_unpickle_exc'):
            py.builtin._reraise(*self._unpickle_exc)

    #
    # Pickling: a node serializes as (name, parent) and is resolved
    # against the parent's collected children on unpickling.
    #
    def __getstate__(self):
        return (self.name, self.parent)
    def __setstate__(self, nameparent):
        name, parent = nameparent
        try:
            colitems = parent._memocollect()
            for colitem in colitems:
                if colitem.name == name:
                    # we are a copy that will not be returned
                    # by our parent
                    self.__dict__ = colitem.__dict__
                    break
            else:
                raise ValueError("item %r not found in parent collection %r" %(
                    name, [x.name for x in colitems]))
        except KeyboardInterrupt:
            raise
        except Exception:
            # our parent can't collect us but we want unpickling to
            # otherwise continue - self._reraiseunpicklingproblem() will
            # reraise the problem
            self._unpickle_exc = py.std.sys.exc_info()
            self.name = name
            self.parent = parent
            self.config = parent.config

    def __repr__(self):
        # include id() only in --debug runs to keep normal output stable
        if getattr(self.config.option, 'debug', False):
            return "<%s %r %0x>" %(self.__class__.__name__,
                getattr(self, 'name', None), id(self))
        else:
            return "<%s %r>" %(self.__class__.__name__,
                getattr(self, 'name', None))

    # methods for ordering nodes

    def __eq__(self, other):
        if not isinstance(other, Node):
            return False
        return self.name == other.name and self.parent == other.parent

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # consistent with __eq__: both are based on (name, parent)
        return hash((self.name, self.parent))

    def setup(self):
        pass

    def teardown(self):
        pass

    def _memoizedcall(self, attrname, function):
        """ call function() once and memoize its result (or the
        exception it raised) under self.<attrname>; later calls replay
        the result or re-raise the stored exception. """
        exattrname = "_ex_" + attrname
        failure = getattr(self, exattrname, None)
        if failure is not None:
            py.builtin._reraise(failure[0], failure[1], failure[2])
        if hasattr(self, attrname):
            return getattr(self, attrname)
        try:
            res = function()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            failure = py.std.sys.exc_info()
            setattr(self, exattrname, failure)
            raise
        setattr(self, attrname, res)
        return res

    def listchain(self):
        """ return list of all parent collectors up to self,
        starting from root of collection tree. """
        l = [self]
        while 1:
            x = l[0]
            # stop before the (nameless) root collector: its parent's
            # parent is None
            if x.parent is not None and x.parent.parent is not None:
                l.insert(0, x.parent)
            else:
                return l

    def listnames(self):
        return [x.name for x in self.listchain()]

    def getparent(self, cls):
        """ return the first node (self included, walking upwards)
        that is an instance of cls, or None. """
        current = self
        while current and not isinstance(current, cls):
            current = current.parent
        return current

    def readkeywords(self):
        # keywords are exposed as a name -> True mapping
        return dict([(x, True) for x in self._keywords()])

    def _keywords(self):
        return [self.name]

    def _skipbykeyword(self, keywordexpr):
        """ return True if the given keyword expression means to
        skip this collector/item.
        """
        if not keywordexpr:
            return
        chain = self.listchain()
        for key in filter(None, keywordexpr.split()):
            eor = key[:1] == '-'   # a leading '-' negates the keyword
            if eor:
                key = key[1:]
            if not (eor ^ self._matchonekeyword(key, chain)):
                return True

    def _matchonekeyword(self, key, chain):
        # a dotted key must match consecutive nodes along the chain
        elems = key.split(".")
        # XXX O(n^2), anyone cares?
        chain = [item.readkeywords() for item in chain if item._keywords()]
        for start, _ in enumerate(chain):
            if start + len(elems) > len(chain):
                return False
            for num, elem in enumerate(elems):
                for keyword in chain[num + start]:
                    ok = False
                    if elem in keyword:
                        ok = True
                        break
                if not ok:
                    break
                if num == len(elems) - 1 and ok:
                    return True
        return False

    def _prunetraceback(self, traceback):
        # hook for subclasses to cut uninteresting traceback entries
        return traceback

    def _repr_failure_py(self, excinfo):
        excinfo.traceback = self._prunetraceback(excinfo.traceback)
        # XXX should excinfo.getrepr record all data and toterminal()
        # process it?
        if self.config.option.tbstyle == "short":
            style = "short"
        else:
            style = "long"
        return excinfo.getrepr(funcargs=True,
                               showlocals=self.config.option.showlocals,
                               style=style)

    repr_failure = _repr_failure_py   # default failure representation
    shortfailurerepr = "F"            # single-char status for reports
+
class Collector(Node):
    """
    Collector instances create children through collect()
    and thus iteratively build a tree. attributes::

        parent: attribute pointing to the parent collector
                (or None if this is the root collector)
        name: basename of this collector object
    """
    # collection classes resolved through the config (conftest overridable)
    Directory = configproperty('Directory')
    Module = configproperty('Module')

    def collect(self):
        """ returns a list of children (items and collectors)
            for this collection node.
        """
        raise NotImplementedError("abstract")

    def collect_by_name(self, name):
        """ return a child matching the given name, else None. """
        for colitem in self._memocollect():
            if colitem.name == name:
                return colitem

    def repr_failure(self, excinfo, outerr=None):
        """ represent a failure. """
        assert outerr is None, "XXX deprecated"
        return self._repr_failure_py(excinfo)

    def _memocollect(self):
        """ internal helper method to cache results of calling collect(). """
        return self._memoizedcall('_collected', self.collect)

    # **********************************************************************
    # DEPRECATED METHODS
    # **********************************************************************

    def _deprecated_collect(self):
        """ run the deprecated run()/join() protocol if a subclass still
        overrides run(); returns None when the modern collect() should
        be used instead. """
        # avoid recursion:
        #    collect -> _deprecated_collect -> custom run() ->
        #    super().run() -> collect
        attrname = '_depcollectentered'
        if hasattr(self, attrname):
            return
        setattr(self, attrname, True)
        method = getattr(self.__class__, 'run', None)
        if method is not None and method != Collector.run:
            warnoldcollect(function=method)
            names = self.run()
            return [x for x in [self.join(name) for name in names] if x]

    def run(self):
        """ DEPRECATED: returns a list of names available from this collector.
            You can return an empty list.  Callers of this method
            must take care to catch exceptions properly.
        """
        return [colitem.name for colitem in self._memocollect()]

    def join(self, name):
        """ DEPRECATED: return a child collector or item for the given name.
            If the return value is None there is no such child.
        """
        return self.collect_by_name(name)

    def _prunetraceback(self, traceback):
        """ cut traceback entries to those inside our fspath, falling
        back to excluding the py lib itself. """
        if hasattr(self, 'fspath'):
            # fix: removed a dead 'path = self.fspath' local that was
            # assigned but never used
            ntraceback = traceback.cut(path=self.fspath)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(excludepath=py._pydir)
            traceback = ntraceback.filter()
        return traceback
+
class FSCollector(Collector):
    """ base class for collectors operating on a filesystem path. """
    def __init__(self, fspath, parent=None, config=None):
        fspath = py.path.local(fspath)  # accept strings or path objects
        super(FSCollector, self).__init__(fspath.basename, parent, config=config)
        self.fspath = fspath

    def __getstate__(self):
        # RootCollector.getbynames() inserts a directory which we need
        # to throw out here for proper re-instantiation
        if isinstance(self.parent.parent, RootCollector):
            assert self.parent.fspath == self.parent.parent.fspath, self.parent
            return (self.name, self.parent.parent) # shortcut
        # NOTE: super(Collector, ...) intentionally skips Collector in
        # the MRO and reaches Node.__getstate__ (name, parent)
        return super(Collector, self).__getstate__()
+
class File(FSCollector):
    """ base class for collecting tests from a file.
    (marker base class: distinguishes file-based collectors from
    directory collectors; collect() is left to subclasses.)
    """
+
class Directory(FSCollector):
    """ collector recursing into a directory via the collection hooks. """
    def recfilter(self, path):
        # recurse only into plain, non-dot directories, skipping
        # version-control metadata directories
        if path.check(dir=1, dotfile=0):
            return path.basename not in ('CVS', '_darcs', '{arch}')

    def collect(self):
        # honor legacy run()/join() overrides first (deprecated protocol)
        l = self._deprecated_collect()
        if l is not None:
            return l
        l = []
        for path in self.fspath.listdir(sort=True):
            res = self.consider(path)
            if res is not None:
                # hooks may return a single node or a list of nodes
                if isinstance(res, (list, tuple)):
                    l.extend(res)
                else:
                    l.append(res)
        return l

    def consider(self, path):
        """ produce collectors/items (or None) for the given path,
        consulting the ignore and collect hooks. """
        if self.ihook.pytest_ignore_collect_path(path=path, config=self.config):
            return
        if path.check(file=1):
            res = self.consider_file(path)
        elif path.check(dir=1):
            res = self.consider_dir(path)
        else:
            res = None
        if isinstance(res, list):
            # throw out identical results
            l = []
            for x in res:
                if x not in l:
                    assert x.parent == self, (x.parent, self)
                    assert x.fspath == path, (x.fspath, path)
                    l.append(x)
            res = l
        return res

    def consider_file(self, path):
        # delegate file collection to plugins/conftests
        return self.ihook.pytest_collect_file(path=path, parent=self)

    def consider_dir(self, path, usefilters=None):
        # 'usefilters' is accepted for backward compatibility only
        if usefilters is not None:
            py.log._apiwarn("0.99", "usefilters argument not needed")
        return self.ihook.pytest_collect_directory(path=path, parent=self)
+
class Item(Node):
    """ a basic test item. """

    def _deprecated_testexecution(self):
        # detect subclasses that still override the deprecated run()
        # or execute() hooks; warn and execute them the old way.
        cls = self.__class__
        if cls.run != Item.run:
            warnoldtestrun(function=self.run)
        elif cls.execute != Item.execute:
            warnoldtestrun(function=self.execute)
        else:
            return False
        self.run()
        return True

    def run(self):
        """ deprecated, here because subclasses might call it. """
        return self.execute(self.obj)

    def execute(self, obj):
        """ deprecated, here because subclasses might call it. """
        return obj()

    def reportinfo(self):
        # default location info: (path, no line number, no description)
        return self.fspath, None, ""
+
def warnoldcollect(function=None):
    """ emit a deprecation warning for the old run()/join() collection API. """
    py.log._apiwarn("1.0",
        "implement collector.collect() instead of "
        "collector.run() and collector.join()",
        stacklevel=2, function=function)
+
def warnoldtestrun(function=None):
    """ emit a deprecation warning for the old run()/execute() item API. """
    py.log._apiwarn("1.0",
        "implement item.runtest() instead of "
        "item.run() and item.execute()",
        stacklevel=2, function=function)
+
+
+
class RootCollector(Directory):
    """ the (nameless) root of the collection tree, anchored at
    config.topdir; translates between name 'trails' and nodes. """
    def __init__(self, config):
        Directory.__init__(self, config.topdir, parent=None, config=config)
        self.name = None

    def __repr__(self):
        return "<RootCollector fspath=%r>" %(self.fspath,)

    def getbynames(self, names):
        """ resolve a list of names step by step, starting at topdir;
        raises ValueError when a name cannot be found. """
        current = self.consider(self.config.topdir)
        while names:
            name = names.pop(0)
            if name == ".": # special "identity" name
                continue
            l = []
            for x in current._memocollect():
                if x.name == name:
                    l.append(x)
                elif x.fspath == current.fspath.join(name):
                    l.append(x)
                elif x.name == "()":
                    # instance collectors are transparent: descend and
                    # retry the same name one level below
                    names.insert(0, name)
                    l.append(x)
                    break
            if not l:
                raise ValueError("no node named %r below %r" %(name, current))
            current = l[0]
        return current

    def totrail(self, node):
        """ return a list of names addressing node relative to topdir. """
        chain = node.listchain()
        names = [self._getrelpath(chain[0].fspath)]
        names += [x.name for x in chain[1:]]
        return names

    def fromtrail(self, trail):
        return self.config._rootcol.getbynames(trail)

    def _getrelpath(self, fspath):
        """ return fspath relative to topdir ('.' for topdir itself). """
        topdir = self.config.topdir
        relpath = fspath.relto(topdir)
        if not relpath:
            if fspath == topdir:
                relpath = "."
            else:
                # bug fix: report the offending 'fspath' argument; the
                # old code printed self.fspath (always the topdir)
                raise ValueError("%r not relative to topdir %s"
                        %(fspath, topdir))
        return relpath

    def __getstate__(self):
        # the root pickles as just the config; __setstate__ rebuilds it
        return self.config

    def __setstate__(self, config):
        self.__init__(config)
Added: pypy/branch/py12/py/_test/config.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_test/config.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,291 @@
+import py, os
+from py._test.conftesthandle import Conftest
+from py._test.pluginmanager import PluginManager
+from py._test import parseopt
+from py._test.collect import RootCollector
+
def ensuretemp(string, dir=1):
    """ (deprecated) return temporary directory path with
        the given string as the trailing part. It is usually
        better to use the 'tmpdir' function argument which will
        take care to provide empty unique directories for each
        test call even if the test is called multiple times.
    """
    #py.log._apiwarn(">1.1", "use tmpdir function argument")
    # module-level convenience wrapper delegating to the per-process config
    return py.test.config.ensuretemp(string, dir=dir)
+
class CmdOptions(object):
    """ holds cmdline options as attributes."""

    def __init__(self, **kwargs):
        # expose every keyword argument as an instance attribute
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __repr__(self):
        return "<CmdOptions %r>" %(self.__dict__,)
+
class Error(Exception):
    """ Test Configuration Error.
    (also reachable as Config.Error; raised for user-level
    configuration problems such as unresolvable test paths.)
    """
+
class Config(object):
    """ access to config values, pluginmanager and plugin hooks. """
    Option = py.std.optparse.Option   # convenience re-export for conftests
    Error = Error                     # catchable as config.Error
    basetemp = None                   # lazily created base temp dir
    _sessionclass = None              # set via setsessionclass()

    def __init__(self, topdir=None, option=None):
        self.option = option or CmdOptions()
        self.topdir = topdir
        self._parser = parseopt.Parser(
            usage="usage: %prog [options] [file_or_dir] [file_or_dir] [...]",
            processopt=self._processopt,
        )
        self.pluginmanager = PluginManager()
        self._conftest = Conftest(onimport=self._onimportconftest)
        self.hook = self.pluginmanager.hook

    def _onimportconftest(self, conftestmodule):
        # every imported conftest.py may also act as a plugin
        self.trace("loaded conftestmodule %r" %(conftestmodule,))
        self.pluginmanager.consider_conftest(conftestmodule)

    def _getmatchingplugins(self, fspath):
        # global plugins plus the conftest modules applying to fspath
        allconftests = self._conftest._conftestpath2mod.values()
        plugins = [x for x in self.pluginmanager.getplugins()
                        if x not in allconftests]
        plugins += self._conftest.getconftestmodules(fspath)
        return plugins

    def trace(self, msg):
        # only emit trace messages when --traceconfig was given
        if getattr(self.option, 'traceconfig', None):
            self.hook.pytest_trace(category="config", msg=msg)

    def _processopt(self, opt):
        """ parser callback: seed option defaults from PYTEST_OPTION_*
        environment variables or from conftest 'option_<dest>' values. """
        if hasattr(opt, 'default') and opt.dest:
            val = os.environ.get("PYTEST_OPTION_" + opt.dest.upper(), None)
            if val is not None:
                if opt.type == "int":
                    val = int(val)
                elif opt.type == "long":
                    val = long(val)
                elif opt.type == "float":
                    val = float(val)
                elif not opt.type and opt.action in ("store_true", "store_false"):
                    # NOTE(review): eval() on an environment variable
                    # accepts arbitrary expressions, not just
                    # "True"/"False" -- semi-trusted input at best
                    val = eval(val)
                opt.default = val
            else:
                name = "option_" + opt.dest
                try:
                    opt.default = self._conftest.rget(name)
                except (ValueError, KeyError):
                    pass
            if not hasattr(self.option, opt.dest):
                setattr(self.option, opt.dest, opt.default)

    def _preparse(self, args):
        # plugin/conftest discovery must precede option parsing because
        # plugins and conftests may add options
        self.pluginmanager.consider_setuptools_entrypoints()
        self.pluginmanager.consider_env()
        self.pluginmanager.consider_preparse(args)
        self._conftest.setinitial(args)
        self.pluginmanager.do_addoption(self._parser)

    def parse(self, args):
        """ parse cmdline arguments into this config object.
            Note that this can only be called once per testing process.
        """
        assert not hasattr(self, 'args'), (
                "can only parse cmdline args at most once per Config object")
        self._preparse(args)
        self._parser.hints.extend(self.pluginmanager._hints)
        args = self._parser.parse_setoption(args, self.option)
        if not args:
            args.append(py.std.os.getcwd())   # default to the current dir
        self.topdir = gettopdir(args)
        self._rootcol = RootCollector(config=self)
        self._setargs(args)

    def _setargs(self, args):
        self.args = list(args)
        # the filesystem part of each argument ('path::name' -> path)
        self._argfspaths = [py.path.local(decodearg(x)[0]) for x in args]

    # config objects are usually pickled across system
    # barriers but they contain filesystem paths.
    # upon getstate/setstate we take care to do everything
    # relative to "topdir".
    def __getstate__(self):
        l = []
        for path in self.args:
            path = py.path.local(path)
            l.append(path.relto(self.topdir))
        return l, self.option.__dict__

    def __setstate__(self, repr):
        # we have to set py.test.config because loading
        # of conftest files may use it (deprecated)
        # mainly by py.test.config.addoptions()
        global config_per_process
        py.test.config = config_per_process = self
        args, cmdlineopts = repr
        cmdlineopts = CmdOptions(**cmdlineopts)
        # next line will register default plugins
        self.__init__(topdir=py.path.local(), option=cmdlineopts)
        self._rootcol = RootCollector(config=self)
        args = [str(self.topdir.join(x)) for x in args]
        self._preparse(args)
        self._setargs(args)

    def ensuretemp(self, string, dir=True):
        # return (creating if needed) a named dir below the base temp dir
        return self.getbasetemp().ensure(string, dir=dir)

    def getbasetemp(self):
        # lazily create the base temp dir: either --basetemp or a
        # numbered 'pytest-N' directory
        if self.basetemp is None:
            basetemp = self.option.basetemp
            if basetemp:
                basetemp = py.path.local(basetemp)
                if not basetemp.check(dir=1):
                    basetemp.mkdir()
            else:
                basetemp = py.path.local.make_numbered_dir(prefix='pytest-')
            self.basetemp = basetemp
        return self.basetemp

    def mktemp(self, basename, numbered=False):
        """ create and return a fresh temp dir below the base temp dir. """
        basetemp = self.getbasetemp()
        if not numbered:
            return basetemp.mkdir(basename)
        else:
            return py.path.local.make_numbered_dir(prefix=basename,
                keep=0, rootdir=basetemp, lock_timeout=None)

    def getinitialnodes(self):
        # one collection node per command line argument
        return [self.getnode(arg) for arg in self.args]

    def getnode(self, arg):
        """ resolve a command line argument ('path::name::...') to a
        collection node; raises self.Error if it cannot be resolved. """
        parts = decodearg(arg)
        path = py.path.local(parts.pop(0))
        if not path.check():
            raise self.Error("file not found: %s" %(path,))
        topdir = self.topdir
        if path != topdir and not path.relto(topdir):
            raise self.Error("path %r is not relative to %r" %
                (str(path), str(topdir)))
        # assumption: pytest's fs-collector tree follows the filesystem tree
        names = list(filter(None, path.relto(topdir).split(path.sep)))
        names += parts
        try:
            return self._rootcol.getbynames(names)
        except ValueError:
            e = py.std.sys.exc_info()[1]
            raise self.Error("can't collect: %s\n%s" % (arg, e.args[0]))

    def _getcollectclass(self, name, path):
        # conftest files may override collection classes (deprecated)
        try:
            cls = self._conftest.rget(name, path)
        except KeyError:
            return getattr(py.test.collect, name)
        else:
            py.log._apiwarn(">1.1", "%r was found in a conftest.py file, "
                "use pytest_collect hooks instead." % (cls,))
            return cls

    def getconftest_pathlist(self, name, path=None):
        """ return a matching value, which needs to be sequence
            of filenames that will be returned as a list of Path
            objects (they can be relative to the location
            where they were found).
        """
        try:
            mod, relroots = self._conftest.rget_with_confmod(name, path)
        except KeyError:
            return None
        modpath = py.path.local(mod.__file__).dirpath()
        l = []
        for relroot in relroots:
            if not isinstance(relroot, py.path.local):
                # interpret string entries relative to the conftest's dir
                relroot = relroot.replace("/", py.path.local.sep)
                relroot = modpath.join(relroot, abs=True)
            l.append(relroot)
        return l

    def addoptions(self, groupname, *specs):
        """ add a named group of options to the current testing session.
            This function gets invoked during testing session initialization.
        """
        py.log._apiwarn("1.0", "define pytest_addoptions(parser) to add options", stacklevel=2)
        group = self._parser.getgroup(groupname)
        for opt in specs:
            group._addoption_instance(opt)
        return self.option

    def addoption(self, *optnames, **attrs):
        """ register a command line option (optparse style). """
        return self._parser.addoption(*optnames, **attrs)

    def getvalueorskip(self, name, path=None):
        """ return getvalue() or call py.test.skip if no value exists. """
        try:
            val = self.getvalue(name, path)
            if val is None:
                raise KeyError(name)
            return val
        except KeyError:
            py.test.skip("no %r value found" %(name,))

    def getvalue(self, name, path=None):
        """ return 'name' value looked up from the 'options'
            and then from the first conftest file found up
            the path (including the path itself).
            if path is None, lookup the value in the initial
            conftest modules found during command line parsing.
        """
        try:
            return getattr(self.option, name)
        except AttributeError:
            return self._conftest.rget(name, path)

    def setsessionclass(self, cls):
        # allow (only once) installing a custom Session implementation
        if self._sessionclass is not None:
            raise ValueError("sessionclass already set to: %r" %(
                self._sessionclass))
        self._sessionclass = cls

    def initsession(self):
        """ return an initialized session object. """
        cls = self._sessionclass
        if cls is None:
            from py._test.session import Session
            cls = Session
        session = cls(self)
        self.trace("instantiated session %r" % session)
        return session
+
+#
+# helpers
+#
+
def gettopdir(args):
    """ return the top directory for the given paths.
        if the common base dir resides in a python package
        parent directory of the root package is returned.
    """
    fsargs = [py.path.local(decodearg(arg)[0]) for arg in args]
    p = fsargs and fsargs[0] or None
    for x in fsargs[1:]:
        p = p.common(x)
    # NOTE(review): this assert is stripped under 'python -O';
    # it also fails for an empty args list (p is None then)
    assert p, "cannot determine common basedir of %s" %(fsargs,)
    pkgdir = p.pypkgpath()
    if pkgdir is None:
        # not inside a package: top is the common dir itself
        if p.check(file=1):
            p = p.dirpath()
        return p
    else:
        # inside a package: top is the dir containing the root package
        return pkgdir.dirpath()
+
def decodearg(arg):
    """Split a test-id argument of the form ``path::name1::name2``
    into its parts; the first element is always the path."""
    return str(arg).split("::")
+
def onpytestaccess():
    """ called on first access of py.test; nothing to do here. """
    # it's enough to have our containing module loaded as
    # it initializes a per-process config instance
    # which loads default plugins which add to py.test.*
    pass

# a default per-process instance of py.test configuration
config_per_process = Config()
Added: pypy/branch/py12/py/_test/conftesthandle.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_test/conftesthandle.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,113 @@
+import py
+
class Conftest(object):
    """ the single place for accessing values and interacting
        towards conftest modules from py.test objects.

        (deprecated)
        Note that triggering Conftest instances to import
        conftest.py files may result in added cmdline options.
    """
    def __init__(self, onimport=None, confcutdir=None):
        self._path2confmods = {}        # dirpath -> list of conftest modules
        self._onimport = onimport       # callback for each imported conftest
        self._conftestpath2mod = {}     # conftest path -> module (import cache)
        self._confcutdir = confcutdir   # do not look for conftests above here

    def setinitial(self, args):
        """ try to find a first anchor path for looking up global values
            from conftests. This function is usually called _before_
            argument parsing. conftest files may add command line options
            and we thus have no completely safe way of determining
            which parts of the arguments are actually related to options
            and which are file system paths. We just try here to get
            bootstrapped ...
        """
        current = py.path.local()
        opt = '--confcutdir'
        for i in range(len(args)):
            opt1 = str(args[i])
            if opt1.startswith(opt):
                p = None
                if opt1 == opt:
                    # bug fix: the value lives in args[i+1]; the old
                    # check 'len(args) > i' was always true inside this
                    # loop and raised IndexError when --confcutdir was
                    # the last argument
                    if len(args) > i + 1:
                        p = current.join(args[i+1], abs=True)
                elif opt1.startswith(opt + "="):
                    p = current.join(opt1[len(opt)+1:], abs=1)
                # bug fix: 'p' used to be unbound (NameError) for
                # arguments merely sharing the '--confcutdir' prefix
                if p is not None:
                    self._confcutdir = p
                break
        for arg in args + [current]:
            anchor = current.join(arg, abs=1)
            if anchor.check(): # we found some file object
                self._path2confmods[None] = self.getconftestmodules(anchor)
                # let's also consider test* dirs
                if anchor.check(dir=1):
                    for x in anchor.listdir(lambda x: x.check(dir=1, dotfile=0)):
                        self.getconftestmodules(x)
                break
        else:
            assert 0, "no root of filesystem?"

    def getconftestmodules(self, path):
        """ return a list of imported conftest modules for the given path. """
        try:
            clist = self._path2confmods[path]
        except KeyError:
            if path is None:
                # typo fix in message: was "confest"
                raise ValueError("missing default conftest.")
            dp = path.dirpath()
            if dp == path:   # reached the filesystem root
                clist = []
            else:
                cutdir = self._confcutdir
                clist = self.getconftestmodules(dp)
                if cutdir and path != cutdir and not path.relto(cutdir):
                    pass   # outside confcutdir: do not import conftests
                else:
                    conftestpath = path.join("conftest.py")
                    if conftestpath.check(file=1):
                        clist.append(self.importconftest(conftestpath))
            self._path2confmods[path] = clist
        # be defensive: avoid changes from caller side to
        # affect us by always returning a copy of the actual list
        return clist[:]

    def rget(self, name, path=None):
        """ return the 'name' value from the closest conftest. """
        mod, value = self.rget_with_confmod(name, path)
        return value

    def rget_with_confmod(self, name, path=None):
        """ like rget() but also return the providing module;
        raises KeyError if no conftest defines 'name'. """
        modules = self.getconftestmodules(path)
        modules.reverse()   # closest conftest wins
        for mod in modules:
            try:
                return mod, getattr(mod, name)
            except AttributeError:
                continue
        raise KeyError(name)

    def importconftest(self, conftestpath):
        """ import (and cache) the given conftest.py file. """
        assert conftestpath.check(), conftestpath
        try:
            return self._conftestpath2mod[conftestpath]
        except KeyError:
            if not conftestpath.dirpath('__init__.py').check(file=1):
                # HACK: we don't want any "globally" imported conftest.py,
                # prone to conflicts and subtle problems
                modname = str(conftestpath).replace('.', conftestpath.sep)
                mod = conftestpath.pyimport(modname=modname)
            else:
                mod = conftestpath.pyimport()
            self._conftestpath2mod[conftestpath] = mod
            dirpath = conftestpath.dirpath()
            if dirpath in self._path2confmods:
                # register the new module with every cached path it applies to
                for path, mods in self._path2confmods.items():
                    if path and path.relto(dirpath) or path == dirpath:
                        assert mod not in mods
                        mods.append(mod)
            self._postimport(mod)
            return mod

    def _postimport(self, mod):
        # notify the registered onimport callback (plugin registration)
        if self._onimport:
            self._onimport(mod)
        return mod
Added: pypy/branch/py12/py/_test/funcargs.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_test/funcargs.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,176 @@
+import py
+
def getfuncargnames(function):
    """ return the function's argument names, skipping the first
    argument for bound methods and dropping trailing arguments that
    carry defaults (those are not funcargs). """
    argnames = py.std.inspect.getargs(py.code.getrawcode(function))[0]
    startindex = py.std.inspect.ismethod(function) and 1 or 0
    # func_defaults is Python 2, __defaults__ is Python 3
    defaults = getattr(function, 'func_defaults',
                       getattr(function, '__defaults__', None)) or ()
    numdefaults = len(defaults)
    if numdefaults:
        return argnames[startindex:-numdefaults]
    return argnames[startindex:]
+
def fillfuncargs(function):
    """ fill missing funcargs. """
    # 'function' is a pyfuncitem collection node, not a plain callable
    request = FuncargRequest(pyfuncitem=function)
    request._fillfuncargs()
+
def getplugins(node, withpy=False): # may be any node
    """ return the plugins matching the node's fspath; with
    withpy=True also include the node's python module and instance
    objects as funcarg-factory providers. """
    plugins = node.config._getmatchingplugins(node.fspath)
    if withpy:
        mod = node.getparent(py.test.collect.Module)
        if mod is not None:
            plugins.append(mod.obj)
        inst = node.getparent(py.test.collect.Instance)
        if inst is not None:
            plugins.append(inst.obj)
    return plugins
+
+_notexists = object()
+class CallSpec:
+ def __init__(self, funcargs, id, param):
+ self.funcargs = funcargs
+ self.id = id
+ if param is not _notexists:
+ self.param = param
+ def __repr__(self):
+ return "<CallSpec id=%r param=%r funcargs=%r>" %(
+ self.id, getattr(self, 'param', '?'), self.funcargs)
+
class Metafunc:
    """ collects the planned calls for a test function; passed to
    pytest_generate_tests hooks which may register calls via addcall(). """
    def __init__(self, function, config=None, cls=None, module=None):
        self.config = config
        # fix: 'self.module = module' was assigned twice; keep one
        self.module = module
        self.function = function
        self.funcargnames = getfuncargnames(function)
        self.cls = cls
        self._calls = []                 # registered CallSpec objects
        self._ids = py.builtin.set()     # ids seen so far (must be unique)

    def addcall(self, funcargs=None, id=_notexists, param=_notexists):
        """ register a call with the given funcargs/id/param; a missing
        id defaults to the call's index; ids must be unique. """
        assert funcargs is None or isinstance(funcargs, dict)
        if id is None:
            raise ValueError("id=None not allowed")
        if id is _notexists:
            id = len(self._calls)
        id = str(id)
        if id in self._ids:
            raise ValueError("duplicate id %r" % id)
        self._ids.add(id)
        self._calls.append(CallSpec(funcargs, id, param))
+
class FuncargRequest:
    """ a request for a function argument, passed to
    pytest_funcarg__NAME factory functions; gives access to the
    requesting test context plus caching/finalization helpers. """
    _argprefix = "pytest_funcarg__"   # factories are named _argprefix + argname
    _argname = None

    class LookupError(LookupError):
        """ error on performing funcarg request. """

    def __init__(self, pyfuncitem):
        self._pyfuncitem = pyfuncitem
        self.function = pyfuncitem.obj
        self.module = pyfuncitem.getparent(py.test.collect.Module).obj
        clscol = pyfuncitem.getparent(py.test.collect.Class)
        self.cls = clscol and clscol.obj or None
        self.instance = py.builtin._getimself(self.function)
        self.config = pyfuncitem.config
        self.fspath = pyfuncitem.fspath
        # param is only present for parametrized calls
        if hasattr(pyfuncitem, '_requestparam'):
            self.param = pyfuncitem._requestparam
        self._plugins = getplugins(pyfuncitem, withpy=True)
        self._funcargs = self._pyfuncitem.funcargs.copy()
        self._name2factory = {}   # argname -> remaining factory candidates
        self._currentarg = None   # argname currently being created

    def _fillfuncargs(self):
        # resolve every funcarg the test function declares and does
        # not already have a value for
        argnames = getfuncargnames(self.function)
        if argnames:
            assert not getattr(self._pyfuncitem, '_args', None), (
                "yielded functions cannot have funcargs")
        for argname in argnames:
            if argname not in self._pyfuncitem.funcargs:
                self._pyfuncitem.funcargs[argname] = self.getfuncargvalue(argname)

    def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
        """ cache and return result of calling setup().

            The requested argument name, the scope and the ``extrakey``
            determine the cache key. The scope also determines when
            teardown(result) will be called. valid scopes are:
            scope == 'function': when the single test function run finishes.
            scope == 'module': when tests in a different module are run
            scope == 'session': when tests of the session have run.
        """
        if not hasattr(self.config, '_setupcache'):
            self.config._setupcache = {} # XXX weakref?
        cachekey = (self._currentarg, self._getscopeitem(scope), extrakey)
        cache = self.config._setupcache
        try:
            val = cache[cachekey]
        except KeyError:
            val = setup()
            cache[cachekey] = val
            if teardown is not None:
                # the finalizer evicts the cache entry before tearing down
                def finalizer():
                    del cache[cachekey]
                    teardown(val)
                self._addfinalizer(finalizer, scope=scope)
        return val

    def getfuncargvalue(self, argname):
        """ return (creating it via its factory if needed) the value
        for the given funcarg name; raises self.LookupError when no
        factory exists. """
        try:
            return self._funcargs[argname]
        except KeyError:
            pass
        if argname not in self._name2factory:
            self._name2factory[argname] = self.config.pluginmanager.listattr(
                    plugins=self._plugins,
                    attrname=self._argprefix + str(argname)
                )
        #else: we are called recursively
        if not self._name2factory[argname]:
            self._raiselookupfailed(argname)
        funcargfactory = self._name2factory[argname].pop()
        # remember the current argname so cached_setup/finalizers can
        # key on it; restore the previous one afterwards (recursion)
        oldarg = self._currentarg
        self._currentarg = argname
        try:
            self._funcargs[argname] = res = funcargfactory(request=self)
        finally:
            self._currentarg = oldarg
        return res

    def _getscopeitem(self, scope):
        # map a scope name to the collection node whose teardown ends it
        if scope == "function":
            return self._pyfuncitem
        elif scope == "module":
            return self._pyfuncitem.getparent(py.test.collect.Module)
        elif scope == "session":
            return None
        raise ValueError("unknown finalization scope %r" %(scope,))

    def _addfinalizer(self, finalizer, scope):
        colitem = self._getscopeitem(scope)
        self.config._setupstate.addfinalizer(
            finalizer=finalizer, colitem=colitem)

    def addfinalizer(self, finalizer):
        """ call the given finalizer after test function finished execution. """
        self._addfinalizer(finalizer, scope="function")

    def __repr__(self):
        return "<FuncargRequest for %r>" %(self._pyfuncitem)

    def _raiselookupfailed(self, argname):
        # build a helpful error listing all available funcarg names
        available = []
        for plugin in self._plugins:
            for name in vars(plugin):
                if name.startswith(self._argprefix):
                    name = name[len(self._argprefix):]
                    if name not in available:
                        available.append(name)
        fspath, lineno, msg = self._pyfuncitem.reportinfo()
        msg = "LookupError: no factory found for function argument %r" % (argname,)
        msg += "\n available funcargs: %s" %(", ".join(available),)
        msg += "\n use 'py.test --funcargs [testpath]' for help on them."
        raise self.LookupError(msg)
Added: pypy/branch/py12/py/_test/parseopt.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_test/parseopt.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,112 @@
+"""
+thin wrapper around Python's optparse.py,
+adding some extra checks and ways to systematically
+have environment variables provide default values
+for options. Basic usage:
+
+ >>> parser = Parser()
+ >>> parser.addoption("--hello", action="store_true", dest="hello")
+ >>> option, args = parser.parse(['--hello'])
+ >>> option.hello
+ True
+ >>> args
+ []
+
+"""
+import py
+import optparse
+
class Parser:
    """ Parser for command line arguments.

    Options are registered through named groups (or an anonymous
    "custom options" group).  A ``processopt`` callback, if given,
    is invoked for every option that stores a value (has a dest).
    """

    def __init__(self, usage=None, processopt=None):
        self._anonymous = OptionGroup("custom options", parser=self)
        self._groups = []
        # fix: addnote() appended to self._notes which was never
        # initialized and raised AttributeError on first use
        self._notes = []
        self._processopt = processopt
        self._usage = usage
        self.hints = []

    def processoption(self, option):
        """ run the processopt callback for value-storing options. """
        if self._processopt:
            if option.dest:
                self._processopt(option)

    def addnote(self, note):
        """ remember a free-form note string. """
        self._notes.append(note)

    def getgroup(self, name, description="", after=None):
        """ return the named option group, creating it if necessary.

        A newly created group is inserted right after the group named
        ``after``, or appended at the end when ``after`` is not found.
        """
        for group in self._groups:
            if group.name == name:
                return group
        group = OptionGroup(name, description, parser=self)
        i = 0
        for i, grp in enumerate(self._groups):
            if grp.name == after:
                break
        self._groups.insert(i+1, group)
        return group

    # NOTE: the dead "addgroup = getgroup" alias that was immediately
    # shadowed by the def below has been removed
    def addgroup(self, name, description=""):
        """ deprecated: use getgroup() which gets-or-creates. """
        py.log._apiwarn("1.1", "use getgroup() which gets-or-creates")
        return self.getgroup(name, description)

    def addoption(self, *opts, **attrs):
        """ add an optparse-style option. """
        self._anonymous.addoption(*opts, **attrs)

    def parse(self, args):
        """ parse args, returning (options, remaining-args). """
        optparser = MyOptionParser(self)
        groups = self._groups + [self._anonymous]
        for group in groups:
            if group.options:
                desc = group.description or group.name
                optgroup = optparse.OptionGroup(optparser, desc)
                optgroup.add_options(group.options)
                optparser.add_option_group(optgroup)
        return optparser.parse_args([str(x) for x in args])

    def parse_setoption(self, args, option):
        """ parse args and copy all parsed values onto ``option``;
        returns the remaining positional arguments. """
        parsedoption, args = self.parse(args)
        for name, value in parsedoption.__dict__.items():
            setattr(option, name, value)
        return args


class OptionGroup:
    """ a named group of optparse options. """
    def __init__(self, name, description="", parser=None):
        self.name = name
        self.description = description
        self.options = []
        self.parser = parser

    def addoption(self, *optnames, **attrs):
        """ add an option to this group; lowercase short options are
        reserved for core options and rejected here. """
        option = optparse.Option(*optnames, **attrs)
        self._addoption_instance(option, shortupper=False)

    def _addoption(self, *optnames, **attrs):
        # internal registration which also permits lowercase short options
        option = optparse.Option(*optnames, **attrs)
        self._addoption_instance(option, shortupper=True)

    def _addoption_instance(self, option, shortupper=False):
        if not shortupper:
            for opt in option._short_opts:
                if opt[0] == '-' and opt[1].islower():
                    raise ValueError("lowercase shortoptions reserved")
        if self.parser:
            self.parser.processoption(option)
        self.options.append(option)
+
+
class MyOptionParser(optparse.OptionParser):
    """ OptionParser variant whose epilog lists the Parser's hints. """

    def __init__(self, parser):
        self._parser = parser
        optparse.OptionParser.__init__(self, usage=parser._usage)

    def format_epilog(self, formatter):
        """ render "hint: ..." lines, or nothing if there are no hints. """
        hints = self._parser.hints
        if not hints:
            return ""
        body = "\n".join(["hint: " + hint for hint in hints])
        return "\n" + body + "\n\n"
Added: pypy/branch/py12/py/_test/pluginmanager.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_test/pluginmanager.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,345 @@
+"""
+managing loading and interacting with pytest plugins.
+"""
+import py
+import inspect
+from py._plugin import hookspec
+
# names of the builtin plugin modules (py._plugin.pytest_<name>)
# imported and registered by every PluginManager on instantiation
default_plugins = (
    "default runner capture mark terminal skipping tmpdir monkeypatch "
    "recwarn pdb pastebin unittest helpconfig nose assertion genscript "
    "junitxml doctest").split()
+
def check_old_use(mod, modname):
    """ reject plugin modules that still define the pre-1.0
    ``<Name>Plugin`` class style. """
    basename = modname[len('pytest_'):]
    clsname = basename.capitalize() + "Plugin"
    assert not hasattr(mod, clsname), (mod, clsname)
+
class PluginManager(object):
    """ central registry for pytest plugins: handles (un)registration,
    plugin discovery (env var, setuptools entry points, conftest
    modules, "-p" options) and owns the hook relay used to call into
    registered plugins. """
    def __init__(self):
        self.registry = Registry()
        self._name2plugin = {}
        self._hints = []        # human-readable notes, e.g. skipped plugins
        self.hook = HookRelay([hookspec], registry=self.registry)
        self.register(self)     # the manager itself implements some hooks
        for spec in default_plugins:
            self.import_plugin(spec)

    def _getpluginname(self, plugin, name):
        # derive a registration name: explicit name, module basename,
        # or the object's id() as a last resort
        if name is None:
            if hasattr(plugin, '__name__'):
                name = plugin.__name__.split(".")[-1]
            else:
                name = id(plugin)
        return name

    def register(self, plugin, name=None):
        """ register a plugin object; return True if newly registered,
        False if a plugin under the same name already exists. """
        assert not self.isregistered(plugin), plugin
        assert not self.registry.isregistered(plugin), plugin
        name = self._getpluginname(plugin, name)
        if name in self._name2plugin:
            return False
        self._name2plugin[name] = plugin
        # let the plugin add hook specifications before the
        # registration event is announced
        self.call_plugin(plugin, "pytest_registerhooks",
            {'pluginmanager': self})
        self.hook.pytest_plugin_registered(manager=self, plugin=plugin)
        self.registry.register(plugin)
        return True

    def unregister(self, plugin):
        """ unregister a plugin and drop all names mapping to it. """
        self.hook.pytest_plugin_unregistered(plugin=plugin)
        self.registry.unregister(plugin)
        for name, value in list(self._name2plugin.items()):
            if value == plugin:
                del self._name2plugin[name]

    def isregistered(self, plugin, name=None):
        # NOTE(review): returns True or (implicitly) None, never False;
        # callers only use the result in boolean context
        if self._getpluginname(plugin, name) in self._name2plugin:
            return True
        for val in self._name2plugin.values():
            if plugin == val:
                return True

    def registerhooks(self, spec):
        """ add another hook specification module/class to the relay. """
        self.hook._registerhooks(spec)

    def getplugins(self):
        """ return the list of all registered plugin objects. """
        return list(self.registry)

    def skipifmissing(self, name):
        """ py.test.skip() if no plugin with the given name exists. """
        if not self.hasplugin(name):
            py.test.skip("plugin %r is missing" % name)

    def hasplugin(self, name):
        """ return True if a plugin with the given name is registered. """
        try:
            self.getplugin(name)
        except KeyError:
            return False
        else:
            return True

    def getplugin(self, name):
        """ return the plugin registered under ``name`` (raw or in
        canonical "pytest_<name>" form); raise KeyError otherwise. """
        try:
            return self._name2plugin[name]
        except KeyError:
            impname = canonical_importname(name)
            return self._name2plugin[impname]

    # API for bootstrapping
    #
    def _envlist(self, varname):
        # comma-separated list from the environment, empty tuple if unset
        val = py.std.os.environ.get(varname, None)
        if val is not None:
            return val.split(',')
        return ()

    def consider_env(self):
        """ import plugins named in the PYTEST_PLUGINS env variable. """
        for spec in self._envlist("PYTEST_PLUGINS"):
            self.import_plugin(spec)

    def consider_setuptools_entrypoints(self):
        """ register plugins advertised through the 'pytest11'
        setuptools entry point group. """
        try:
            from pkg_resources import iter_entry_points
        except ImportError:
            return # XXX issue a warning
        for ep in iter_entry_points('pytest11'):
            name = canonical_importname(ep.name)
            if name in self._name2plugin:
                continue
            plugin = ep.load()
            self.register(plugin, name=name)

    def consider_preparse(self, args):
        """ import plugins named by "-p NAME" pairs in the raw args. """
        for opt1,opt2 in zip(args, args[1:]):
            if opt1 == "-p":
                self.import_plugin(opt2)

    def consider_conftest(self, conftestmodule):
        """ register a conftest module (once, keyed by its file path)
        and process its pytest_plugins declaration. """
        cls = getattr(conftestmodule, 'ConftestPlugin', None)
        if cls is not None:
            raise ValueError("%r: 'ConftestPlugins' only existed till 1.0.0b1, "
                "were removed in 1.0.0b2" % (cls,))
        if self.register(conftestmodule, name=conftestmodule.__file__):
            self.consider_module(conftestmodule)

    def consider_module(self, mod):
        """ import plugins named in the module's ``pytest_plugins``. """
        attr = getattr(mod, "pytest_plugins", ())
        if attr:
            if not isinstance(attr, (list, tuple)):
                attr = (attr,)
            for spec in attr:
                self.import_plugin(spec)

    def import_plugin(self, spec):
        """ import, register and recursively consider the named plugin;
        a plugin-level skip is recorded as a hint, not an error. """
        assert isinstance(spec, str)
        modname = canonical_importname(spec)
        if modname in self._name2plugin:
            return
        try:
            mod = importplugin(modname)
        except KeyboardInterrupt:
            raise
        except py.test.skip.Exception:
            # 2.x/3.x compatible way to access the active exception
            e = py.std.sys.exc_info()[1]
            self._hints.append("skipped plugin %r: %s" %((modname, e.msg)))
        else:
            check_old_use(mod, modname)
            self.register(mod)
            self.consider_module(mod)

    def pytest_terminal_summary(self, terminalreporter):
        """ hook: with --traceconfig, print the collected hints. """
        tw = terminalreporter._tw
        if terminalreporter.config.option.traceconfig:
            for hint in self._hints:
                tw.line("hint: %s" % hint)

    #
    #
    # API for interacting with registered and instantiated plugin objects
    #
    #
    def listattr(self, attrname, plugins=None):
        """ return attrname collected from all (or given) plugins. """
        return self.registry.listattr(attrname, plugins=plugins)

    def notify_exception(self, excinfo=None):
        """ report an internal error to plugins; defaults to the
        exception currently being handled. """
        if excinfo is None:
            excinfo = py.code.ExceptionInfo()
        excrepr = excinfo.getrepr(funcargs=True, showlocals=True)
        return self.hook.pytest_internalerror(excrepr=excrepr)

    def do_addoption(self, parser):
        """ run all pytest_addoption implementations; reversed so
        later-registered (conftest) plugins are called last. """
        mname = "pytest_addoption"
        methods = self.registry.listattr(mname, reverse=True)
        mc = MultiCall(methods, {'parser': parser})
        mc.execute()

    def pytest_plugin_registered(self, plugin):
        """ hook: export the plugin's pytest_namespace entries onto
        py.test and, if configuration already happened, replay the
        addoption/configure phases for the late-registered plugin. """
        dic = self.call_plugin(plugin, "pytest_namespace", {}) or {}
        for name, value in dic.items():
            setattr(py.test, name, value)
            py.test.__all__.append(name)
        if hasattr(self, '_config'):
            self.call_plugin(plugin, "pytest_addoption",
                {'parser': self._config._parser})
            self.call_plugin(plugin, "pytest_configure",
                {'config': self._config})

    def call_plugin(self, plugin, methname, kwargs):
        """ call methname on one plugin, returning its first result. """
        return MultiCall(
            methods=self.listattr(methname, plugins=[plugin]),
            kwargs=kwargs, firstresult=True).execute()

    def do_configure(self, config):
        # may only be configured once at a time
        assert not hasattr(self, '_config')
        self._config = config
        config.hook.pytest_configure(config=self._config)

    def do_unconfigure(self, config):
        config = self._config
        del self._config
        config.hook.pytest_unconfigure(config=config)
        config.pluginmanager.unregister(self)
+
def canonical_importname(name):
    """ return the lowercased plugin module name, prefixed with
    'pytest_' unless it already carries that prefix. """
    lowered = name.lower()
    prefix = "pytest_"
    if lowered.startswith(prefix):
        return lowered
    return prefix + lowered
+
def importplugin(importspec):
    """ import a plugin module: first as a top-level module, then from
    the builtin py._plugin package; if both fail with an ImportError
    that names the spec itself, re-import the top-level name so the
    original traceback is the one reported. """
    try:
        return __import__(importspec)
    except ImportError:
        e = py.std.sys.exc_info()[1]
        if str(e).find(importspec) == -1:
            # the plugin exists but failed importing something else:
            # propagate that real error
            raise
        try:
            return __import__("py._plugin.%s" %(importspec),
                None, None, '__doc__')
        except ImportError:
            e = py.std.sys.exc_info()[1]
            if str(e).find(importspec) == -1:
                raise
            # show the original exception, not the failing internal one
            return __import__(importspec)
+
+
class MultiCall:
    """ execute a call into multiple python functions/methods. """

    def __init__(self, methods, kwargs, firstresult=False):
        # copy both inputs so execute() mutating them does not
        # affect the caller's objects
        self.methods = methods[:]
        self.kwargs = kwargs.copy()
        # make the call object itself available to implementations
        self.kwargs['__multicall__'] = self
        self.results = []
        self.firstresult = firstresult

    def __repr__(self):
        status = "%d results, %d meths" % (len(self.results), len(self.methods))
        return "<MultiCall %s, kwargs=%r>" %(status, self.kwargs)

    def execute(self):
        """ invoke the remaining methods, last-registered first; with
        firstresult, stop at and return the first non-None result,
        otherwise return the list of all non-None results. """
        while self.methods:
            method = self.methods.pop()
            res = method(**self.getkwargs(method))
            if res is None:
                continue
            self.results.append(res)
            if self.firstresult:
                return res
        if not self.firstresult:
            return self.results

    def getkwargs(self, method):
        """ select the subset of kwargs the method actually accepts. """
        accepted = {}
        for argname in varnames(method):
            if argname in self.kwargs:
                accepted[argname] = self.kwargs[argname]
        return accepted
+
def varnames(func):
    """ return the code object's variable names (argument names first)
    for the given function/method, skipping the implicit 'self' for
    methods; returns an empty tuple for objects without a python code
    object (e.g. builtins). """
    # True slices as 1, dropping 'self' from the front
    ismethod = inspect.ismethod(func)
    rawcode = py.code.getrawcode(func)
    try:
        return rawcode.co_varnames[ismethod:]
    except AttributeError:
        return ()
+
class Registry:
    """
    Manage plugins: keep the ordered list of registered plugin
    objects and collect a named attribute across all of them.
    """
    def __init__(self, plugins=None):
        self._plugins = [] if plugins is None else plugins

    def register(self, plugin):
        """ append a not-yet-registered plugin object (never a string). """
        assert not isinstance(plugin, str)
        assert not plugin in self._plugins
        self._plugins.append(plugin)

    def unregister(self, plugin):
        """ remove the plugin; raises ValueError if it is unknown. """
        self._plugins.remove(plugin)

    def isregistered(self, plugin):
        return plugin in self._plugins

    def __iter__(self):
        return iter(self._plugins)

    def listattr(self, attrname, plugins=None, reverse=False):
        """ collect ``attrname`` from each plugin that defines it,
        in registration order (reversed on request). """
        if plugins is None:
            plugins = self._plugins
        collected = []
        for plugin in plugins:
            try:
                collected.append(getattr(plugin, attrname))
            except AttributeError:
                pass
        if reverse:
            collected.reverse()
        return collected
+
class HookRelay:
    """ exposes one HookCaller attribute per hook declared in the
    given hook specification module(s)/class(es). """
    def __init__(self, hookspecs, registry):
        if not isinstance(hookspecs, list):
            hookspecs = [hookspecs]
        self._hookspecs = []
        self._registry = registry
        for hookspec in hookspecs:
            self._registerhooks(hookspec)

    def _registerhooks(self, hookspecs):
        """ create a HookCaller for every public name in the spec;
        a spec function's 'firstresult' attribute marks the hook as
        first-result-wins. """
        self._hookspecs.append(hookspecs)
        for name, method in vars(hookspecs).items():
            if name[:1] != "_":
                firstresult = getattr(method, 'firstresult', False)
                hc = HookCaller(self, name, firstresult=firstresult)
                setattr(self, name, hc)
                #print ("setting new hook", name)

    def _performcall(self, name, multicall):
        # indirection point, e.g. to allow tracing of hook calls
        return multicall.execute()
+
class HookCaller:
    """ callable bound to one hook name; invoking it runs the matching
    hook implementations of registered plugins via MultiCall. """
    def __init__(self, hookrelay, name, firstresult):
        self.hookrelay = hookrelay
        self.name = name
        self.firstresult = firstresult

    def __repr__(self):
        return "<HookCaller %r>" %(self.name,)

    def __call__(self, **kwargs):
        """ call the hook on all registered plugins. """
        registry = self.hookrelay._registry
        mc = MultiCall(registry.listattr(self.name), kwargs,
                       firstresult=self.firstresult)
        return self.hookrelay._performcall(self.name, mc)

    def pcall(self, plugins, **kwargs):
        """ call the hook on the given plugins only. """
        registry = self.hookrelay._registry
        mc = MultiCall(registry.listattr(self.name, plugins=plugins),
                       kwargs, firstresult=self.firstresult)
        return self.hookrelay._performcall(self.name, mc)
+
Added: pypy/branch/py12/py/_test/pycollect.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_test/pycollect.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,397 @@
+"""
+Python related collection nodes.
+"""
+import py
+import inspect
+from py._test.collect import configproperty, warnoldcollect
+from py._test import funcargs
+from py._code.code import TerminalRepr
+
class PyobjMixin(object):
    """ mixin for collection nodes that wrap a python object
    (module, class, instance or function). """
    def obj():
        # property factory: lazily computes the wrapped python object
        def fget(self):
            try:
                return self._obj
            except AttributeError:
                # first access: let the subclass hook produce it
                self._obj = obj = self._getobj()
                return obj
        def fset(self, value):
            self._obj = value
        return property(fget, fset, None, "underlying python object")
    obj = obj()

    def _getobj(self):
        """ default: look our name up on the parent's python object. """
        return getattr(self.parent.obj, self.name)

    def getmodpath(self, stopatmodule=True, includemodule=False):
        """ return python path relative to the containing module. """
        chain = self.listchain()
        chain.reverse()
        parts = []
        for node in chain:
            if isinstance(node, Instance):
                # instance nodes ("()") contribute no name of their own
                continue
            name = node.name
            if isinstance(node, Module):
                assert name.endswith(".py")
                name = name[:-3]
                if stopatmodule:
                    if includemodule:
                        parts.append(name)
                    break
            parts.append(name)
        parts.reverse()
        s = ".".join(parts)
        # "path.[0]" -> "path[0]" for generated/parametrized names
        return s.replace(".[", "[")

    def _getfslineno(self):
        # cached (fspath, lineno) of the wrapped object's definition
        try:
            return self._fslineno
        except AttributeError:
            pass
        obj = self.obj
        # xxx let decorators etc specify a sane ordering
        if hasattr(obj, 'place_as'):
            obj = obj.place_as

        self._fslineno = py.code.getfslineno(obj)
        return self._fslineno

    def reportinfo(self):
        """ return (fspath, lineno, domainpath) used for reporting. """
        fspath, lineno = self._getfslineno()
        modpath = self.getmodpath()
        return fspath, lineno, modpath
+
class PyCollectorMixin(PyobjMixin, py.test.collect.Collector):
    """ mixin implementing collection of python test items from a
    python object's namespace. """
    # the concrete node classes are configurable per-conftest
    Class = configproperty('Class')
    Instance = configproperty('Instance')
    Function = configproperty('Function')
    Generator = configproperty('Generator')

    def funcnamefilter(self, name):
        """ default filter for collectable function names. """
        return name.startswith('test')
    def classnamefilter(self, name):
        """ default filter for collectable class names. """
        return name.startswith('Test')

    def collect(self):
        l = self._deprecated_collect()
        if l is not None:
            return l
        # NB. we avoid random getattrs and peek in the __dict__ instead
        dicts = [getattr(self.obj, '__dict__', {})]
        for basecls in inspect.getmro(self.obj.__class__):
            dicts.append(basecls.__dict__)
        seen = {}
        l = []
        for dic in dicts:
            for name, obj in dic.items():
                if name in seen:
                    continue
                seen[name] = True
                if name[0] != "_":
                    res = self.makeitem(name, obj)
                    if res is None:
                        continue
                    if not isinstance(res, list):
                        res = [res]
                    l.extend(res)
        # stable ordering by source location (fspath, lineno)
        l.sort(key=lambda item: item.reportinfo()[:2])
        return l

    def _deprecated_join(self, name):
        # support conftests still overriding the old join() API
        if self.__class__.join != py.test.collect.Collector.join:
            warnoldcollect()
            return self.join(name)

    def makeitem(self, name, obj):
        """ delegate item creation to the pytest_pycollect_makeitem hook. """
        return self.ihook.pytest_pycollect_makeitem(
            collector=self, name=name, obj=obj)

    def _istestclasscandidate(self, name, obj):
        # classes with a custom __init__ cannot be instantiated by the
        # collector and are rejected; returns True or (implicitly) None
        if self.classnamefilter(name) and \
           inspect.isclass(obj):
            if hasinit(obj):
                # XXX WARN
                return False
            return True

    def _genfunctions(self, name, funcobj):
        """ build Function item(s) for a test function, expanding any
        parametrization produced by pytest_generate_tests. """
        module = self.getparent(Module).obj
        clscol = self.getparent(Class)
        cls = clscol and clscol.obj or None
        metafunc = funcargs.Metafunc(funcobj, config=self.config,
            cls=cls, module=module)
        gentesthook = self.config.hook.pytest_generate_tests
        plugins = funcargs.getplugins(self, withpy=True)
        gentesthook.pcall(plugins, metafunc=metafunc)
        if not metafunc._calls:
            # not parametrized: a single plain Function item
            return self.Function(name, parent=self)
        l = []
        for callspec in metafunc._calls:
            subname = "%s[%s]" %(name, callspec.id)
            function = self.Function(name=subname, parent=self,
                callspec=callspec, callobj=funcobj)
            l.append(function)
        return l
+
class Module(py.test.collect.File, PyCollectorMixin):
    """ collector for a python test module file. """
    def _getobj(self):
        return self._memoizedcall('_obj', self._importtestmodule)

    def _importtestmodule(self):
        # we assume we are only called once per module
        mod = self.fspath.pyimport()
        #print "imported test module", mod
        self.config.pluginmanager.consider_module(mod)
        return mod

    def setup(self):
        """ run the module-level setup_module hook, if present. """
        if getattr(self.obj, 'disabled', 0):
            py.log._apiwarn(">1.1.1", "%r uses 'disabled' which is deprecated, "
                "use pytestmark=..., see pytest_skipping plugin" % (self.obj,))
            py.test.skip("%r is disabled" %(self.obj,))
        if hasattr(self.obj, 'setup_module'):
            #XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, its probably a py.test style one
            # so we pass the current module object
            if inspect.getargspec(self.obj.setup_module)[0]:
                self.obj.setup_module(self.obj)
            else:
                self.obj.setup_module()

    def teardown(self):
        """ run the module-level teardown_module hook, if present. """
        if hasattr(self.obj, 'teardown_module'):
            #XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, its probably a py.test style one
            # so we pass the current module object
            if inspect.getargspec(self.obj.teardown_module)[0]:
                self.obj.teardown_module(self.obj)
            else:
                self.obj.teardown_module()
+
class Class(PyCollectorMixin, py.test.collect.Collector):
    """ collector for the test methods of a test class. """

    def collect(self):
        l = self._deprecated_collect()
        if l is not None:
            return l
        # actual method collection happens on an Instance of the class
        return [self.Instance(name="()", parent=self)]

    def setup(self):
        """ run setup_class(cls), honoring the deprecated 'disabled'. """
        if getattr(self.obj, 'disabled', 0):
            py.log._apiwarn(">1.1.1", "%r uses 'disabled' which is deprecated, "
                "use pytestmark=..., see pytest_skipping plugin" % (self.obj,))
            py.test.skip("%r is disabled" %(self.obj,))
        setup_class = getattr(self.obj, 'setup_class', None)
        if setup_class is not None:
            # unwrap the python2 unbound method so the class can be passed
            setup_class = getattr(setup_class, 'im_func', setup_class)
            setup_class(self.obj)

    def teardown(self):
        """ run teardown_class(cls) if defined. """
        teardown_class = getattr(self.obj, 'teardown_class', None)
        if teardown_class is not None:
            teardown_class = getattr(teardown_class, 'im_func', teardown_class)
            teardown_class(self.obj)
+
class Instance(PyCollectorMixin, py.test.collect.Collector):
    """ collector representing one instance of a test class. """
    def _getobj(self):
        # instantiate the parent (Class) object
        return self.parent.obj()
    def Function(self):
        return getattr(self.obj, 'Function',
            PyCollectorMixin.Function.__get__(self)) # XXX for python 2.2
    def _keywords(self):
        return []
    Function = property(Function)

    #def __repr__(self):
    #    return "<%s of '%s'>" %(self.__class__.__name__,
    #        self.parent.obj.__name__)

    def newinstance(self):
        """ create, remember and return a fresh class instance. """
        self.obj = self._getobj()
        return self.obj
+
class FunctionMixin(PyobjMixin):
    """ mixin for the code common to Function and Generator.
    """

    def setup(self):
        """ perform setup for this test function. """
        if inspect.ismethod(self.obj):
            name = 'setup_method'
        else:
            name = 'setup_function'
        if isinstance(self.parent, Instance):
            # each test method runs on a freshly created instance
            obj = self.parent.newinstance()
            self.obj = self._getobj()
        else:
            obj = self.parent.obj
        setup_func_or_method = getattr(obj, name, None)
        if setup_func_or_method is not None:
            setup_func_or_method(self.obj)

    def teardown(self):
        """ perform teardown for this test function. """
        if inspect.ismethod(self.obj):
            name = 'teardown_method'
        else:
            name = 'teardown_function'
        obj = self.parent.obj
        teardown_func_or_meth = getattr(obj, name, None)
        if teardown_func_or_meth is not None:
            teardown_func_or_meth(self.obj)

    def _prunetraceback(self, traceback):
        # cut the traceback down to frames inside the test function
        # unless --fulltrace was requested; successively relax the cut
        # criteria when a cut leaves the traceback unchanged
        if hasattr(self, '_obj') and not self.config.option.fulltrace:
            code = py.code.Code(self.obj)
            path, firstlineno = code.path, code.firstlineno
            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(path=path)
                if ntraceback == traceback:
                    ntraceback = ntraceback.cut(excludepath=py._pydir)
            traceback = ntraceback.filter()
        return traceback

    def _repr_failure_py(self, excinfo):
        # funcarg lookup errors get a compact custom representation
        # showing the test function's def line(s)
        if excinfo.errisinstance(funcargs.FuncargRequest.LookupError):
            fspath, lineno, msg = self.reportinfo()
            lines, _ = inspect.getsourcelines(self.obj)
            for i, line in enumerate(lines):
                if line.strip().startswith('def'):
                    return FuncargLookupErrorRepr(fspath, lineno,
                        lines[:i+1], str(excinfo.value))
        return super(FunctionMixin, self)._repr_failure_py(excinfo)

    def repr_failure(self, excinfo, outerr=None):
        assert outerr is None, "XXX outerr usage is deprecated"
        return self._repr_failure_py(excinfo)

    # one-character status used in progress output for failures
    shortfailurerepr = "F"
+
class FuncargLookupErrorRepr(TerminalRepr):
    """ failure representation for funcarg factory lookup errors:
    shows the test function's def line(s) plus the error text. """
    def __init__(self, filename, firstlineno, deflines, errorstring):
        self.deflines = deflines
        self.errorstring = errorstring
        self.filename = filename
        self.firstlineno = firstlineno

    def toterminal(self, tw):
        """ write the representation to a terminal writer. """
        tw.line()
        for defline in self.deflines:
            tw.line(" " + defline.strip())
        for errline in self.errorstring.split("\n"):
            tw.line(" " + errline.strip(), red=True)
        tw.line()
        tw.line("%s:%d" % (self.filename, self.firstlineno+1))
+
class Generator(FunctionMixin, PyCollectorMixin, py.test.collect.Collector):
    """ collector for yield-based generative tests (deprecated style). """
    def collect(self):
        # test generators are seen as collectors but they also
        # invoke setup/teardown on popular request
        # (induced by the common "test_*" naming shared with normal tests)
        self.config._setupstate.prepare(self)
        l = []
        seen = {}
        for i, x in enumerate(self.obj()):
            name, call, args = self.getcallargs(x)
            if not py.builtin.callable(call):
                raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
            if name is None:
                name = "[%d]" % i
            else:
                name = "['%s']" % name
            if name in seen:
                raise ValueError("%r generated tests with non-unique name %r" %(self, name))
            seen[name] = True
            l.append(self.Function(name, self, args=args, callobj=call))
        return l

    def getcallargs(self, obj):
        """ split a yielded value into (name, callable, args);
        a leading string is taken as an explicit test name. """
        if not isinstance(obj, (tuple, list)):
            obj = (obj,)
        # explicit naming
        if isinstance(obj[0], py.builtin._basestring):
            name = obj[0]
            obj = obj[1:]
        else:
            name = None
        call, args = obj[0], obj[1:]
        return name, call, args
+
+
#
# Test Items
#
# sentinel distinguishing "no callobj passed" from callobj=None
_dummy = object()
class Function(FunctionMixin, py.test.collect.Item):
    """ a Function Item is responsible for setting up
    and executing a Python callable test object.
    """
    _genid = None
    def __init__(self, name, parent=None, args=None, config=None,
                 callspec=None, callobj=_dummy):
        super(Function, self).__init__(name, parent, config=config)
        self._args = args   # non-None only for yielded (generative) tests
        if self._isyieldedfunction():
            assert not callspec, "yielded functions (deprecated) cannot have funcargs"
        else:
            if callspec is not None:
                self.funcargs = callspec.funcargs or {}
                self._genid = callspec.id
                if hasattr(callspec, "param"):
                    self._requestparam = callspec.param
            else:
                self.funcargs = {}
        if callobj is not _dummy:
            self._obj = callobj
        # unwrap python2 bound methods ('im_func') to the plain function
        self.function = getattr(self.obj, 'im_func', self.obj)

    def _getobj(self):
        name = self.name
        i = name.find("[") # parametrization
        if i != -1:
            name = name[:i]
        return getattr(self.parent.obj, name)

    def _isyieldedfunction(self):
        return self._args is not None

    def readkeywords(self):
        """ combine inherited keywords with the function's own attributes. """
        d = super(Function, self).readkeywords()
        d.update(py.builtin._getfuncdict(self.obj))
        return d

    def runtest(self):
        """ execute the underlying test function. """
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)

    def setup(self):
        super(Function, self).setup()
        if hasattr(self, 'funcargs'):
            funcargs.fillfuncargs(self)

    def __eq__(self, other):
        try:
            return (self.name == other.name and
                    self._args == other._args and
                    self.parent == other.parent and
                    self.obj == other.obj and
                    getattr(self, '_genid', None) ==
                    getattr(other, '_genid', None)
            )
        except AttributeError:
            pass
        return False

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # hash only stable identity so equal items hash equally
        return hash((self.parent, self.name))
+
def hasinit(obj):
    """ return True if obj defines an __init__ of its own, i.e. one
    that is not object's builtin slot wrapper; otherwise return None
    (falsy), matching the historical behavior. """
    init = getattr(obj, '__init__', None)
    if init and not isinstance(init, type(object.__init__)):
        return True
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_test/session.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,128 @@
+""" basic test session implementation.
+
+* drives collection of tests
+* triggers executions of tests
+* produces events used by reporting
+"""
+
+import py
+
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2      # e.g. KeyboardInterrupt during the run
EXIT_INTERNALERROR = 3    # unexpected internal exception
EXIT_NOHOSTS = 4          # presumably distributed runs with no hosts -- confirm

# imports used for genitems()
Item = py.test.collect.Item
Collector = py.test.collect.Collector
+
class Session(object):
    """ drives collection and execution of tests and produces the
    reporting events consumed by plugins. """
    nodeid = ""
    def __init__(self, config):
        self.config = config
        self.pluginmanager = config.pluginmanager # shortcut
        self.pluginmanager.register(self)
        self._testsfailed = False
        self._nomatch = False   # set once keyword selection is exhausted
        self.shouldstop = False

    def genitems(self, colitems, keywordexpr=None):
        """ yield Items from iterating over the given colitems. """
        if colitems:
            colitems = list(colitems)
        while colitems:
            next = colitems.pop(0)
            if isinstance(next, (tuple, list)):
                # flatten nested collections in-place
                colitems[:] = list(next) + colitems
                continue
            assert self.pluginmanager is next.config.pluginmanager
            if isinstance(next, Item):
                remaining = self.filteritems([next])
                if remaining:
                    self.config.hook.pytest_itemstart(item=next)
                    yield next
            else:
                assert isinstance(next, Collector)
                self.config.hook.pytest_collectstart(collector=next)
                rep = self.config.hook.pytest_make_collect_report(collector=next)
                if rep.passed:
                    # recurse into the collector's children
                    for x in self.genitems(rep.result, keywordexpr):
                        yield x
                self.config.hook.pytest_collectreport(report=rep)
            if self.shouldstop:
                break

    def filteritems(self, colitems):
        """ return items to process (some may be deselected)"""
        keywordexpr = self.config.option.keyword
        if not keywordexpr or self._nomatch:
            return colitems
        if keywordexpr[-1] == ":":
            # trailing ":" means: stop filtering after the first match
            keywordexpr = keywordexpr[:-1]
        remaining = []
        deselected = []
        for colitem in colitems:
            if isinstance(colitem, Item):
                if colitem._skipbykeyword(keywordexpr):
                    deselected.append(colitem)
                    continue
            remaining.append(colitem)
        if deselected:
            self.config.hook.pytest_deselected(items=deselected)
            if self.config.option.keyword.endswith(":"):
                # first match found: let every later item pass through
                self._nomatch = True
        return remaining

    def collect(self, colitems):
        """ generate all test items from the given collection nodes. """
        keyword = self.config.option.keyword
        for x in self.genitems(colitems, keyword):
            yield x

    def sessionstarts(self):
        """ setup any necessary resources ahead of the test run. """
        self.config.hook.pytest_sessionstart(session=self)

    def pytest_runtest_logreport(self, report):
        # hook: track failures and honor --exitfirst
        if report.failed:
            self._testsfailed = True
            if self.config.option.exitfirst:
                self.shouldstop = True
    pytest_collectreport = pytest_runtest_logreport

    def sessionfinishes(self, exitstatus):
        """ teardown any resources after a test run. """
        self.config.hook.pytest_sessionfinish(
            session=self,
            exitstatus=exitstatus,
        )

    def main(self, colitems):
        """ main loop for running tests. """
        self.shouldstop = False
        self.sessionstarts()
        exitstatus = EXIT_OK
        try:
            self._mainloop(colitems)
            if self._testsfailed:
                exitstatus = EXIT_TESTSFAILED
            self.sessionfinishes(exitstatus=exitstatus)
        except KeyboardInterrupt:
            excinfo = py.code.ExceptionInfo()
            self.config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
            exitstatus = EXIT_INTERRUPTED
        except:
            # any other exception is an internal error; report it
            # through the plugin hook rather than crashing
            excinfo = py.code.ExceptionInfo()
            self.config.pluginmanager.notify_exception(excinfo)
            exitstatus = EXIT_INTERNALERROR
        if exitstatus in (EXIT_INTERNALERROR, EXIT_INTERRUPTED):
            # the success path already called sessionfinishes above
            self.sessionfinishes(exitstatus=exitstatus)
        return exitstatus

    def _mainloop(self, colitems):
        for item in self.collect(colitems):
            if self.shouldstop:
                break
            if not self.config.option.collectonly:
                item.config.hook.pytest_runtest_protocol(item=item)
Added: pypy/branch/py12/py/_xmlgen.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/_xmlgen.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,244 @@
+"""
+module for generating and serializing xml and html structures
+by using simple python objects.
+
+(c) holger krekel, holger at merlinux eu. 2009
+"""
+import py
+import sys, re
+
if sys.version_info >= (3,0):
    # Python 3: str is already unicode; provide no-op helpers so the
    # rest of the module can be written version-agnostically.
    def u(s):
        return s
    def unicode(x):
        # prefer an explicit __unicode__ hook when the object defines one
        if hasattr(x, '__unicode__'):
            return x.__unicode__()
        return str(x)
else:
    # Python 2: route text through the real unicode type
    def u(s):
        return unicode(s)
    unicode = unicode  # re-export the builtin under the module-level name
+
+
class NamespaceMetaclass(type):
    """Metaclass that manufactures tag classes on first attribute access
    and caches them on the namespace class."""
    def __getattr__(self, name):
        # never synthesize private/dunder names
        if name.startswith('_'):
            raise AttributeError(name)
        if self == Namespace:
            raise ValueError("Namespace class is abstract")
        tagspec = self.__tagspec__
        if tagspec is not None and name not in tagspec:
            raise AttributeError(name)
        attrs = {}
        if self.__stickyname__:
            attrs['xmlname'] = name
        tagclass = type(name, (self.__tagclass__,), attrs)
        # cache so subsequent lookups bypass __getattr__
        setattr(self, name, tagclass)
        return tagclass
+
class Tag(list):
    """A tag node: a list of children plus an attribute bag ``self.attr``."""

    class Attr(object):
        # plain namespace holding the tag's xml/html attributes
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    def __init__(self, *args, **kwargs):
        # positional args become children, keyword args become attributes
        super(Tag, self).__init__(args)
        self.attr = self.Attr(**kwargs)

    def __unicode__(self):
        return self.unicode(indent=0)
    __str__ = __unicode__

    def unicode(self, indent=2):
        """Serialize this tag tree to a single unicode string."""
        chunks = []
        SimpleUnicodeVisitor(chunks.append, indent).visit(self)
        return "".join(chunks)

    def __repr__(self):
        return "<%r tag object %d>" % (self.__class__.__name__, id(self))
+
# the abstract root namespace; subclasses override these three slots
Namespace = NamespaceMetaclass('Namespace', (object, ), {
    '__tagspec__': None,      # optional dict restricting legal tag names
    '__tagclass__': Tag,      # base class for generated tag classes
    '__stickyname__': False,  # if true, record the name as ``xmlname``
})
+
class HtmlTag(Tag):
    """Tag flavour that serializes through the html-aware visitor."""
    def unicode(self, indent=2):
        chunks = []
        visitor = HtmlVisitor(chunks.append, indent, shortempty=False)
        visitor.visit(self)
        return u("").join(chunks)
+
# exported plain html namespace
class html(Namespace):
    """Namespace of all known html tags; unknown names raise AttributeError."""
    __tagclass__ = HtmlTag
    __stickyname__ = True
    __tagspec__ = dict([(tagname, 1) for tagname in (
        'a,abbr,acronym,address,applet,area,b,bdo,big,blink,'
        'blockquote,body,br,button,caption,center,cite,code,col,'
        'colgroup,comment,dd,del,dfn,dir,div,dl,dt,em,embed,'
        'fieldset,font,form,frameset,h1,h2,h3,h4,h5,h6,head,html,'
        'i,iframe,img,input,ins,kbd,label,legend,li,link,listing,'
        'map,marquee,menu,meta,multicol,nobr,noembed,noframes,'
        'noscript,object,ol,optgroup,option,p,pre,q,s,script,'
        'select,small,span,strike,strong,style,sub,sup,table,'
        'tbody,td,textarea,tfoot,th,thead,title,tr,tt,u,ul,xmp,'
        'base,basefont,frame,hr,isindex,param,samp,var'
    ).split(',') if tagname])

    class Style(object):
        # keyword-style css holder: trailing-underscore-free names,
        # underscores become dashes
        def __init__(self, **kw):
            for attrname, value in kw.items():
                setattr(self, attrname.replace('_', '-'), value)
+
+
class raw(object):
    """just a box that can contain a unicode string that will be
    included directly in the output"""
    def __init__(self, uniobj):
        # stored verbatim; the visitors emit it without escaping
        self.uniobj = uniobj
+
class SimpleUnicodeVisitor(object):
    """ recursive visitor to write unicode. """

    def __init__(self, write, indent=0, curindent=0, shortempty=True):
        # write: callable receiving each serialized text chunk
        self.write = write
        self.cache = {}    # node class -> dispatched bound method
        self.visited = {}  # for detection of recursion
        self.indent = indent
        self.curindent = curindent
        self.parents = []
        self.shortempty = shortempty  # short empty tags or not

    def visit(self, node):
        """ dispatcher on node's class/bases name. """
        cls = node.__class__
        try:
            visitmethod = self.cache[cls]
        except KeyError:
            # walk the mro looking for a method named after the class
            for subclass in cls.__mro__:
                visitmethod = getattr(self, subclass.__name__, None)
                if visitmethod is not None:
                    break
            else:
                visitmethod = self.object
            self.cache[cls] = visitmethod
        visitmethod(node)

    def object(self, obj):
        # fallback: serialize anything else as escaped text
        self.write(escape(unicode(obj)))

    def raw(self, obj):
        # raw boxes bypass escaping entirely
        self.write(obj.uniobj)

    def list(self, obj):
        assert id(obj) not in self.visited
        self.visited[id(obj)] = 1
        # BUG FIX: ``map(self.visit, obj)`` returns a lazy iterator on
        # Python 3 (which this module supports via the shim at the top),
        # so the children were never visited; iterate explicitly.
        for elem in obj:
            self.visit(elem)

    def Tag(self, tag):
        assert id(tag) not in self.visited
        try:
            tag.parent = self.parents[-1]
        except IndexError:
            tag.parent = None
        self.visited[id(tag)] = 1
        tagname = getattr(tag, 'xmlname', tag.__class__.__name__)
        if self.curindent and not self._isinline(tagname):
            self.write("\n" + u(' ') * self.curindent)
        if tag:
            # non-empty: open tag, recurse into children, close tag
            self.curindent += self.indent
            self.write(u('<%s%s>') % (tagname, self.attributes(tag)))
            self.parents.append(tag)
            for x in tag:
                self.visit(x)
            self.parents.pop()
            self.write(u('</%s>') % tagname)
            self.curindent -= self.indent
        else:
            # empty: emit either <tag/> or <tag></tag>
            nameattr = tagname + self.attributes(tag)
            if self._issingleton(tagname):
                self.write(u('<%s/>') % (nameattr,))
            else:
                self.write(u('<%s></%s>') % (nameattr, tagname))

    def attributes(self, tag):
        # serialize attributes in sorted order, then any style attribute
        attrlist = dir(tag.attr)
        attrlist.sort()
        l = []
        for name in attrlist:
            res = self.repr_attribute(tag.attr, name)
            if res is not None:
                l.append(res)
        l.extend(self.getstyle(tag))
        return u("").join(l)

    def repr_attribute(self, attrs, name):
        """Return ' name="value"' for one attribute, or None to skip it.

        Dunder names are skipped; a single trailing underscore is stripped
        (so ``class_`` serializes as ``class``)."""
        if name[:2] != '__':
            value = getattr(attrs, name)
            if name.endswith('_'):
                name = name[:-1]
            return ' %s="%s"' % (name, escape(unicode(value)))

    def getstyle(self, tag):
        """ return attribute list suitable for styling. """
        try:
            styledict = tag.style.__dict__
        except AttributeError:
            return []
        else:
            stylelist = [x + ': ' + y for x, y in styledict.items()]
            return [u(' style="%s"') % u('; ').join(stylelist)]

    def _issingleton(self, tagname):
        """can (and will) be overridden in subclasses"""
        return self.shortempty

    def _isinline(self, tagname):
        """can (and will) be overridden in subclasses"""
        return False
+
class HtmlVisitor(SimpleUnicodeVisitor):
    """Visitor knowing which html tags are void (singleton) or inline."""

    # void elements serialized as <tag/>
    single = dict.fromkeys(
        ('br,img,area,param,col,hr,meta,link,base,'
         'input,frame').split(','), 1)
    # inline elements: no newline/indent inserted before them
    inline = dict.fromkeys(
        ('a abbr acronym b basefont bdo big br cite code dfn em font '
         'i img input kbd label q s samp select small span strike '
         'strong sub sup textarea tt u var'.split(' ')), 1)

    def repr_attribute(self, attrs, name):
        # drop the class attribute entirely when it is None
        if name == 'class_' and getattr(attrs, name) is None:
            return None
        return super(HtmlVisitor, self).repr_attribute(attrs, name)

    def _issingleton(self, tagname):
        return tagname in self.single

    def _isinline(self, tagname):
        return tagname in self.inline
+
+
+class _escape:
+ def __init__(self):
+ self.escape = {
+ u('"') : u('"'), u('<') : u('<'), u('>') : u('>'),
+ u('&') : u('&'), u("'") : u('''),
+ }
+ self.charef_rex = re.compile(u("|").join(self.escape.keys()))
+
+ def _replacer(self, match):
+ return self.escape[match.group(0)]
+
+ def __call__(self, ustring):
+ """ xml-escape the given unicode string. """
+ ustring = unicode(ustring)
+ return self.charef_rex.sub(self._replacer, ustring)

# module-level singleton used by the visitors above
escape = _escape()
Added: pypy/branch/py12/py/apipkg.py
==============================================================================
--- (empty file)
+++ pypy/branch/py12/py/apipkg.py Fri Apr 30 17:07:52 2010
@@ -0,0 +1,95 @@
+"""
+apipkg: control the exported namespace of a python package.
+
+see http://pypi.python.org/pypi/apipkg
+
+(c) holger krekel, 2009 - MIT license
+"""
+import sys
+from types import ModuleType
+
+__version__ = "1.0b6"
+
def initpkg(pkgname, exportdefs):
    """ initialize given package from the export definitions. """
    apimod = ApiModule(pkgname, exportdefs, implprefix=pkgname)
    oldmod = sys.modules[pkgname]
    # carry selected metadata over from the module being replaced
    apimod.__file__ = getattr(oldmod, '__file__', None)
    apimod.__version__ = getattr(oldmod, '__version__', '0')
    for attrname in ('__path__', '__loader__'):
        if hasattr(oldmod, attrname):
            setattr(apimod, attrname, getattr(oldmod, attrname))
    sys.modules[pkgname] = apimod
+
def importobj(modpath, attrname):
    """Import dotted module *modpath* and return its *attrname* attribute."""
    # a non-empty fromlist makes __import__ return the leaf module
    mod = __import__(modpath, None, None, ['__doc__'])
    return getattr(mod, attrname)
+
class ApiModule(ModuleType):
    """A module whose exported attributes are imported lazily on first
    access, driven by an ``{name: "modpath:attrname"}`` importspec."""

    def __init__(self, name, importspec, implprefix=None):
        self.__name__ = name
        self.__all__ = [x for x in importspec if x != '__onfirstaccess__']
        self.__map__ = {}  # attrname -> (modpath, attrname) not yet imported
        self.__implprefix__ = implprefix or name
        for name, importspec in importspec.items():
            if isinstance(importspec, dict):
                # nested spec: build and register a sub-ApiModule
                subname = '%s.%s'%(self.__name__, name)
                apimod = ApiModule(subname, importspec, implprefix)
                sys.modules[subname] = apimod
                setattr(self, name, apimod)
            else:
                # "modpath:attrname" string; leading '.' is implprefix-relative
                modpath, attrname = importspec.split(':')
                if modpath[0] == '.':
                    modpath = implprefix + modpath
                if name == '__doc__':
                    # the docstring is resolved eagerly
                    self.__doc__ = importobj(modpath, attrname)
                else:
                    self.__map__[name] = (modpath, attrname)

    def __repr__(self):
        l = []
        if hasattr(self, '__version__'):
            l.append("version=" + repr(self.__version__))
        if hasattr(self, '__file__'):
            l.append('from ' + repr(self.__file__))
        if l:
            return '<ApiModule %r %s>' % (self.__name__, " ".join(l))
        return '<ApiModule %r>' % (self.__name__,)

    def __makeattr(self, name):
        """lazily compute value for name or raise AttributeError if unknown."""
        target = None
        if '__onfirstaccess__' in self.__map__:
            # run the one-shot hook (popped so it runs exactly once)
            target = self.__map__.pop('__onfirstaccess__')
            importobj(*target)()
        try:
            modpath, attrname = self.__map__[name]
        except KeyError:
            if target is not None and name != '__onfirstaccess__':
                # retry, onfirstaccess might have set attrs
                return getattr(self, name)
            raise AttributeError(name)
        else:
            result = importobj(modpath, attrname)
            # cache on the instance and drop the pending-import entry
            setattr(self, name, result)
            try:
                del self.__map__[name]
            except KeyError:
                pass # in a recursive-import situation a double-del can happen
            return result

    __getattr__ = __makeattr

    def __dict__(self):
        # force all the content of the module to be loaded when __dict__ is read
        dictdescr = ModuleType.__dict__['__dict__']
        dict = dictdescr.__get__(self)
        if dict is not None:
            hasattr(self, 'some')  # triggers the __getattr__ machinery once
            for name in self.__all__:
                try:
                    self.__makeattr(name)
                except AttributeError:
                    pass
        return dict
    __dict__ = property(__dict__)
More information about the Pypy-commit
mailing list